| field | value | date / path |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-21 11:54:28 +0000 |
| commit | e6918187568dbd01842d8d1d2c808ce16a894239 (patch) | |
| tree | 64f88b554b444a49f656b6c656111a145cbbaa28 | /src/test/rgw |
| parent | Initial commit. (diff) | |
| download | ceph-e6918187568dbd01842d8d1d2c808ce16a894239.tar.xz, ceph-e6918187568dbd01842d8d1d2c808ce16a894239.zip | |
Adding upstream version 18.2.2. (upstream/18.2.2)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/test/rgw')
64 files changed, 23123 insertions, 0 deletions
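Each `unittest_*` target in the CMakeLists.txt hunk below is wired up with `add_executable`, `add_ceph_unittest`, and `target_link_libraries(... ${rgw_libs})`, which also registers it with CTest. As a minimal sketch of how one of these new tests could be built and run from an already configured build tree — the `build/` directory and the choice of ninja are assumptions about the local setup, not something this commit dictates:

```
cd build
ninja unittest_rgw_putobj          # or: make unittest_rgw_putobj (target name taken from the hunk below)
ctest -R unittest_rgw_putobj -V    # run the registered unit test through CTest with verbose output
```

The same pattern applies to any of the `unittest_rgw_*` targets added here; targets that need a running RADOS cluster (those linking `radostest-cxx`) are installed as `ceph_test_*` binaries instead.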
diff --git a/src/test/rgw/CMakeLists.txt b/src/test/rgw/CMakeLists.txt new file mode 100644 index 000000000..57d6696c7 --- /dev/null +++ b/src/test/rgw/CMakeLists.txt @@ -0,0 +1,282 @@ +if(WITH_RADOSGW_AMQP_ENDPOINT) + # amqp mock library + set(amqp_mock_src + amqp_mock.cc amqp_url.c) + add_library(amqp_mock STATIC ${amqp_mock_src}) +endif() + +if(WITH_RADOSGW_KAFKA_ENDPOINT) + # kafka stub library + set(kafka_stub_src + kafka_stub.cc) + add_library(kafka_stub STATIC ${kafka_stub_src}) +endif() + +if(WITH_RADOSGW_LUA_PACKAGES) + list(APPEND rgw_libs Boost::filesystem) +endif() + +if(WITH_JAEGER) + list(APPEND rgw_libs ${jaeger_base}) +endif() + +#unittest_rgw_bencode +add_executable(unittest_rgw_bencode test_rgw_bencode.cc) +add_ceph_unittest(unittest_rgw_bencode) +target_link_libraries(unittest_rgw_bencode ${rgw_libs}) + +# unittest_rgw_bucket_sync_cache +add_executable(unittest_rgw_bucket_sync_cache test_rgw_bucket_sync_cache.cc) +add_ceph_unittest(unittest_rgw_bucket_sync_cache) +target_link_libraries(unittest_rgw_bucket_sync_cache ${rgw_libs}) + +#unitttest_rgw_period_history +add_executable(unittest_rgw_period_history test_rgw_period_history.cc) +add_ceph_unittest(unittest_rgw_period_history) +target_link_libraries(unittest_rgw_period_history ${rgw_libs}) + +# unitttest_rgw_compression +add_executable(unittest_rgw_compression + test_rgw_compression.cc + $<TARGET_OBJECTS:unit-main>) +add_ceph_unittest(unittest_rgw_compression) +target_link_libraries(unittest_rgw_compression ${rgw_libs}) + +# unitttest_http_manager +add_executable(unittest_http_manager test_http_manager.cc) +add_ceph_unittest(unittest_http_manager) +target_link_libraries(unittest_http_manager ${rgw_libs}) + +# unitttest_rgw_reshard_wait +add_executable(unittest_rgw_reshard_wait test_rgw_reshard_wait.cc) +add_ceph_unittest(unittest_rgw_reshard_wait) +target_link_libraries(unittest_rgw_reshard_wait ${rgw_libs}) + +set(test_rgw_a_src test_rgw_common.cc) +add_library(test_rgw_a STATIC ${test_rgw_a_src}) +target_link_libraries(test_rgw_a ${rgw_libs}) + +add_executable(bench_rgw_ratelimit bench_rgw_ratelimit.cc) +target_link_libraries(bench_rgw_ratelimit ${rgw_libs}) + +add_executable(bench_rgw_ratelimit_gc bench_rgw_ratelimit_gc.cc ) +target_link_libraries(bench_rgw_ratelimit_gc ${rgw_libs}) + +add_executable(unittest_rgw_ratelimit test_rgw_ratelimit.cc $<TARGET_OBJECTS:unit-main>) +target_link_libraries(unittest_rgw_ratelimit ${rgw_libs}) +add_ceph_unittest(unittest_rgw_ratelimit) + +# ceph_test_rgw_manifest +set(test_rgw_manifest_srcs test_rgw_manifest.cc) +add_executable(ceph_test_rgw_manifest + ${test_rgw_manifest_srcs} + ) +target_link_libraries(ceph_test_rgw_manifest + test_rgw_a + cls_rgw_client + cls_lock_client + cls_refcount_client + cls_log_client + cls_timeindex_client + cls_version_client + cls_user_client + librados + global + ${BLKID_LIBRARIES} + ${CURL_LIBRARIES} + ${EXPAT_LIBRARIES} + ${CMAKE_DL_LIBS} + ${UNITTEST_LIBS} + ${CRYPTO_LIBS}) + +set(test_rgw_obj_srcs test_rgw_obj.cc) +add_executable(ceph_test_rgw_obj + ${test_rgw_obj_srcs} + ) +target_link_libraries(ceph_test_rgw_obj + test_rgw_a + cls_rgw_client + cls_lock_client + cls_refcount_client + cls_log_client + cls_version_client + cls_user_client + librados + global + ceph-common + ${CURL_LIBRARIES} + ${EXPAT_LIBRARIES} + ${CMAKE_DL_LIBS} + ${UNITTEST_LIBS} + ${CRYPTO_LIBS} + ) +install(TARGETS ceph_test_rgw_obj DESTINATION ${CMAKE_INSTALL_BINDIR}) + +set(test_rgw_crypto_srcs test_rgw_crypto.cc) +add_executable(unittest_rgw_crypto + 
${test_rgw_crypto_srcs} + ) +add_ceph_unittest(unittest_rgw_crypto) +target_link_libraries(unittest_rgw_crypto + ${rgw_libs} + cls_rgw_client + cls_lock_client + cls_refcount_client + cls_log_client + cls_version_client + cls_user_client + librados + global + ${CURL_LIBRARIES} + ${EXPAT_LIBRARIES} + ${CMAKE_DL_LIBS} + ${UNITTEST_LIBS} + ${CRYPTO_LIBS} + ) + +set(test_rgw_reshard_srcs test_rgw_reshard.cc) +add_executable(unittest_rgw_reshard + ${test_rgw_reshard_srcs} + ) +add_ceph_unittest(unittest_rgw_reshard) +target_link_libraries(unittest_rgw_reshard + ${rgw_libs} + ) + +add_executable(unittest_rgw_putobj test_rgw_putobj.cc) +add_ceph_unittest(unittest_rgw_putobj) +target_link_libraries(unittest_rgw_putobj ${rgw_libs} ${UNITTEST_LIBS}) + +add_executable(ceph_test_rgw_throttle + test_rgw_throttle.cc + $<TARGET_OBJECTS:unit-main>) +target_link_libraries(ceph_test_rgw_throttle ${rgw_libs} + librados global ${UNITTEST_LIBS}) +install(TARGETS ceph_test_rgw_throttle DESTINATION ${CMAKE_INSTALL_BINDIR}) + +add_executable(unittest_rgw_iam_policy test_rgw_iam_policy.cc) +add_ceph_unittest(unittest_rgw_iam_policy) +target_link_libraries(unittest_rgw_iam_policy + ${rgw_libs} + cls_rgw_client + cls_lock_client + cls_refcount_client + cls_log_client + cls_version_client + cls_user_client + librados + global + ${CURL_LIBRARIES} + ${EXPAT_LIBRARIES} + ${CMAKE_DL_LIBS} + ${UNITTEST_LIBS} + ${CRYPTO_LIBS} + ) + +add_executable(unittest_rgw_string test_rgw_string.cc) +add_ceph_unittest(unittest_rgw_string) +target_include_directories(unittest_rgw_string + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") + +# unitttest_rgw_dmclock_queue +add_executable(unittest_rgw_dmclock_scheduler test_rgw_dmclock_scheduler.cc $<TARGET_OBJECTS:unit-main>) +add_ceph_unittest(unittest_rgw_dmclock_scheduler) +target_include_directories(unittest_rgw_dmclock_scheduler + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") + +target_link_libraries(unittest_rgw_dmclock_scheduler rgw_schedulers global ${UNITTEST_LIBS}) + +if(WITH_RADOSGW_AMQP_ENDPOINT) + add_executable(unittest_rgw_amqp test_rgw_amqp.cc) + add_ceph_unittest(unittest_rgw_amqp) + target_include_directories(unittest_rgw_amqp + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") + target_link_libraries(unittest_rgw_amqp ${rgw_libs}) +endif() + +# unittest_rgw_xml +add_executable(unittest_rgw_xml test_rgw_xml.cc) +add_ceph_unittest(unittest_rgw_xml) +target_include_directories(unittest_rgw_xml + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") +target_link_libraries(unittest_rgw_xml ${rgw_libs} ${EXPAT_LIBRARIES}) + +# unittest_rgw_lc +add_executable(unittest_rgw_lc test_rgw_lc.cc) +add_ceph_unittest(unittest_rgw_lc) +target_include_directories(unittest_rgw_lc + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") +target_link_libraries(unittest_rgw_lc + rgw_common ${rgw_libs} ${EXPAT_LIBRARIES}) + +# unittest_rgw_arn +add_executable(unittest_rgw_arn test_rgw_arn.cc) +add_ceph_unittest(unittest_rgw_arn) +target_include_directories(unittest_rgw_arn + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") +target_link_libraries(unittest_rgw_arn ${rgw_libs}) + +# unittest_rgw_kms +add_executable(unittest_rgw_kms test_rgw_kms.cc) 
+add_ceph_unittest(unittest_rgw_kms) +target_include_directories(unittest_rgw_kms + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") +target_link_libraries(unittest_rgw_kms ${rgw_libs}) + +# unittest_rgw_url +add_executable(unittest_rgw_url test_rgw_url.cc) +add_ceph_unittest(unittest_rgw_url) +target_include_directories(unittest_rgw_url + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") +target_link_libraries(unittest_rgw_url ${rgw_libs}) + +add_executable(ceph_test_rgw_gc_log test_rgw_gc_log.cc $<TARGET_OBJECTS:unit-main>) +target_include_directories(ceph_test_rgw_gc_log + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") +target_link_libraries(ceph_test_rgw_gc_log ${rgw_libs} radostest-cxx) +install(TARGETS ceph_test_rgw_gc_log DESTINATION ${CMAKE_INSTALL_BINDIR}) + +add_ceph_test(test-ceph-diff-sorted.sh + ${CMAKE_CURRENT_SOURCE_DIR}/test-ceph-diff-sorted.sh) + +# unittest_cls_fifo_legacy +add_executable(unittest_cls_fifo_legacy test_cls_fifo_legacy.cc) +target_include_directories(unittest_cls_fifo_legacy + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") +target_link_libraries(unittest_cls_fifo_legacy radostest-cxx ${UNITTEST_LIBS} + ${rgw_libs}) + +# unittest_log_backing +add_executable(unittest_log_backing test_log_backing.cc) +target_include_directories(unittest_log_backing + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") +target_link_libraries(unittest_log_backing radostest-cxx ${UNITTEST_LIBS} + ${rgw_libs}) + +add_executable(unittest_rgw_lua test_rgw_lua.cc) +add_ceph_unittest(unittest_rgw_lua) +target_include_directories(unittest_rgw_lua + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw" + SYSTEM PRIVATE "${CMAKE_SOURCE_DIR}/src/rgw/store/rados") +target_link_libraries(unittest_rgw_lua ${rgw_libs}) + +add_executable(radosgw-cr-test rgw_cr_test.cc) +target_link_libraries(radosgw-cr-test ${rgw_libs} librados + cls_rgw_client cls_otp_client cls_lock_client cls_refcount_client + cls_log_client cls_timeindex_client + cls_version_client cls_user_client + global ${LIB_RESOLV} + OATH::OATH + ${CURL_LIBRARIES} ${EXPAT_LIBRARIES} ${BLKID_LIBRARIES} + GTest::GTest) diff --git a/src/test/rgw/amqp_mock.cc b/src/test/rgw/amqp_mock.cc new file mode 100644 index 000000000..8674e5026 --- /dev/null +++ b/src/test/rgw/amqp_mock.cc @@ -0,0 +1,391 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "amqp_mock.h" +#include <amqp.h> +#include <amqp_ssl_socket.h> +#include <amqp_tcp_socket.h> +#include <string> +#include <stdarg.h> +#include <mutex> +#include <boost/lockfree/queue.hpp> +#include <openssl/ssl.h> + +namespace amqp_mock { + +std::mutex set_valid_lock; +int VALID_PORT(5672); +std::string VALID_HOST("localhost"); +std::string VALID_VHOST("/"); +std::string VALID_USER("guest"); +std::string VALID_PASSWORD("guest"); + +void set_valid_port(int port) { + std::lock_guard<std::mutex> lock(set_valid_lock); + VALID_PORT = port; +} + +void set_valid_host(const std::string& host) { + std::lock_guard<std::mutex> lock(set_valid_lock); + VALID_HOST = host; +} + +void set_valid_vhost(const std::string& vhost) { + std::lock_guard<std::mutex> lock(set_valid_lock); + VALID_VHOST = vhost; +} + +void set_valid_user(const std::string& user, const std::string& password) { + 
std::lock_guard<std::mutex> lock(set_valid_lock); + VALID_USER = user; + VALID_PASSWORD = password; +} + +std::atomic<unsigned> g_tag_skip = 0; +std::atomic<int> g_multiple = 0; + +void set_multiple(unsigned tag_skip) { + g_multiple = 1; + g_tag_skip = tag_skip; +} + +void reset_multiple() { + g_multiple = 0; + g_tag_skip = 0; +} + +bool FAIL_NEXT_WRITE(false); +bool FAIL_NEXT_READ(false); +bool REPLY_ACK(true); +} + +using namespace amqp_mock; + +struct amqp_connection_state_t_ { + amqp_socket_t* socket; + amqp_channel_open_ok_t* channel1; + amqp_channel_open_ok_t* channel2; + amqp_exchange_declare_ok_t* exchange; + amqp_queue_declare_ok_t* queue; + amqp_confirm_select_ok_t* confirm; + amqp_basic_consume_ok_t* consume; + bool login_called; + boost::lockfree::queue<amqp_basic_ack_t> ack_list; + boost::lockfree::queue<amqp_basic_nack_t> nack_list; + std::atomic<uint64_t> delivery_tag; + amqp_rpc_reply_t reply; + amqp_basic_ack_t ack; + amqp_basic_nack_t nack; + bool use_ssl; + // ctor + amqp_connection_state_t_() : + socket(nullptr), + channel1(nullptr), + channel2(nullptr), + exchange(nullptr), + queue(nullptr), + confirm(nullptr), + consume(nullptr), + login_called(false), + ack_list(1024), + nack_list(1024), + delivery_tag(1), + use_ssl(false) { + reply.reply_type = AMQP_RESPONSE_NONE; + } +}; + +struct amqp_socket_t_ { + void *klass; + void *ssl_ctx; + bool open_called; + // ctor + amqp_socket_t_() : klass(nullptr), ssl_ctx(nullptr), open_called(false) { + } +}; + +extern "C" { + +amqp_connection_state_t AMQP_CALL amqp_new_connection(void) { + auto s = new amqp_connection_state_t_; + return s; +} + +int amqp_destroy_connection(amqp_connection_state_t state) { + delete state->socket; + delete state->channel1; + delete state->channel2; + delete state->exchange; + delete state->queue; + delete state->confirm; + delete state->consume; + delete state; + return 0; +} + +amqp_socket_t* amqp_tcp_socket_new(amqp_connection_state_t state) { + state->socket = new amqp_socket_t; + return state->socket; +} + +amqp_socket_t* amqp_ssl_socket_new(amqp_connection_state_t state) { + state->socket = new amqp_socket_t; + state->use_ssl = true; + return state->socket; +} + +int amqp_ssl_socket_set_cacert(amqp_socket_t *self, const char *cacert) { + // do nothing + return AMQP_STATUS_OK; +} + +void amqp_ssl_socket_set_verify_peer(amqp_socket_t *self, amqp_boolean_t verify) { + // do nothing +} + +void amqp_ssl_socket_set_verify_hostname(amqp_socket_t *self, amqp_boolean_t verify) { + // do nothing +} + +#if AMQP_VERSION >= AMQP_VERSION_CODE(0, 10, 0, 1) +void* amqp_ssl_socket_get_context(amqp_socket_t *self) { + return nullptr; +} +#endif + +int SSL_CTX_set_default_verify_paths(SSL_CTX *ctx) { + return 1; +} + +int amqp_socket_open(amqp_socket_t *self, const char *host, int port) { + if (!self) { + return -1; + } + { + std::lock_guard<std::mutex> lock(set_valid_lock); + if (std::string(host) != VALID_HOST) { + return -2; + } + if (port != VALID_PORT) { + return -3; + } + } + self->open_called = true; + return 0; +} + +amqp_rpc_reply_t amqp_login( + amqp_connection_state_t state, + char const *vhost, + int channel_max, + int frame_max, + int heartbeat, + amqp_sasl_method_enum sasl_method, ...) 
{ + state->reply.reply_type = AMQP_RESPONSE_SERVER_EXCEPTION; + state->reply.library_error = 0; + state->reply.reply.decoded = nullptr; + state->reply.reply.id = 0; + if (std::string(vhost) != VALID_VHOST) { + return state->reply; + } + if (sasl_method != AMQP_SASL_METHOD_PLAIN) { + return state->reply; + } + va_list args; + va_start(args, sasl_method); + char* user = va_arg(args, char*); + char* password = va_arg(args, char*); + va_end(args); + if (std::string(user) != VALID_USER) { + return state->reply; + } + if (std::string(password) != VALID_PASSWORD) { + return state->reply; + } + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + state->login_called = true; + return state->reply; +} + +amqp_channel_open_ok_t* amqp_channel_open(amqp_connection_state_t state, amqp_channel_t channel) { + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + if (state->channel1 == nullptr) { + state->channel1 = new amqp_channel_open_ok_t; + return state->channel1; + } + + state->channel2 = new amqp_channel_open_ok_t; + return state->channel2; +} + +amqp_exchange_declare_ok_t* amqp_exchange_declare( + amqp_connection_state_t state, + amqp_channel_t channel, + amqp_bytes_t exchange, + amqp_bytes_t type, + amqp_boolean_t passive, + amqp_boolean_t durable, + amqp_boolean_t auto_delete, + amqp_boolean_t internal, + amqp_table_t arguments) { + state->exchange = new amqp_exchange_declare_ok_t; + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + return state->exchange; +} + +amqp_rpc_reply_t amqp_get_rpc_reply(amqp_connection_state_t state) { + return state->reply; +} + +int amqp_basic_publish( + amqp_connection_state_t state, + amqp_channel_t channel, + amqp_bytes_t exchange, + amqp_bytes_t routing_key, + amqp_boolean_t mandatory, + amqp_boolean_t immediate, + struct amqp_basic_properties_t_ const *properties, + amqp_bytes_t body) { + // make sure that all calls happened before publish + if (state->socket && state->socket->open_called && + state->login_called && state->channel1 && state->channel2 && state->exchange && + !FAIL_NEXT_WRITE) { + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + if (properties) { + if (REPLY_ACK) { + state->ack_list.push(amqp_basic_ack_t{state->delivery_tag++, 0}); + } else { + state->nack_list.push(amqp_basic_nack_t{state->delivery_tag++, 0}); + } + } + return AMQP_STATUS_OK; + } + return AMQP_STATUS_CONNECTION_CLOSED; +} + +const amqp_table_t amqp_empty_table = {0, NULL}; +const amqp_bytes_t amqp_empty_bytes = {0, NULL}; + +const char* amqp_error_string2(int code) { + static const char* str = "mock error"; + return str; +} + +char const* amqp_method_name(amqp_method_number_t methodNumber) { + static const char* str = "mock method"; + return str; +} + +amqp_queue_declare_ok_t* amqp_queue_declare( + amqp_connection_state_t state, amqp_channel_t channel, amqp_bytes_t queue, + amqp_boolean_t passive, amqp_boolean_t durable, amqp_boolean_t exclusive, + amqp_boolean_t auto_delete, amqp_table_t arguments) { + state->queue = new amqp_queue_declare_ok_t; + static const char* str = "tmp-queue"; + state->queue->queue = amqp_cstring_bytes(str); + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + return state->queue; +} + +amqp_confirm_select_ok_t* amqp_confirm_select(amqp_connection_state_t state, amqp_channel_t channel) { + state->confirm = new amqp_confirm_select_ok_t; + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + return state->confirm; +} + +#if AMQP_VERSION >= AMQP_VERSION_CODE(0, 11, 0, 1) +int amqp_simple_wait_frame_noblock(amqp_connection_state_t state, amqp_frame_t *decoded_frame, 
const struct timeval* tv) { +#else +int amqp_simple_wait_frame_noblock(amqp_connection_state_t state, amqp_frame_t *decoded_frame, struct timeval* tv) { +#endif + if (state->socket && state->socket->open_called && + state->login_called && state->channel1 && state->channel2 && state->exchange && + state->queue && state->consume && state->confirm && !FAIL_NEXT_READ) { + // "wait" for queue + usleep(tv->tv_sec*1000000+tv->tv_usec); + // read from queue + if (g_multiple) { + // pop multiples and reply once at the end + for (auto i = 0U; i < g_tag_skip; ++i) { + if (REPLY_ACK && !state->ack_list.pop(state->ack)) { + // queue is empty + return AMQP_STATUS_TIMEOUT; + } else if (!REPLY_ACK && !state->nack_list.pop(state->nack)) { + // queue is empty + return AMQP_STATUS_TIMEOUT; + } + } + if (REPLY_ACK) { + state->ack.multiple = g_multiple; + decoded_frame->payload.method.id = AMQP_BASIC_ACK_METHOD; + decoded_frame->payload.method.decoded = &state->ack; + } else { + state->nack.multiple = g_multiple; + decoded_frame->payload.method.id = AMQP_BASIC_NACK_METHOD; + decoded_frame->payload.method.decoded = &state->nack; + } + decoded_frame->frame_type = AMQP_FRAME_METHOD; + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + reset_multiple(); + return AMQP_STATUS_OK; + } + // pop replies one by one + if (REPLY_ACK && state->ack_list.pop(state->ack)) { + state->ack.multiple = g_multiple; + decoded_frame->frame_type = AMQP_FRAME_METHOD; + decoded_frame->payload.method.id = AMQP_BASIC_ACK_METHOD; + decoded_frame->payload.method.decoded = &state->ack; + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + return AMQP_STATUS_OK; + } else if (!REPLY_ACK && state->nack_list.pop(state->nack)) { + state->nack.multiple = g_multiple; + decoded_frame->frame_type = AMQP_FRAME_METHOD; + decoded_frame->payload.method.id = AMQP_BASIC_NACK_METHOD; + decoded_frame->payload.method.decoded = &state->nack; + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + return AMQP_STATUS_OK; + } else { + // queue is empty + return AMQP_STATUS_TIMEOUT; + } + } + return AMQP_STATUS_CONNECTION_CLOSED; +} + +amqp_basic_consume_ok_t* amqp_basic_consume( + amqp_connection_state_t state, amqp_channel_t channel, amqp_bytes_t queue, + amqp_bytes_t consumer_tag, amqp_boolean_t no_local, amqp_boolean_t no_ack, + amqp_boolean_t exclusive, amqp_table_t arguments) { + state->consume = new amqp_basic_consume_ok_t; + state->reply.reply_type = AMQP_RESPONSE_NORMAL; + return state->consume; +} + +} // extern "C" + +// amqp_parse_url() is linked via the actual rabbitmq-c library code. 
see: amqp_url.c + +// following functions are the actual implementation copied from rabbitmq-c library + +#include <string.h> + +amqp_bytes_t amqp_cstring_bytes(const char* cstr) { + amqp_bytes_t result; + result.len = strlen(cstr); + result.bytes = (void *)cstr; + return result; +} + +void amqp_bytes_free(amqp_bytes_t bytes) { free(bytes.bytes); } + +amqp_bytes_t amqp_bytes_malloc_dup(amqp_bytes_t src) { + amqp_bytes_t result; + result.len = src.len; + result.bytes = malloc(src.len); + if (result.bytes != NULL) { + memcpy(result.bytes, src.bytes, src.len); + } + return result; +} + + diff --git a/src/test/rgw/amqp_mock.h b/src/test/rgw/amqp_mock.h new file mode 100644 index 000000000..94fdfdddc --- /dev/null +++ b/src/test/rgw/amqp_mock.h @@ -0,0 +1,19 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +#pragma once + +#include <string> + +namespace amqp_mock { +void set_valid_port(int port); +void set_valid_host(const std::string& host); +void set_valid_vhost(const std::string& vhost); +void set_valid_user(const std::string& user, const std::string& password); +void set_multiple(unsigned tag); +void reset_multiple(); + +extern bool FAIL_NEXT_WRITE; // default "false" +extern bool FAIL_NEXT_READ; // default "false" +extern bool REPLY_ACK; // default "true" +} + diff --git a/src/test/rgw/amqp_url.c b/src/test/rgw/amqp_url.c new file mode 100644 index 000000000..520e95c69 --- /dev/null +++ b/src/test/rgw/amqp_url.c @@ -0,0 +1,221 @@ +/* + * ***** BEGIN LICENSE BLOCK ***** + * Version: MIT + * + * Portions created by Alan Antonuk are Copyright (c) 2012-2013 + * Alan Antonuk. All Rights Reserved. + * + * Portions created by VMware are Copyright (c) 2007-2012 VMware, Inc. + * All Rights Reserved. + * + * Portions created by Tony Garnock-Jones are Copyright (c) 2009-2010 + * VMware, Inc. and Tony Garnock-Jones. All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person + * obtaining a copy of this software and associated documentation + * files (the "Software"), to deal in the Software without + * restriction, including without limitation the rights to use, copy, + * modify, merge, publish, distribute, sublicense, and/or sell copies + * of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be + * included in all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * ***** END LICENSE BLOCK ***** + */ + +// this version of the file is slightly modified from the original one +// as it is only used to mock amqp libraries + +#ifdef _MSC_VER +#define _CRT_SECURE_NO_WARNINGS +#endif + +#include "amqp.h" +#include <limits.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> + +void amqp_default_connection_info(struct amqp_connection_info *ci) { + /* Apply defaults */ + ci->user = "guest"; + ci->password = "guest"; + ci->host = "localhost"; + ci->port = 5672; + ci->vhost = "/"; + ci->ssl = 0; +} + +/* Scan for the next delimiter, handling percent-encodings on the way. */ +static char find_delim(char **pp, int colon_and_at_sign_are_delims) { + char *from = *pp; + char *to = from; + + for (;;) { + char ch = *from++; + + switch (ch) { + case ':': + case '@': + if (!colon_and_at_sign_are_delims) { + *to++ = ch; + break; + } + + /* fall through */ + case 0: + case '/': + case '?': + case '#': + case '[': + case ']': + *to = 0; + *pp = from; + return ch; + + case '%': { + unsigned int val; + int chars; + int res = sscanf(from, "%2x%n", &val, &chars); + + if (res == EOF || res < 1 || chars != 2 || val > CHAR_MAX) + /* Return a surprising delimiter to + force an error. */ + { + return '%'; + } + + *to++ = (char)val; + from += 2; + break; + } + + default: + *to++ = ch; + break; + } + } +} + +/* Parse an AMQP URL into its component parts. */ +int amqp_parse_url(char *url, struct amqp_connection_info *parsed) { + int res = AMQP_STATUS_BAD_URL; + char delim; + char *start; + char *host; + char *port = NULL; + + amqp_default_connection_info(parsed); + + parsed->port = 5672; + parsed->ssl = 0; + /* check the prefix */ + if (!strncmp(url, "amqp://", 7)) { + /* do nothing */ + } else if (!strncmp(url, "amqps://", 8)) { + parsed->port = 5671; + parsed->ssl = 1; + } else { + goto out; + } + + host = start = url += (parsed->ssl ? 8 : 7); + delim = find_delim(&url, 1); + + if (delim == ':') { + /* The colon could be introducing the port or the + password part of the userinfo. We don't know yet, + so stash the preceding component. */ + port = start = url; + delim = find_delim(&url, 1); + } + + if (delim == '@') { + /* What might have been the host and port were in fact + the username and password */ + parsed->user = host; + if (port) { + parsed->password = port; + } + + port = NULL; + host = start = url; + delim = find_delim(&url, 1); + } + + if (delim == '[') { + /* IPv6 address. The bracket should be the first + character in the host. */ + if (host != start || *host != 0) { + goto out; + } + + start = url; + delim = find_delim(&url, 0); + + if (delim != ']') { + goto out; + } + + parsed->host = start; + start = url; + delim = find_delim(&url, 1); + + /* Closing bracket should be the last character in the + host. */ + if (*start != 0) { + goto out; + } + } else { + /* If we haven't seen the host yet, this is it. */ + if (*host != 0) { + parsed->host = host; + } + } + + if (delim == ':') { + port = start = url; + delim = find_delim(&url, 1); + } + + if (port) { + char *end; + long portnum = strtol(port, &end, 10); + + if (port == end || *end != 0 || portnum < 0 || portnum > 65535) { + goto out; + } + + parsed->port = portnum; + } + + if (delim == '/') { + start = url; + delim = find_delim(&url, 1); + + if (delim != 0) { + goto out; + } + + parsed->vhost = start; + res = AMQP_STATUS_OK; + } else if (delim == 0) { + res = AMQP_STATUS_OK; + } + +/* Any other delimiter is bad, and we will return AMQP_STATUS_BAD_AMQP_URL. 
*/ + +out: + return res; +} diff --git a/src/test/rgw/bench_rgw_ratelimit.cc b/src/test/rgw/bench_rgw_ratelimit.cc new file mode 100644 index 000000000..2bf7753ad --- /dev/null +++ b/src/test/rgw/bench_rgw_ratelimit.cc @@ -0,0 +1,247 @@ +#include "rgw_ratelimit.h" +#include "rgw_common.h" +#include "random" +#include <cstdlib> +#include <string> +#include <boost/asio.hpp> +#include <spawn/spawn.hpp> +#include <boost/asio/steady_timer.hpp> +#include <chrono> +#include <mutex> +#include <unordered_map> +#include <atomic> +#include <boost/program_options.hpp> + + +using Executor = boost::asio::io_context::executor_type; +std::uniform_int_distribution<unsigned int> dist(0, 1); +std::random_device rd; +std::default_random_engine rng{rd()}; +std::uniform_int_distribution<unsigned long long> disttenant(2, 100000000); +struct client_info { + uint64_t accepted = 0; + uint64_t rejected = 0; + uint64_t ops = 0; + uint64_t bytes = 0; + uint64_t num_retries = 0; + std::string tenant; +}; + +struct parameters { + int64_t req_size = 1; + int64_t backend_bandwidth = 1; + size_t wait_between_retries_ms = 1; + int num_clients = 1; +}; +std::shared_ptr<std::vector<client_info>> ds = std::make_shared<std::vector<client_info>>(std::vector<client_info>()); + +std::string method[2] = {"PUT", "GET"}; +void simulate_transfer(client_info& it, const RGWRateLimitInfo* info, std::shared_ptr<RateLimiter> ratelimit, const parameters& params, spawn::yield_context& yield, boost::asio::io_context& ioctx) +{ + auto dout = DoutPrefix(g_ceph_context, ceph_subsys_rgw, "rate limiter: "); + boost::asio::steady_timer timer(ioctx); + int rw = 0; // will always use PUT method as there is no difference + std::string methodop(method[rw]); + auto req_size = params.req_size; + auto backend_bandwidth = params.backend_bandwidth; +// the 4 * 1024 * 1024 is the RGW default we are sending in a typical environment + while (req_size) { + if (req_size <= backend_bandwidth) { + while (req_size > 0) { + if(req_size > 4*1024*1024) { + ratelimit->decrease_bytes(methodop.c_str(),it.tenant, 4*1024*1024, info); + it.bytes += 4*1024*1024; + req_size = req_size - 4*1024*1024; + } + else { + ratelimit->decrease_bytes(methodop.c_str(),it.tenant, req_size, info); + req_size = 0; + } + } + } else { + int64_t total_bytes = 0; + while (req_size > 0) { + if (req_size >= 4*1024*1024) { + if (total_bytes >= backend_bandwidth) + { + timer.expires_after(std::chrono::seconds(1)); + timer.async_wait(yield); + total_bytes = 0; + } + ratelimit->decrease_bytes(methodop.c_str(),it.tenant, 4*1024*1024, info); + it.bytes += 4*1024*1024; + req_size = req_size - 4*1024*1024; + total_bytes += 4*1024*1024; + } + else { + ratelimit->decrease_bytes(methodop.c_str(),it.tenant, req_size, info); + it.bytes += req_size; + total_bytes += req_size; + req_size = 0; + } + } + } + } +} +bool simulate_request(client_info& it, const RGWRateLimitInfo& info, std::shared_ptr<RateLimiter> ratelimit) +{ + boost::asio::io_context context; + auto time = ceph::coarse_real_clock::now(); + int rw = 0; // will always use PUT method as there is no different + std::string methodop = method[rw]; + auto dout = DoutPrefix(g_ceph_context, ceph_subsys_rgw, "rate limiter: "); + bool to_fail = ratelimit->should_rate_limit(methodop.c_str(), it.tenant, time, &info); + if(to_fail) + { + it.rejected++; + it.ops++; + return true; + } + it.accepted++; + return false; +} +void simulate_client(client_info& it, const RGWRateLimitInfo& info, std::shared_ptr<RateLimiter> ratelimit, const parameters& params, 
spawn::yield_context& ctx, bool& to_run, boost::asio::io_context& ioctx) +{ + for (;;) + { + bool to_retry = simulate_request(it, info, ratelimit); + while (to_retry && to_run) + { + if (params.wait_between_retries_ms) + { + boost::asio::steady_timer timer(ioctx); + timer.expires_after(std::chrono::milliseconds(params.wait_between_retries_ms)); + timer.async_wait(ctx); + } + to_retry = simulate_request(it, info, ratelimit); + } + if (!to_run) + { + return; + } + simulate_transfer(it, &info, ratelimit, params, ctx, ioctx); + } +} +void simulate_clients(boost::asio::io_context& context, std::string tenant, const RGWRateLimitInfo& info, std::shared_ptr<RateLimiter> ratelimit, const parameters& params, bool& to_run) +{ + for (int i = 0; i < params.num_clients; i++) + { + auto& it = ds->emplace_back(client_info()); + it.tenant = tenant; + int x = ds->size() - 1; + spawn::spawn(context, + [&to_run ,x, ratelimit, info, params, &context](spawn::yield_context ctx) + { + auto& it = ds.get()->operator[](x); + simulate_client(it, info, ratelimit, params, ctx, to_run, context); + }); + } +} +int main(int argc, char **argv) +{ + int num_ratelimit_classes = 1; + int64_t ops_limit = 1; + int64_t bw_limit = 1; + int thread_count = 512; + int runtime = 60; + parameters params; + try + { + using namespace boost::program_options; + options_description desc{"Options"}; + desc.add_options() + ("help,h", "Help screen") + ("num_ratelimit_classes", value<int>()->default_value(1), "how many ratelimit tenants") + ("request_size", value<int64_t>()->default_value(1), "what is the request size we are testing if 0, it will be randomized") + ("backend_bandwidth", value<int64_t>()->default_value(1), "what is the backend bandwidth, so there will be wait between decrease_bytes") + ("wait_between_retries_ms", value<size_t>()->default_value(1), "time in seconds to wait between retries") + ("ops_limit", value<int64_t>()->default_value(1), "ops limit for the tenants") + ("bw_limit", value<int64_t>()->default_value(1), "bytes per second limit") + ("threads", value<int>()->default_value(512), "server's threads count") + ("runtime", value<int>()->default_value(60), "For how many seconds the test will run") + ("num_clients", value<int>()->default_value(1), "number of clients per tenant to run"); + variables_map vm; + store(parse_command_line(argc, argv, desc), vm); + if (vm.count("help")) { + std::cout << desc << std::endl; + return EXIT_SUCCESS; + } + num_ratelimit_classes = vm["num_ratelimit_classes"].as<int>(); + params.req_size = vm["request_size"].as<int64_t>(); + params.backend_bandwidth = vm["backend_bandwidth"].as<int64_t>(); + params.wait_between_retries_ms = vm["wait_between_retries_ms"].as<size_t>(); + params.num_clients = vm["num_clients"].as<int>(); + ops_limit = vm["ops_limit"].as<int64_t>(); + bw_limit = vm["bw_limit"].as<int64_t>(); + thread_count = vm["threads"].as<int>(); + runtime = vm["runtime"].as<int>(); + } + catch (const boost::program_options::error &ex) + { + std::cerr << ex.what() << std::endl; + return EXIT_FAILURE; + } + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_bytes = bw_limit; + info.max_write_bytes = bw_limit; + info.max_read_ops = ops_limit; + info.max_write_ops = ops_limit; + std::unique_ptr<CephContext> cct = std::make_unique<CephContext>(CEPH_ENTITY_TYPE_ANY); + if (!g_ceph_context) + { + g_ceph_context = cct.get(); + } + std::shared_ptr<ActiveRateLimiter> ratelimit(new ActiveRateLimiter(g_ceph_context)); + ratelimit->start(); + std::vector<std::thread> threads; + using 
Executor = boost::asio::io_context::executor_type; + std::optional<boost::asio::executor_work_guard<Executor>> work; + threads.reserve(thread_count); + boost::asio::io_context context; + boost::asio::io_context stopme; + work.emplace(boost::asio::make_work_guard(context)); + // server execution + for (int i = 0; i < thread_count; i++) { + threads.emplace_back([&]() noexcept { + context.run(); + }); + } + //client execution + bool to_run = true; + ds->reserve(num_ratelimit_classes*params.num_clients); + for (int i = 0; i < num_ratelimit_classes; i++) + { + unsigned long long tenantid = disttenant(rng); + std::string tenantuser = "uuser" + std::to_string(tenantid); + simulate_clients(context, tenantuser, info, ratelimit->get_active(), params, to_run); + } + boost::asio::steady_timer timer_runtime(stopme); + timer_runtime.expires_after(std::chrono::seconds(runtime)); + timer_runtime.wait(); + work.reset(); + context.stop(); + to_run = false; + + for (auto& i : threads) + { + i.join(); + } + std::unordered_map<std::string,client_info> metrics_by_tenant; + for(auto& i : *ds.get()) + { + auto it = metrics_by_tenant.emplace(i.tenant, client_info()).first; + std::cout << i.accepted << std::endl; + it->second.accepted += i.accepted; + it->second.rejected += i.rejected; + } + // TODO sum the results by tenant + for(auto& i : metrics_by_tenant) + { + std::cout << "Tenant is: " << i.first << std::endl; + std::cout << "Simulator finished accepted sum : " << i.second.accepted << std::endl; + std::cout << "Simulator finished rejected sum : " << i.second.rejected << std::endl; + } + + return 0; +} diff --git a/src/test/rgw/bench_rgw_ratelimit_gc.cc b/src/test/rgw/bench_rgw_ratelimit_gc.cc new file mode 100644 index 000000000..ae422e1da --- /dev/null +++ b/src/test/rgw/bench_rgw_ratelimit_gc.cc @@ -0,0 +1,52 @@ +#include "rgw_ratelimit.h" +#include "rgw_common.h" +#include "random" +#include <cstdlib> +#include <string> +#include <chrono> +#include <boost/program_options.hpp> +int main(int argc, char **argv) +{ + int num_qos_classes = 1; + try + { + using namespace boost::program_options; + options_description desc{"Options"}; + desc.add_options() + ("help,h", "Help screen") + ("num_qos_classes", value<int>()->default_value(1), "how many qos tenants"); + variables_map vm; + store(parse_command_line(argc, argv, desc), vm); + if (vm.count("help")) { + std::cout << desc << std::endl; + return EXIT_SUCCESS; + } + num_qos_classes = vm["num_qos_classes"].as<int>(); + } + catch (const boost::program_options::error &ex) + { + std::cerr << ex.what() << std::endl; + return EXIT_FAILURE; + } + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_bytes = 0; + info.max_write_bytes = 0; + info.max_read_ops = 0; + info.max_write_ops = 0; + std::unique_ptr<CephContext> cct = std::make_unique<CephContext>(CEPH_ENTITY_TYPE_ANY); + if (!g_ceph_context) + { + g_ceph_context = cct.get(); + } + std::shared_ptr<ActiveRateLimiter> ratelimit(new ActiveRateLimiter(g_ceph_context)); + ratelimit->start(); + auto dout = DoutPrefix(g_ceph_context, ceph_subsys_rgw, "rate limiter: "); + for(int i = 0; i < num_qos_classes; i++) + { + std::string tenant = "uuser" + std::to_string(i); + auto time = ceph::coarse_real_clock::now(); + ratelimit->get_active()->should_rate_limit("PUT", tenant, time, &info); + } + +} diff --git a/src/test/rgw/bucket_notification/README.rst b/src/test/rgw/bucket_notification/README.rst new file mode 100644 index 000000000..9686bef71 --- /dev/null +++ b/src/test/rgw/bucket_notification/README.rst @@ -0,0 
+1,96 @@
+==========================
+ Bucket Notification Tests
+==========================
+
+You will need to use the sample configuration file named ``bntests.conf.SAMPLE``
+that has been provided at ``/path/to/ceph/src/test/rgw/bucket_notification/``. You can also copy this file to the directory where you are
+running the tests and modify it if needed. This file can be used to run the bucket notification tests on a Ceph cluster started
+with vstart.
+For the tests covering Kafka and RabbitMQ security, the RGW will need to accept user/password without a TLS connection between the client and the RGW.
+So, the cluster will have to be started with the ``rgw_allow_notification_secrets_in_cleartext`` parameter set to ``true``.
+For example::
+
+  MON=1 OSD=1 MDS=0 MGR=1 RGW=1 ../src/vstart.sh -n -d -o "rgw_allow_notification_secrets_in_cleartext=true"
+
+===========
+Kafka Tests
+===========
+
+You also need to install Kafka, which can be downloaded from: https://kafka.apache.org/downloads
+
+To test Kafka security, you should first run the ``kafka-security.sh`` script inside the Kafka directory.
+
+Then edit the Kafka server properties file (``/path/to/kafka/config/server.properties``)
+to have the following lines::
+
+  listeners=PLAINTEXT://localhost:9092,SSL://localhost:9093,SASL_SSL://localhost:9094
+  ssl.keystore.location=/home/ylifshit/kafka-3.3.1-src/server.keystore.jks
+  ssl.keystore.password=mypassword
+  ssl.key.password=mypassword
+  ssl.truststore.location=/home/ylifshit/kafka-3.3.1-src/server.truststore.jks
+  ssl.truststore.password=mypassword
+  sasl.enabled.mechanisms=PLAIN
+  listener.name.sasl_ssl.plain.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required \
+    username="alice" \
+    password="alice-secret" \
+    user_alice="alice-secret";
+
+After following the above steps, start the Zookeeper and Kafka services.
+To start the Zookeeper service, run::
+
+  bin/zookeeper-server-start.sh config/zookeeper.properties
+
+and then start the Kafka service::
+
+  bin/kafka-server-start.sh config/server.properties
+
+If you want to run the Zookeeper and Kafka services in the background, add ``-daemon`` at the end of the command, like::
+
+  bin/zookeeper-server-start.sh -daemon config/zookeeper.properties
+
+and::
+
+  bin/kafka-server-start.sh -daemon config/server.properties
+
+After running vstart, Zookeeper, and Kafka services you're ready to run the Kafka tests::
+
+  BNTESTS_CONF=bntests.conf python -m nose -s /path/to/ceph/src/test/rgw/bucket_notification/test_bn.py -v -a 'kafka_test'
+
+To run the Kafka security test, you also need to provide the test with the location of the Kafka directory::
+
+  KAFKA_DIR=/path/to/kafka BNTESTS_CONF=bntests.conf python -m nose -s /path/to/ceph/src/test/rgw/bucket_notification/test_bn.py -v -a 'kafka_ssl_test'
+
+==============
+RabbitMQ Tests
+==============
+
+You need to install RabbitMQ in the following way::
+
+  sudo dnf install rabbitmq-server
+
+Then you need to run the following command::
+
+  sudo chkconfig rabbitmq-server on
+
+Finally, to start the RabbitMQ server, run::
+
+  sudo /sbin/service rabbitmq-server start
+
+To confirm that the RabbitMQ server is running, check its status with::
+
+  sudo /sbin/service rabbitmq-server status
+
+After running vstart and the RabbitMQ server you're ready to run the AMQP tests::
+
+  BNTESTS_CONF=bntests.conf python -m nose -s /path/to/ceph/src/test/rgw/bucket_notification/test_bn.py -v -a 'amqp_test'
+
+After running the tests you need to stop the vstart cluster (``/path/to/ceph/src/stop.sh``) and the RabbitMQ server::
+
+  sudo /sbin/service rabbitmq-server stop
+
+To run the RabbitMQ SSL security tests use the following::
+
+  BNTESTS_CONF=bntests.conf python -m nose -s /path/to/ceph/src/test/rgw/bucket_notification/test_bn.py -v -a 'amqp_ssl_test'
+
+During these tests, the test script will restart the RabbitMQ server with the correct security configuration (``sudo`` privileges will be needed).
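For reference, nose also accepts a ``file.py:function`` selector, so a single test from this module can be run on its own; ``<test_name>`` below is a placeholder for whichever test function in ``test_bn.py`` you want to run, not a name taken from this commit::

  BNTESTS_CONF=bntests.conf python -m nose -s /path/to/ceph/src/test/rgw/bucket_notification/test_bn.py:<test_name> -v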
+ diff --git a/src/test/rgw/bucket_notification/__init__.py b/src/test/rgw/bucket_notification/__init__.py new file mode 100644 index 000000000..6785fce92 --- /dev/null +++ b/src/test/rgw/bucket_notification/__init__.py @@ -0,0 +1,48 @@ +import configparser +import os + +def setup(): + cfg = configparser.RawConfigParser() + try: + path = os.environ['BNTESTS_CONF'] + except KeyError: + raise RuntimeError( + 'To run tests, point environment ' + + 'variable BNTESTS_CONF to a config file.', + ) + cfg.read(path) + + if not cfg.defaults(): + raise RuntimeError('Your config file is missing the DEFAULT section!') + if not cfg.has_section("s3 main"): + raise RuntimeError('Your config file is missing the "s3 main" section!') + + defaults = cfg.defaults() + + global default_host + default_host = defaults.get("host") + + global default_port + default_port = int(defaults.get("port")) + + global main_access_key + main_access_key = cfg.get('s3 main',"access_key") + + global main_secret_key + main_secret_key = cfg.get('s3 main',"secret_key") + +def get_config_host(): + global default_host + return default_host + +def get_config_port(): + global default_port + return default_port + +def get_access_key(): + global main_access_key + return main_access_key + +def get_secret_key(): + global main_secret_key + return main_secret_key diff --git a/src/test/rgw/bucket_notification/api.py b/src/test/rgw/bucket_notification/api.py new file mode 100644 index 000000000..fe38576fb --- /dev/null +++ b/src/test/rgw/bucket_notification/api.py @@ -0,0 +1,234 @@ +import logging +import ssl +import urllib +import hmac +import hashlib +import base64 +import xmltodict +from http import client as http_client +from urllib import parse as urlparse +from time import gmtime, strftime +import boto3 +from botocore.client import Config +import os +import subprocess + +log = logging.getLogger('bucket_notification.tests') + +NO_HTTP_BODY = '' + +def put_object_tagging(conn, bucket_name, key, tags): + client = boto3.client('s3', + endpoint_url='http://'+conn.host+':'+str(conn.port), + aws_access_key_id=conn.aws_access_key_id, + aws_secret_access_key=conn.aws_secret_access_key) + return client.put_object(Body='aaaaaaaaaaa', Bucket=bucket_name, Key=key, Tagging=tags) + +def make_request(conn, method, resource, parameters=None, sign_parameters=False, extra_parameters=None): + """generic request sending to pubsub radogw + should cover: topics, notificatios and subscriptions + """ + url_params = '' + if parameters is not None: + url_params = urlparse.urlencode(parameters) + # remove 'None' from keys with no values + url_params = url_params.replace('=None', '') + url_params = '?' 
+ url_params + if extra_parameters is not None: + url_params = url_params + '&' + extra_parameters + string_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + string_to_sign = method + '\n\n\n' + string_date + '\n' + resource + if sign_parameters: + string_to_sign += url_params + signature = base64.b64encode(hmac.new(conn.aws_secret_access_key.encode('utf-8'), + string_to_sign.encode('utf-8'), + hashlib.sha1).digest()).decode('ascii') + headers = {'Authorization': 'AWS '+conn.aws_access_key_id+':'+signature, + 'Date': string_date, + 'Host': conn.host+':'+str(conn.port)} + http_conn = http_client.HTTPConnection(conn.host, conn.port) + if log.getEffectiveLevel() <= 10: + http_conn.set_debuglevel(5) + http_conn.request(method, resource+url_params, NO_HTTP_BODY, headers) + response = http_conn.getresponse() + data = response.read() + status = response.status + http_conn.close() + return data.decode('utf-8'), status + + +def delete_all_objects(conn, bucket_name): + client = boto3.client('s3', + endpoint_url='http://'+conn.host+':'+str(conn.port), + aws_access_key_id=conn.aws_access_key_id, + aws_secret_access_key=conn.aws_secret_access_key) + + objects = [] + for key in client.list_objects(Bucket=bucket_name)['Contents']: + objects.append({'Key': key['Key']}) + # delete objects from the bucket + response = client.delete_objects(Bucket=bucket_name, + Delete={'Objects': objects}) + + +class PSTopicS3: + """class to set/list/get/delete a topic + POST ?Action=CreateTopic&Name=<topic name>[&OpaqueData=<data>[&push-endpoint=<endpoint>&[<arg1>=<value1>...]]] + POST ?Action=ListTopics + POST ?Action=GetTopic&TopicArn=<topic-arn> + POST ?Action=DeleteTopic&TopicArn=<topic-arn> + """ + def __init__(self, conn, topic_name, region, endpoint_args=None, opaque_data=None): + self.conn = conn + self.topic_name = topic_name.strip() + assert self.topic_name + self.topic_arn = '' + self.attributes = {} + if endpoint_args is not None: + self.attributes = {nvp[0] : nvp[1] for nvp in urlparse.parse_qsl(endpoint_args, keep_blank_values=True)} + if opaque_data is not None: + self.attributes['OpaqueData'] = opaque_data + protocol = 'https' if conn.is_secure else 'http' + self.client = boto3.client('sns', + endpoint_url=protocol+'://'+conn.host+':'+str(conn.port), + aws_access_key_id=conn.aws_access_key_id, + aws_secret_access_key=conn.aws_secret_access_key, + region_name=region, + verify='./cert.pem') + + def get_config(self): + """get topic info""" + parameters = {'Action': 'GetTopic', 'TopicArn': self.topic_arn} + body = urlparse.urlencode(parameters) + string_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + content_type = 'application/x-www-form-urlencoded; charset=utf-8' + resource = '/' + method = 'POST' + string_to_sign = method + '\n\n' + content_type + '\n' + string_date + '\n' + resource + log.debug('StringTosign: %s', string_to_sign) + signature = base64.b64encode(hmac.new(self.conn.aws_secret_access_key.encode('utf-8'), + string_to_sign.encode('utf-8'), + hashlib.sha1).digest()).decode('ascii') + headers = {'Authorization': 'AWS '+self.conn.aws_access_key_id+':'+signature, + 'Date': string_date, + 'Host': self.conn.host+':'+str(self.conn.port), + 'Content-Type': content_type} + if self.conn.is_secure: + http_conn = http_client.HTTPSConnection(self.conn.host, self.conn.port, + context=ssl.create_default_context(cafile='./cert.pem')) + else: + http_conn = http_client.HTTPConnection(self.conn.host, self.conn.port) + http_conn.request(method, resource, body, headers) + response = 
http_conn.getresponse() + data = response.read() + status = response.status + http_conn.close() + dict_response = xmltodict.parse(data) + return dict_response, status + + def set_config(self): + """set topic""" + result = self.client.create_topic(Name=self.topic_name, Attributes=self.attributes) + self.topic_arn = result['TopicArn'] + return self.topic_arn + + def del_config(self, topic_arn=None): + """delete topic""" + result = self.client.delete_topic(TopicArn=(topic_arn if topic_arn is not None else self.topic_arn)) + return result['ResponseMetadata']['HTTPStatusCode'] + + def get_list(self): + """list all topics""" + # note that boto3 supports list_topics(), however, the result only show ARNs + parameters = {'Action': 'ListTopics'} + body = urlparse.urlencode(parameters) + string_date = strftime("%a, %d %b %Y %H:%M:%S +0000", gmtime()) + content_type = 'application/x-www-form-urlencoded; charset=utf-8' + resource = '/' + method = 'POST' + string_to_sign = method + '\n\n' + content_type + '\n' + string_date + '\n' + resource + log.debug('StringTosign: %s', string_to_sign) + signature = base64.b64encode(hmac.new(self.conn.aws_secret_access_key.encode('utf-8'), + string_to_sign.encode('utf-8'), + hashlib.sha1).digest()).decode('ascii') + headers = {'Authorization': 'AWS '+self.conn.aws_access_key_id+':'+signature, + 'Date': string_date, + 'Host': self.conn.host+':'+str(self.conn.port), + 'Content-Type': content_type} + if self.conn.is_secure: + http_conn = http_client.HTTPSConnection(self.conn.host, self.conn.port, + context=ssl.create_default_context(cafile='./cert.pem')) + else: + http_conn = http_client.HTTPConnection(self.conn.host, self.conn.port) + http_conn.request(method, resource, body, headers) + response = http_conn.getresponse() + data = response.read() + status = response.status + http_conn.close() + dict_response = xmltodict.parse(data) + return dict_response, status + +class PSNotificationS3: + """class to set/get/delete an S3 notification + PUT /<bucket>?notification + GET /<bucket>?notification[=<notification>] + DELETE /<bucket>?notification[=<notification>] + """ + def __init__(self, conn, bucket_name, topic_conf_list): + self.conn = conn + assert bucket_name.strip() + self.bucket_name = bucket_name + self.resource = '/'+bucket_name + self.topic_conf_list = topic_conf_list + self.client = boto3.client('s3', + endpoint_url='http://'+conn.host+':'+str(conn.port), + aws_access_key_id=conn.aws_access_key_id, + aws_secret_access_key=conn.aws_secret_access_key) + + def send_request(self, method, parameters=None): + """send request to radosgw""" + return make_request(self.conn, method, self.resource, + parameters=parameters, sign_parameters=True) + + def get_config(self, notification=None): + """get notification info""" + parameters = None + if notification is None: + response = self.client.get_bucket_notification_configuration(Bucket=self.bucket_name) + status = response['ResponseMetadata']['HTTPStatusCode'] + return response, status + parameters = {'notification': notification} + response, status = self.send_request('GET', parameters=parameters) + dict_response = xmltodict.parse(response) + return dict_response, status + + def set_config(self): + """set notification""" + response = self.client.put_bucket_notification_configuration(Bucket=self.bucket_name, + NotificationConfiguration={ + 'TopicConfigurations': self.topic_conf_list + }) + status = response['ResponseMetadata']['HTTPStatusCode'] + return response, status + + def del_config(self, notification=None): + """delete 
notification""" + parameters = {'notification': notification} + + return self.send_request('DELETE', parameters) + + +test_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__))) + '/../' + +def bash(cmd, **kwargs): + log.debug('running command: %s', ' '.join(cmd)) + kwargs['stdout'] = subprocess.PIPE + process = subprocess.Popen(cmd, **kwargs) + s = process.communicate()[0].decode('utf-8') + return (s, process.returncode) + +def admin(args, **kwargs): + """ radosgw-admin command """ + cmd = [test_path + 'test-rgw-call.sh', 'call_rgw_admin', 'noname'] + args + return bash(cmd, **kwargs) + diff --git a/src/test/rgw/bucket_notification/bntests.conf.SAMPLE b/src/test/rgw/bucket_notification/bntests.conf.SAMPLE new file mode 100644 index 000000000..eb3291daf --- /dev/null +++ b/src/test/rgw/bucket_notification/bntests.conf.SAMPLE @@ -0,0 +1,10 @@ +[DEFAULT] +port = 8000 +host = localhost + +[s3 main] +access_key = 0555b35654ad1656d804 +secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q== +display_name = M. Tester +user_id = testid +email = tester@ceph.com diff --git a/src/test/rgw/bucket_notification/bootstrap b/src/test/rgw/bucket_notification/bootstrap new file mode 100755 index 000000000..4d4a5a748 --- /dev/null +++ b/src/test/rgw/bucket_notification/bootstrap @@ -0,0 +1,45 @@ +#!/bin/sh +set -e + +if [ -f /etc/debian_version ]; then + for package in python3-pip python3-dev python3-xmltodict python3-pika libevent-dev libxml2-dev libxslt-dev zlib1g-dev; do + if [ "$(dpkg --status -- $package 2>/dev/null|sed -n 's/^Status: //p')" != "install ok installed" ]; then + # add a space after old values + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required DEB packages. Installing via sudo." 1>&2 + sudo apt-get -y install $missing + fi +fi +if [ -f /etc/redhat-release ]; then + for package in python3-pip python3-devel python3-xmltodict python3-pika libevent-devel libxml2-devel libxslt-devel zlib-devel; do + if [ "$(rpm -qa $package 2>/dev/null)" == "" ]; then + missing="${missing:+$missing }$package" + fi + done + if [ -n "$missing" ]; then + echo "$0: missing required RPM packages. Installing via sudo." 
1>&2 + sudo yum -y install $missing + fi +fi + +python3 -m venv --system-site-packages virtualenv + +# avoid pip bugs +./virtualenv/bin/pip install --upgrade pip +#pip3 install --upgrade setuptools cffi # address pip issue: https://github.com/pypa/pip/issues/6264 + +# work-around change in pip 1.5 +#./virtualenv/bin/pip install six +#./virtualenv/bin/pip install -I nose +#./virtualenv/bin/pip install setuptools + +./virtualenv/bin/pip install -U -r requirements.txt + +# forbid setuptools from using the network because it'll try to use +# easy_install, and we really wanted pip; next line will fail if pip +# requirements.txt does not match setup.py requirements -- sucky but +# good enough for now +./virtualenv/bin/python setup.py develop diff --git a/src/test/rgw/bucket_notification/kafka-security.sh b/src/test/rgw/bucket_notification/kafka-security.sh new file mode 100755 index 000000000..6c6f3e261 --- /dev/null +++ b/src/test/rgw/bucket_notification/kafka-security.sh @@ -0,0 +1,49 @@ +FQDN=localhost +KEYFILE=server.keystore.jks +TRUSTFILE=server.truststore.jks +CAFILE=y-ca.crt +CAKEYFILE=y-ca.key +REQFILE=$FQDN.req +CERTFILE=$FQDN.crt +MYPW=mypassword +VALIDITY=36500 + +rm -f $KEYFILE +rm -f $TRUSTFILE +rm -f $CAFILE +rm -f $REQFILE +rm -f $CERTFILE + +echo "########## create the request in key store '$KEYFILE'" +keytool -keystore $KEYFILE -alias localhost \ + -dname "CN=$FQDN, OU=Michigan Engineering, O=Red Hat Inc, \ + L=Ann Arbor, ST=Michigan, C=US" \ + -storepass $MYPW -keypass $MYPW \ + -validity $VALIDITY -genkey -keyalg RSA -ext SAN=DNS:"$FQDN" + +echo "########## create the CA '$CAFILE'" +openssl req -new -nodes -x509 -keyout $CAKEYFILE -out $CAFILE \ + -days $VALIDITY -subj \ + '/C=US/ST=Michigan/L=Ann Arbor/O=Red Hat Inc/OU=Michigan Engineering/CN=yuval-1' + +echo "########## store the CA in trust store '$TRUSTFILE'" +keytool -keystore $TRUSTFILE -storepass $MYPW -alias CARoot \ + -noprompt -importcert -file $CAFILE + +echo "########## create a request '$REQFILE' for signing in key store '$KEYFILE'" +keytool -storepass $MYPW -keystore $KEYFILE \ + -alias localhost -certreq -file $REQFILE + +echo "########## sign and create certificate '$CERTFILE'" +openssl x509 -req -CA $CAFILE -CAkey $CAKEYFILE -CAcreateserial \ + -days $VALIDITY \ + -in $REQFILE -out $CERTFILE + +echo "########## store CA '$CAFILE' in key store '$KEYFILE'" +keytool -storepass $MYPW -keystore $KEYFILE -alias CARoot \ + -noprompt -importcert -file $CAFILE + +echo "########## store certificate '$CERTFILE' in key store '$KEYFILE'" +keytool -storepass $MYPW -keystore $KEYFILE -alias localhost \ + -import -file $CERTFILE + diff --git a/src/test/rgw/bucket_notification/requirements.txt b/src/test/rgw/bucket_notification/requirements.txt new file mode 100644 index 000000000..a3cff2bed --- /dev/null +++ b/src/test/rgw/bucket_notification/requirements.txt @@ -0,0 +1,8 @@ +nose >=1.0.0 +boto >=2.6.0 +boto3 >=1.0.0 +configparser >=5.0.0 +kafka-python >=2.0.0 +pika +cloudevents +xmltodict diff --git a/src/test/rgw/bucket_notification/setup.py b/src/test/rgw/bucket_notification/setup.py new file mode 100644 index 000000000..189ab27b4 --- /dev/null +++ b/src/test/rgw/bucket_notification/setup.py @@ -0,0 +1,19 @@ +#!/usr/bin/python +from setuptools import setup, find_packages + +setup( + name='bn_tests', + version='0.0.1', + packages=find_packages(), + + author='Kalpesh Pandya', + author_email='kapandya@redhat.com', + description='Bucket Notification compatibility tests', + license='MIT', + keywords='bn web testing', + + 
install_requires=[ + 'boto >=2.0b4', + 'boto3 >=1.0.0' + ], + ) diff --git a/src/test/rgw/bucket_notification/test_bn.py b/src/test/rgw/bucket_notification/test_bn.py new file mode 100644 index 000000000..87a2acb76 --- /dev/null +++ b/src/test/rgw/bucket_notification/test_bn.py @@ -0,0 +1,4128 @@ +import logging +import json +import tempfile +import random +import threading +import subprocess +import socket +import time +import os +import string +import boto +from botocore.exceptions import ClientError +from http import server as http_server +from random import randint +import hashlib +from nose.plugins.attrib import attr +import boto3 +import datetime +from cloudevents.http import from_http +from dateutil import parser + +from boto.s3.connection import S3Connection + +from . import( + get_config_host, + get_config_port, + get_access_key, + get_secret_key + ) + +from .api import PSTopicS3, \ + PSNotificationS3, \ + delete_all_objects, \ + put_object_tagging, \ + admin + +from nose import SkipTest +from nose.tools import assert_not_equal, assert_equal, assert_in +import boto.s3.tagging + +# configure logging for the tests module +log = logging.getLogger(__name__) + +TOPIC_SUFFIX = "_topic" +NOTIFICATION_SUFFIX = "_notif" + + +num_buckets = 0 +run_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6)) + +def gen_bucket_name(): + global num_buckets + + num_buckets += 1 + return run_prefix + '-' + str(num_buckets) + + +def set_contents_from_string(key, content): + try: + key.set_contents_from_string(content) + except Exception as e: + print('Error: ' + str(e)) + + +class HTTPPostHandler(http_server.BaseHTTPRequestHandler): + """HTTP POST hanler class storing the received events in its http server""" + def do_POST(self): + """implementation of POST handler""" + content_length = int(self.headers['Content-Length']) + body = self.rfile.read(content_length) + if self.server.cloudevents: + event = from_http(self.headers, body) + record = json.loads(body)['Records'][0] + assert_equal(event['specversion'], '1.0') + assert_equal(event['id'], record['responseElements']['x-amz-request-id'] + '.' + record['responseElements']['x-amz-id-2']) + assert_equal(event['source'], 'ceph:s3.' + record['awsRegion'] + '.' + record['s3']['bucket']['name']) + assert_equal(event['type'], 'com.amazonaws.' + record['eventName']) + assert_equal(event['datacontenttype'], 'application/json') + assert_equal(event['subject'], record['s3']['object']['key']) + assert_equal(parser.parse(event['time']), parser.parse(record['eventTime'])) + log.info('HTTP Server (%d) received event: %s', self.server.worker_id, str(body)) + self.server.append(json.loads(body)) + if self.headers.get('Expect') == '100-continue': + self.send_response(100) + else: + self.send_response(200) + if self.server.delay > 0: + time.sleep(self.server.delay) + self.end_headers() + + +class HTTPServerWithEvents(http_server.HTTPServer): + """HTTP server used by the handler to store events""" + def __init__(self, addr, handler, worker_id, delay=0, cloudevents=False): + http_server.HTTPServer.__init__(self, addr, handler, False) + self.worker_id = worker_id + self.events = [] + self.delay = delay + self.cloudevents = cloudevents + + def append(self, event): + self.events.append(event) + +class HTTPServerThread(threading.Thread): + """thread for running the HTTP server. 
reusing the same socket for all threads""" + def __init__(self, i, sock, addr, delay=0, cloudevents=False): + threading.Thread.__init__(self) + self.i = i + self.daemon = True + self.httpd = HTTPServerWithEvents(addr, HTTPPostHandler, i, delay, cloudevents) + self.httpd.socket = sock + # prevent the HTTP server from re-binding every handler + self.httpd.server_bind = self.server_close = lambda self: None + self.start() + + def run(self): + try: + log.info('HTTP Server (%d) started on: %s', self.i, self.httpd.server_address) + self.httpd.serve_forever() + log.info('HTTP Server (%d) ended', self.i) + except Exception as error: + # could happen if the server r/w to a closing socket during shutdown + log.info('HTTP Server (%d) ended unexpectedly: %s', self.i, str(error)) + + def close(self): + self.httpd.shutdown() + + def get_events(self): + return self.httpd.events + + def reset_events(self): + self.httpd.events = [] + +class StreamingHTTPServer: + """multi-threaded http server class also holding list of events received into the handler + each thread has its own server, and all servers share the same socket""" + def __init__(self, host, port, num_workers=100, delay=0, cloudevents=False): + addr = (host, port) + self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) + self.sock.bind(addr) + self.sock.listen(num_workers) + self.workers = [HTTPServerThread(i, self.sock, addr, delay, cloudevents) for i in range(num_workers)] + + def verify_s3_events(self, keys, exact_match=False, deletions=False, expected_sizes={}): + """verify stored s3 records agains a list of keys""" + events = [] + for worker in self.workers: + events += worker.get_events() + worker.reset_events() + verify_s3_records_by_elements(events, keys, exact_match=exact_match, deletions=deletions, expected_sizes=expected_sizes) + + def verify_events(self, keys, exact_match=False, deletions=False): + """verify stored events agains a list of keys""" + events = [] + for worker in self.workers: + events += worker.get_events() + worker.reset_events() + verify_events_by_elements(events, keys, exact_match=exact_match, deletions=deletions) + + def get_and_reset_events(self): + events = [] + for worker in self.workers: + events += worker.get_events() + worker.reset_events() + return events + + def close(self): + """close all workers in the http server and wait for it to finish""" + # make sure that the shared socket is closed + # this is needed in case that one of the threads is blocked on the socket + self.sock.shutdown(socket.SHUT_RDWR) + self.sock.close() + # wait for server threads to finish + for worker in self.workers: + worker.close() + worker.join() + +# AMQP endpoint functions + +class AMQPReceiver(object): + """class for receiving and storing messages on a topic from the AMQP broker""" + def __init__(self, exchange, topic, external_endpoint_address=None, ca_location=None): + import pika + import ssl + + if ca_location: + ssl_context = ssl.create_default_context() + ssl_context.load_verify_locations(cafile=ca_location) + ssl_options = pika.SSLOptions(ssl_context) + rabbitmq_port = 5671 + else: + rabbitmq_port = 5672 + ssl_options = None + + if external_endpoint_address: + if ssl_options: + # this is currently not working due to: https://github.com/pika/pika/issues/1192 + params = pika.URLParameters(external_endpoint_address, ssl_options=ssl_options) + else: + params = pika.URLParameters(external_endpoint_address) + else: + hostname = get_ip() + params = 
pika.ConnectionParameters(host=hostname, port=rabbitmq_port, ssl_options=ssl_options) + + remaining_retries = 10 + while remaining_retries > 0: + try: + connection = pika.BlockingConnection(params) + break + except Exception as error: + remaining_retries -= 1 + print('failed to connect to rabbitmq (remaining retries ' + + str(remaining_retries) + '): ' + str(error)) + time.sleep(1) + + if remaining_retries == 0: + raise Exception('failed to connect to rabbitmq - no retries left') + + self.channel = connection.channel() + self.channel.exchange_declare(exchange=exchange, exchange_type='topic', durable=True) + result = self.channel.queue_declare('', exclusive=True) + queue_name = result.method.queue + self.channel.queue_bind(exchange=exchange, queue=queue_name, routing_key=topic) + self.channel.basic_consume(queue=queue_name, + on_message_callback=self.on_message, + auto_ack=True) + self.events = [] + self.topic = topic + + def on_message(self, ch, method, properties, body): + """callback invoked when a new message arrive on the topic""" + log.info('AMQP received event for topic %s:\n %s', self.topic, body) + self.events.append(json.loads(body)) + + # TODO create a base class for the AMQP and HTTP cases + def verify_s3_events(self, keys, exact_match=False, deletions=False, expected_sizes={}): + """verify stored s3 records agains a list of keys""" + verify_s3_records_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions, expected_sizes=expected_sizes) + self.events = [] + + def verify_events(self, keys, exact_match=False, deletions=False): + """verify stored events agains a list of keys""" + verify_events_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions) + self.events = [] + + def get_and_reset_events(self): + tmp = self.events + self.events = [] + return tmp + + +def amqp_receiver_thread_runner(receiver): + """main thread function for the amqp receiver""" + try: + log.info('AMQP receiver started') + receiver.channel.start_consuming() + log.info('AMQP receiver ended') + except Exception as error: + log.info('AMQP receiver ended unexpectedly: %s', str(error)) + + +def create_amqp_receiver_thread(exchange, topic, external_endpoint_address=None, ca_location=None): + """create amqp receiver and thread""" + receiver = AMQPReceiver(exchange, topic, external_endpoint_address, ca_location) + task = threading.Thread(target=amqp_receiver_thread_runner, args=(receiver,)) + task.daemon = True + return task, receiver + +def stop_amqp_receiver(receiver, task): + """stop the receiver thread and wait for it to finis""" + try: + receiver.channel.stop_consuming() + log.info('stopping AMQP receiver') + except Exception as error: + log.info('failed to gracefuly stop AMQP receiver: %s', str(error)) + task.join(5) + + +def init_rabbitmq(): + """ start a rabbitmq broker """ + hostname = get_ip() + try: + # first try to stop any existing process + subprocess.call(['sudo', 'rabbitmqctl', 'stop']) + time.sleep(5) + proc = subprocess.Popen(['sudo', '--preserve-env=RABBITMQ_CONFIG_FILE', 'rabbitmq-server']) + except Exception as error: + log.info('failed to execute rabbitmq-server: %s', str(error)) + print('failed to execute rabbitmq-server: %s' % str(error)) + return None + # TODO add rabbitmq checkpoint instead of sleep + time.sleep(5) + return proc + + +def clean_rabbitmq(proc): + """ stop the rabbitmq broker """ + try: + subprocess.call(['sudo', 'rabbitmqctl', 'stop']) + time.sleep(5) + proc.terminate() + except: + log.info('rabbitmq server already terminated') + + 
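The init_rabbitmq() helper above sleeps for a fixed five seconds and carries a TODO to replace that sleep with a real readiness checkpoint. A minimal sketch of such a check, assuming the standard rabbitmqctl status command can be run via sudo on the test host (the wait_for_rabbitmq name is hypothetical and not part of this suite):

import subprocess
import time

def wait_for_rabbitmq(timeout=30, interval=1):
    # poll the broker until 'rabbitmqctl status' succeeds or the timeout expires;
    # a zero return code means the node is up and answering control requests
    deadline = time.time() + timeout
    while time.time() < deadline:
        rc = subprocess.call(['sudo', 'rabbitmqctl', 'status'],
                             stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
        if rc == 0:
            return True
        time.sleep(interval)
    return False

init_rabbitmq() could then call wait_for_rabbitmq() after starting rabbitmq-server instead of the unconditional time.sleep(5).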
+def verify_events_by_elements(events, keys, exact_match=False, deletions=False): + """ verify there is at least one event per element """ + err = '' + for key in keys: + key_found = False + if type(events) is list: + for event_list in events: + if key_found: + break + for event in event_list['events']: + if event['info']['bucket']['name'] == key.bucket.name and \ + event['info']['key']['name'] == key.name: + if deletions and event['event'] == 'OBJECT_DELETE': + key_found = True + break + elif not deletions and event['event'] == 'OBJECT_CREATE': + key_found = True + break + else: + for event in events['events']: + if event['info']['bucket']['name'] == key.bucket.name and \ + event['info']['key']['name'] == key.name: + if deletions and event['event'] == 'OBJECT_DELETE': + key_found = True + break + elif not deletions and event['event'] == 'OBJECT_CREATE': + key_found = True + break + + if not key_found: + err = 'no ' + ('deletion' if deletions else 'creation') + ' event found for key: ' + str(key) + log.error(events) + assert False, err + + if not len(events) == len(keys): + err = 'superfluous events are found' + log.debug(err) + if exact_match: + log.error(events) + assert False, err + +META_PREFIX = 'x-amz-meta-' + +def verify_s3_records_by_elements(records, keys, exact_match=False, deletions=False, expected_sizes={}, etags=[]): + """ verify there is at least one record per element """ + err = '' + for key in keys: + key_found = False + object_size = 0 + if type(records) is list: + for record_list in records: + if key_found: + break + for record in record_list['Records']: + assert_in('eTag', record['s3']['object']) + if record['s3']['bucket']['name'] == key.bucket.name and \ + record['s3']['object']['key'] == key.name: + # Assertion Error needs to be fixed + #assert_equal(key.etag[1:-1], record['s3']['object']['eTag']) + if etags: + assert_in(key.etag[1:-1], etags) + if len(record['s3']['object']['metadata']) > 0: + for meta in record['s3']['object']['metadata']: + assert(meta['key'].startswith(META_PREFIX)) + if deletions and record['eventName'].startswith('ObjectRemoved'): + key_found = True + object_size = record['s3']['object']['size'] + break + elif not deletions and record['eventName'].startswith('ObjectCreated'): + key_found = True + object_size = record['s3']['object']['size'] + break + else: + for record in records['Records']: + assert_in('eTag', record['s3']['object']) + if record['s3']['bucket']['name'] == key.bucket.name and \ + record['s3']['object']['key'] == key.name: + assert_equal(key.etag, record['s3']['object']['eTag']) + if etags: + assert_in(key.etag[1:-1], etags) + if len(record['s3']['object']['metadata']) > 0: + for meta in record['s3']['object']['metadata']: + assert(meta['key'].startswith(META_PREFIX)) + if deletions and record['eventName'].startswith('ObjectRemoved'): + key_found = True + object_size = record['s3']['object']['size'] + break + elif not deletions and record['eventName'].startswith('ObjectCreated'): + key_found = True + object_size = record['s3']['object']['size'] + break + + if not key_found: + err = 'no ' + ('deletion' if deletions else 'creation') + ' event found for key: ' + str(key) + assert False, err + elif expected_sizes: + assert_equal(object_size, expected_sizes.get(key.name)) + + if not len(records) == len(keys): + err = 'superfluous records are found' + log.warning(err) + if exact_match: + for record_list in records: + for record in record_list['Records']: + log.error(str(record['s3']['bucket']['name']) + ',' + 
str(record['s3']['object']['key'])) + assert False, err + + +# Kafka endpoint functions + +kafka_server = 'localhost' + +class KafkaReceiver(object): + """class for receiving and storing messages on a topic from the kafka broker""" + def __init__(self, topic, security_type): + from kafka import KafkaConsumer + remaining_retries = 10 + port = 9092 + if security_type != 'PLAINTEXT': + security_type = 'SSL' + port = 9093 + while remaining_retries > 0: + try: + self.consumer = KafkaConsumer(topic, + bootstrap_servers = kafka_server+':'+str(port), + security_protocol=security_type, + consumer_timeout_ms=16000) + print('Kafka consumer created on topic: '+topic) + break + except Exception as error: + remaining_retries -= 1 + print('failed to connect to kafka (remaining retries ' + + str(remaining_retries) + '): ' + str(error)) + time.sleep(1) + + if remaining_retries == 0: + raise Exception('failed to connect to kafka - no retries left') + + self.events = [] + self.topic = topic + self.stop = False + + def verify_s3_events(self, keys, exact_match=False, deletions=False, etags=[]): + """verify stored s3 records agains a list of keys""" + verify_s3_records_by_elements(self.events, keys, exact_match=exact_match, deletions=deletions, etags=etags) + self.events = [] + +def kafka_receiver_thread_runner(receiver): + """main thread function for the kafka receiver""" + try: + log.info('Kafka receiver started') + print('Kafka receiver started') + while not receiver.stop: + for msg in receiver.consumer: + receiver.events.append(json.loads(msg.value)) + time.sleep(0.1) + log.info('Kafka receiver ended') + print('Kafka receiver ended') + except Exception as error: + log.info('Kafka receiver ended unexpectedly: %s', str(error)) + print('Kafka receiver ended unexpectedly: ' + str(error)) + + +def create_kafka_receiver_thread(topic, security_type='PLAINTEXT'): + """create kafka receiver and thread""" + receiver = KafkaReceiver(topic, security_type) + task = threading.Thread(target=kafka_receiver_thread_runner, args=(receiver,)) + task.daemon = True + return task, receiver + +def stop_kafka_receiver(receiver, task): + """stop the receiver thread and wait for it to finis""" + receiver.stop = True + task.join(1) + try: + receiver.consumer.unsubscribe() + receiver.consumer.close() + except Exception as error: + log.info('failed to gracefuly stop Kafka receiver: %s', str(error)) + + +def get_ip(): + return 'localhost' + + +def get_ip_http(): + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + try: + # address should not be reachable + s.connect(('10.255.255.255', 1)) + ip = s.getsockname()[0] + finally: + s.close() + return ip + + +def connection(): + hostname = get_config_host() + port_no = get_config_port() + vstart_access_key = get_access_key() + vstart_secret_key = get_secret_key() + + conn = S3Connection(aws_access_key_id=vstart_access_key, + aws_secret_access_key=vstart_secret_key, + is_secure=False, port=port_no, host=hostname, + calling_format='boto.s3.connection.OrdinaryCallingFormat') + + return conn + + +def connection2(): + hostname = get_config_host() + port_no = 8001 + vstart_access_key = get_access_key() + vstart_secret_key = get_secret_key() + + conn = S3Connection(aws_access_key_id=vstart_access_key, + aws_secret_access_key=vstart_secret_key, + is_secure=False, port=port_no, host=hostname, + calling_format='boto.s3.connection.OrdinaryCallingFormat') + + return conn + + +def another_user(tenant=None): + access_key = str(time.time()) + secret_key = str(time.time()) + uid = 'superman' + 
str(time.time()) + if tenant: + _, result = admin(['user', 'create', '--uid', uid, '--tenant', tenant, '--access-key', access_key, '--secret-key', secret_key, '--display-name', '"Super Man"']) + else: + _, result = admin(['user', 'create', '--uid', uid, '--access-key', access_key, '--secret-key', secret_key, '--display-name', '"Super Man"']) + + assert_equal(result, 0) + conn = S3Connection(aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=False, port=get_config_port(), host=get_config_host(), + calling_format='boto.s3.connection.OrdinaryCallingFormat') + return conn + +############## +# bucket notifications tests +############## + + +@attr('basic_test') +def test_ps_s3_topic_on_master(): + """ test s3 topics set/get/delete on master """ + tenant = 'kaboom' + conn = another_user(tenant) + zonegroup = 'default' + bucket_name = gen_bucket_name() + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topics + endpoint_address = 'amqp://127.0.0.1:7001/vhost_1' + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none' + topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args) + # clean all topics + try: + result = topic_conf1.get_list()[0]['ListTopicsResponse']['ListTopicsResult']['Topics'] + topics = [] + if result is not None: + topics = result['member'] + for topic in topics: + topic_conf1.del_config(topic_arn=topic['TopicArn']) + except Exception as err: + print('failed to do topic cleanup: ' + str(err)) + + topic_arn = topic_conf1.set_config() + assert_equal(topic_arn, + 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_1') + + endpoint_address = 'http://127.0.0.1:9001' + endpoint_args = 'push-endpoint='+endpoint_address + topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf2.set_config() + assert_equal(topic_arn, + 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_2') + endpoint_address = 'http://127.0.0.1:9002' + endpoint_args = 'push-endpoint='+endpoint_address + topic_conf3 = PSTopicS3(conn, topic_name+'_3', zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf3.set_config() + assert_equal(topic_arn, + 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_3') + + # get topic 3 + result, status = topic_conf3.get_config() + assert_equal(status, 200) + assert_equal(topic_arn, result['GetTopicResponse']['GetTopicResult']['Topic']['TopicArn']) + assert_equal(endpoint_address, result['GetTopicResponse']['GetTopicResult']['Topic']['EndPoint']['EndpointAddress']) + + # Note that endpoint args may be ordered differently in the result + # delete topic 1 + result = topic_conf1.del_config() + assert_equal(status, 200) + + # try to get a deleted topic + _, status = topic_conf1.get_config() + assert_equal(status, 404) + + # get the remaining 2 topics + result, status = topic_conf1.get_list() + assert_equal(status, 200) + assert_equal(len(result['ListTopicsResponse']['ListTopicsResult']['Topics']['member']), 2) + + # delete topics + result = topic_conf2.del_config() + assert_equal(status, 200) + result = topic_conf3.del_config() + assert_equal(status, 200) + + # get topic list, make sure it is empty + result, status = topic_conf1.get_list() + assert_equal(result['ListTopicsResponse']['ListTopicsResult']['Topics'], None) + + +@attr('basic_test') +def test_ps_s3_topic_admin_on_master(): + """ test s3 topics set/get/delete on master """ + tenant = 'kaboom' + conn = another_user(tenant) + 
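    # note: for a tenanted user the topic ARN takes the form
    # arn:aws:sns:<zonegroup>:<tenant>:<topic name>, which the assertions below verify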
zonegroup = 'default' + bucket_name = gen_bucket_name() + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topics + endpoint_address = 'amqp://127.0.0.1:7001/vhost_1' + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none' + topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args) + # clean all topics + try: + result = topic_conf1.get_list()[0]['ListTopicsResponse']['ListTopicsResult']['Topics'] + topics = [] + if result is not None: + topics = result['member'] + for topic in topics: + topic_conf1.del_config(topic_arn=topic['TopicArn']) + except Exception as err: + print('failed to do topic cleanup: ' + str(err)) + + topic_arn1 = topic_conf1.set_config() + assert_equal(topic_arn1, + 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_1') + + endpoint_address = 'http://127.0.0.1:9001' + endpoint_args = 'push-endpoint='+endpoint_address + topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args) + topic_arn2 = topic_conf2.set_config() + assert_equal(topic_arn2, + 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_2') + endpoint_address = 'http://127.0.0.1:9002' + endpoint_args = 'push-endpoint='+endpoint_address + topic_conf3 = PSTopicS3(conn, topic_name+'_3', zonegroup, endpoint_args=endpoint_args) + topic_arn3 = topic_conf3.set_config() + assert_equal(topic_arn3, + 'arn:aws:sns:' + zonegroup + ':' + tenant + ':' + topic_name + '_3') + + # get topic 3 via commandline + result = admin(['topic', 'get', '--topic', topic_name+'_3', '--tenant', tenant]) + parsed_result = json.loads(result[0]) + assert_equal(parsed_result['arn'], topic_arn3) + + # delete topic 3 + _, result = admin(['topic', 'rm', '--topic', topic_name+'_3', '--tenant', tenant]) + assert_equal(result, 0) + + # try to get a deleted topic + _, result = admin(['topic', 'get', '--topic', topic_name+'_3', '--tenant', tenant]) + print('"topic not found" error is expected') + assert_equal(result, 2) + + # get the remaining 2 topics + result = admin(['topic', 'list', '--tenant', tenant]) + parsed_result = json.loads(result[0]) + assert_equal(len(parsed_result['topics']), 2) + + # delete topics + _, result = admin(['topic', 'rm', '--topic', topic_name+'_1', '--tenant', tenant]) + assert_equal(result, 0) + _, result = admin(['topic', 'rm', '--topic', topic_name+'_2', '--tenant', tenant]) + assert_equal(result, 0) + + # get topic list, make sure it is empty + result = admin(['topic', 'list', '--tenant', tenant]) + parsed_result = json.loads(result[0]) + assert_equal(len(parsed_result['topics']), 0) + + +@attr('basic_test') +def test_ps_s3_notification_configuration_admin_on_master(): + """ test s3 notification list/get/delete on master """ + conn = connection() + zonegroup = 'default' + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topics + endpoint_address = 'amqp://127.0.0.1:7001/vhost_1' + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none' + topic_conf = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args) + # clean all topics + try: + result = topic_conf.get_list()[0]['ListTopicsResponse']['ListTopicsResult']['Topics'] + topics = [] + if result is not None: + topics = result['member'] + for topic in topics: + topic_conf.del_config(topic_arn=topic['TopicArn']) + except Exception as err: + print('failed to do topic cleanup: ' + str(err)) + + topic_arn 
= topic_conf.set_config() + assert_equal(topic_arn, + 'arn:aws:sns:' + zonegroup + '::' + topic_name + '_1') + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name+'_1', + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*'] + }, + {'Id': notification_name+'_2', + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectRemoved:*'] + }, + {'Id': notification_name+'_3', + 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # list notification + result = admin(['notification', 'list', '--bucket', bucket_name]) + parsed_result = json.loads(result[0]) + assert_equal(len(parsed_result['notifications']), 3) + assert_equal(result[1], 0) + + # get notification 1 + result = admin(['notification', 'get', '--bucket', bucket_name, '--notification-id', notification_name+'_1']) + parsed_result = json.loads(result[0]) + assert_equal(parsed_result['Id'], notification_name+'_1') + assert_equal(result[1], 0) + + # remove notification 3 + _, result = admin(['notification', 'rm', '--bucket', bucket_name, '--notification-id', notification_name+'_3']) + assert_equal(result, 0) + + # list notification + result = admin(['notification', 'list', '--bucket', bucket_name]) + parsed_result = json.loads(result[0]) + assert_equal(len(parsed_result['notifications']), 2) + assert_equal(result[1], 0) + + # delete notifications + _, result = admin(['notification', 'rm', '--bucket', bucket_name]) + assert_equal(result, 0) + + # list notification, make sure it is empty + result = admin(['notification', 'list', '--bucket', bucket_name]) + parsed_result = json.loads(result[0]) + assert_equal(len(parsed_result['notifications']), 0) + assert_equal(result[1], 0) + + +@attr('modification_required') +def test_ps_s3_topic_with_secret_on_master(): + """ test s3 topics with secret set/get/delete on master """ + return SkipTest('secure connection is needed to test topic with secrets') + + conn = connection1() + if conn.secure_conn is None: + return SkipTest('secure connection is needed to test topic with secrets') + + zonegroup = 'default' + bucket_name = gen_bucket_name() + topic_name = bucket_name + TOPIC_SUFFIX + + # clean all topics + delete_all_s3_topics(conn, zonegroup) + + # create s3 topics + endpoint_address = 'amqp://user:password@127.0.0.1:7001' + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none' + bad_topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + try: + result = bad_topic_conf.set_config() + except Exception as err: + print('Error is expected: ' + str(err)) + else: + assert False, 'user password configuration set allowed only over HTTPS' + topic_conf = PSTopicS3(conn.secure_conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + + assert_equal(topic_arn, + 'arn:aws:sns:' + zonegroup + ':' + get_tenant() + ':' + topic_name) + + _, status = bad_topic_conf.get_config() + assert_equal(status/100, 4) + + # get topic + result, status = topic_conf.get_config() + assert_equal(status, 200) + assert_equal(topic_arn, result['GetTopicResponse']['GetTopicResult']['Topic']['TopicArn']) + assert_equal(endpoint_address, result['GetTopicResponse']['GetTopicResult']['Topic']['EndPoint']['EndpointAddress']) + + _, status = bad_topic_conf.get_config() + assert_equal(status/100, 4) + + _, status = 
topic_conf.get_list() + assert_equal(status/100, 2) + + # delete topics + result = topic_conf.del_config() + + +@attr('basic_test') +def test_ps_s3_notification_on_master(): + """ test s3 notification set/get/delete on master """ + conn = connection() + zonegroup = 'default' + bucket_name = gen_bucket_name() + # create bucket + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + # create s3 topic + endpoint_address = 'amqp://127.0.0.1:7001' + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name+'_1', + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*'] + }, + {'Id': notification_name+'_2', + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectRemoved:*'] + }, + {'Id': notification_name+'_3', + 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # get notifications on a bucket + response, status = s3_notification_conf.get_config(notification=notification_name+'_1') + assert_equal(status/100, 2) + assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Topic'], topic_arn) + + # delete specific notifications + _, status = s3_notification_conf.del_config(notification=notification_name+'_1') + assert_equal(status/100, 2) + + # get the remaining 2 notifications on a bucket + response, status = s3_notification_conf.get_config() + assert_equal(status/100, 2) + assert_equal(len(response['TopicConfigurations']), 2) + assert_equal(response['TopicConfigurations'][0]['TopicArn'], topic_arn) + assert_equal(response['TopicConfigurations'][1]['TopicArn'], topic_arn) + + # delete remaining notifications + _, status = s3_notification_conf.del_config() + assert_equal(status/100, 2) + + # make sure that the notifications are now deleted + _, status = s3_notification_conf.get_config() + + # cleanup + topic_conf.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + + +@attr('basic_test') +def test_ps_s3_notification_on_master_empty_config(): + """ test s3 notification set/get/delete on master with empty config """ + hostname = get_ip() + + conn = connection() + + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topic + endpoint_address = 'amqp://127.0.0.1:7001' + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name+'_1', + 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # get notifications on a bucket + response, status = s3_notification_conf.get_config(notification=notification_name+'_1') + assert_equal(status/100, 2) + assert_equal(response['NotificationConfiguration']['TopicConfiguration']['Topic'], topic_arn) + + # create s3 
notification again with empty configuration to check if it deletes or not + topic_conf_list = [] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # make sure that the notification is now deleted + response, status = s3_notification_conf.get_config() + try: + check = response['NotificationConfiguration'] + except KeyError as e: + assert_equal(status/100, 2) + else: + assert False + + # cleanup + topic_conf.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + + +@attr('amqp_test') +def test_ps_s3_notification_filter_on_master(): + """ test s3 notification filter on master """ + + hostname = get_ip() + + conn = connection() + ps_zone = conn + + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # start amqp receivers + exchange = 'ex1' + task, receiver = create_amqp_receiver_thread(exchange, topic_name) + task.start() + + # create s3 topic + endpoint_address = 'amqp://' + hostname + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker' + + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name+'_1', + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*'], + 'Filter': { + 'Key': { + 'FilterRules': [{'Name': 'prefix', 'Value': 'hello'}] + } + } + }, + {'Id': notification_name+'_2', + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*'], + 'Filter': { + 'Key': { + 'FilterRules': [{'Name': 'prefix', 'Value': 'world'}, + {'Name': 'suffix', 'Value': 'log'}] + } + } + }, + {'Id': notification_name+'_3', + 'TopicArn': topic_arn, + 'Events': [], + 'Filter': { + 'Key': { + 'FilterRules': [{'Name': 'regex', 'Value': '([a-z]+)\\.txt'}] + } + } + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + result, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + topic_conf_list = [{'Id': notification_name+'_4', + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'], + 'Filter': { + 'Metadata': { + 'FilterRules': [{'Name': 'x-amz-meta-foo', 'Value': 'bar'}, + {'Name': 'x-amz-meta-hello', 'Value': 'world'}] + }, + 'Key': { + 'FilterRules': [{'Name': 'regex', 'Value': '([a-z]+)'}] + } + } + }] + + try: + s3_notification_conf4 = PSNotificationS3(conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf4.set_config() + assert_equal(status/100, 2) + skip_notif4 = False + except Exception as error: + print('note: metadata filter is not supported by boto3 - skipping test') + skip_notif4 = True + + + # get all notifications + result, status = s3_notification_conf.get_config() + assert_equal(status/100, 2) + for conf in result['TopicConfigurations']: + filter_name = conf['Filter']['Key']['FilterRules'][0]['Name'] + assert filter_name == 'prefix' or filter_name == 'suffix' or filter_name == 'regex', filter_name + + if not skip_notif4: + result, status = s3_notification_conf4.get_config(notification=notification_name+'_4') + assert_equal(status/100, 2) + filter_name = result['NotificationConfiguration']['TopicConfiguration']['Filter']['S3Metadata']['FilterRule'][0]['Name'] + assert filter_name == 'x-amz-meta-foo' or filter_name == 
'x-amz-meta-hello' + + expected_in1 = ['hello.kaboom', 'hello.txt', 'hello123.txt', 'hello'] + expected_in2 = ['world1.log', 'world2log', 'world3.log'] + expected_in3 = ['hello.txt', 'hell.txt', 'worldlog.txt'] + expected_in4 = ['foo', 'bar', 'hello', 'world'] + filtered = ['hell.kaboom', 'world.og', 'world.logg', 'he123ll.txt', 'wo', 'log', 'h', 'txt', 'world.log.txt'] + filtered_with_attr = ['nofoo', 'nobar', 'nohello', 'noworld'] + # create objects in bucket + for key_name in expected_in1: + key = bucket.new_key(key_name) + key.set_contents_from_string('bar') + for key_name in expected_in2: + key = bucket.new_key(key_name) + key.set_contents_from_string('bar') + for key_name in expected_in3: + key = bucket.new_key(key_name) + key.set_contents_from_string('bar') + if not skip_notif4: + for key_name in expected_in4: + key = bucket.new_key(key_name) + key.set_metadata('foo', 'bar') + key.set_metadata('hello', 'world') + key.set_metadata('goodbye', 'cruel world') + key.set_contents_from_string('bar') + for key_name in filtered: + key = bucket.new_key(key_name) + key.set_contents_from_string('bar') + for key_name in filtered_with_attr: + key.set_metadata('foo', 'nobar') + key.set_metadata('hello', 'noworld') + key.set_metadata('goodbye', 'cruel world') + key = bucket.new_key(key_name) + key.set_contents_from_string('bar') + + print('wait for 5sec for the messages...') + time.sleep(5) + + found_in1 = [] + found_in2 = [] + found_in3 = [] + found_in4 = [] + + for event in receiver.get_and_reset_events(): + notif_id = event['Records'][0]['s3']['configurationId'] + key_name = event['Records'][0]['s3']['object']['key'] + awsRegion = event['Records'][0]['awsRegion'] + assert_equal(awsRegion, zonegroup) + bucket_arn = event['Records'][0]['s3']['bucket']['arn'] + assert_equal(bucket_arn, "arn:aws:s3:"+awsRegion+"::"+bucket_name) + if notif_id == notification_name+'_1': + found_in1.append(key_name) + elif notif_id == notification_name+'_2': + found_in2.append(key_name) + elif notif_id == notification_name+'_3': + found_in3.append(key_name) + elif not skip_notif4 and notif_id == notification_name+'_4': + found_in4.append(key_name) + else: + assert False, 'invalid notification: ' + notif_id + + assert_equal(set(found_in1), set(expected_in1)) + assert_equal(set(found_in2), set(expected_in2)) + assert_equal(set(found_in3), set(expected_in3)) + if not skip_notif4: + assert_equal(set(found_in4), set(expected_in4)) + + # cleanup + s3_notification_conf.del_config() + if not skip_notif4: + s3_notification_conf4.del_config() + topic_conf.del_config() + # delete the bucket + for key in bucket.list(): + key.delete() + conn.delete_bucket(bucket_name) + stop_amqp_receiver(receiver, task) + + +@attr('basic_test') +def test_ps_s3_notification_errors_on_master(): + """ test s3 notification set/get/delete on master """ + conn = connection() + zonegroup = 'default' + bucket_name = gen_bucket_name() + # create bucket + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + # create s3 topic + endpoint_address = 'amqp://127.0.0.1:7001' + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + + # create s3 notification with invalid event name + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:Kaboom'] + }] + 
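    # 's3:ObjectCreated:Kaboom' is not a valid S3 event type, so applying this
    # configuration below is expected to be rejected by the server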
s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + try: + result, status = s3_notification_conf.set_config() + except Exception as error: + print(str(error) + ' - is expected') + else: + assert False, 'invalid event name is expected to fail' + + # create s3 notification with missing name + topic_conf_list = [{'Id': '', + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:Put'] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + try: + _, _ = s3_notification_conf.set_config() + except Exception as error: + print(str(error) + ' - is expected') + else: + assert False, 'missing notification name is expected to fail' + + # create s3 notification with invalid topic ARN + invalid_topic_arn = 'kaboom' + topic_conf_list = [{'Id': notification_name, + 'TopicArn': invalid_topic_arn, + 'Events': ['s3:ObjectCreated:Put'] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + try: + _, _ = s3_notification_conf.set_config() + except Exception as error: + print(str(error) + ' - is expected') + else: + assert False, 'invalid ARN is expected to fail' + + # create s3 notification with unknown topic ARN + invalid_topic_arn = 'arn:aws:sns:a::kaboom' + topic_conf_list = [{'Id': notification_name, + 'TopicArn': invalid_topic_arn , + 'Events': ['s3:ObjectCreated:Put'] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + try: + _, _ = s3_notification_conf.set_config() + except Exception as error: + print(str(error) + ' - is expected') + else: + assert False, 'unknown topic is expected to fail' + + # create s3 notification with wrong bucket + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:Put'] + }] + s3_notification_conf = PSNotificationS3(conn, 'kaboom', topic_conf_list) + try: + _, _ = s3_notification_conf.set_config() + except Exception as error: + print(str(error) + ' - is expected') + else: + assert False, 'unknown bucket is expected to fail' + + topic_conf.del_config() + + status = topic_conf.del_config() + # deleting an unknown notification is not considered an error + assert_equal(status, 200) + + _, status = topic_conf.get_config() + assert_equal(status, 404) + + # cleanup + # delete the bucket + conn.delete_bucket(bucket_name) + +@attr('basic_test') +def test_ps_s3_notification_permissions(): + """ test s3 notification set/get/delete permissions """ + conn1 = connection() + conn2 = another_user() + zonegroup = 'default' + bucket_name = gen_bucket_name() + # create bucket + bucket = conn1.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + # create s3 topic + endpoint_address = 'amqp://127.0.0.1:7001' + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=amqp.direct&amqp-ack-level=none' + topic_conf = PSTopicS3(conn1, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + + # one user create a notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf1 = PSNotificationS3(conn1, bucket_name, topic_conf_list) + _, status = s3_notification_conf1.set_config() + assert_equal(status, 200) + # another user try to fetch it + s3_notification_conf2 = PSNotificationS3(conn2, bucket_name, topic_conf_list) + try: + _, _ = s3_notification_conf2.get_config() + assert False, "'AccessDenied' error is expected" + except ClientError as error: + 
assert_equal(error.response['Error']['Code'], 'AccessDenied') + # other user try to delete the notification + _, status = s3_notification_conf2.del_config() + assert_equal(status, 403) + + # bucket policy is added by the 1st user + client = boto3.client('s3', + endpoint_url='http://'+conn1.host+':'+str(conn1.port), + aws_access_key_id=conn1.aws_access_key_id, + aws_secret_access_key=conn1.aws_secret_access_key) + bucket_policy = json.dumps({ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "Statement", + "Effect": "Allow", + "Principal": "*", + "Action": ["s3:GetBucketNotification", "s3:PutBucketNotification"], + "Resource": f"arn:aws:s3:::{bucket_name}" + } + ] + }) + response = client.put_bucket_policy(Bucket=bucket_name, Policy=bucket_policy) + assert_equal(int(response['ResponseMetadata']['HTTPStatusCode']/100), 2) + result = client.get_bucket_policy(Bucket=bucket_name) + print(result['Policy']) + + # 2nd user try to fetch it again + _, status = s3_notification_conf2.get_config() + assert_equal(status, 200) + + # 2nd user try to delete it again + result, status = s3_notification_conf2.del_config() + assert_equal(status, 200) + + # 2nd user try to add another notification + topic_conf_list = [{'Id': notification_name+"2", + 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf2 = PSNotificationS3(conn2, bucket_name, topic_conf_list) + result, status = s3_notification_conf2.set_config() + assert_equal(status, 200) + + # cleanup + s3_notification_conf1.del_config() + s3_notification_conf2.del_config() + topic_conf.del_config() + # delete the bucket + conn1.delete_bucket(bucket_name) + +@attr('amqp_test') +def test_ps_s3_notification_push_amqp_on_master(): + """ test pushing amqp s3 notification on master """ + + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name1 = bucket_name + TOPIC_SUFFIX + '_1' + topic_name2 = bucket_name + TOPIC_SUFFIX + '_2' + + # start amqp receivers + exchange = 'ex1' + task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name1) + task2, receiver2 = create_amqp_receiver_thread(exchange, topic_name2) + task1.start() + task2.start() + + # create two s3 topic + endpoint_address = 'amqp://' + hostname + # with acks from broker + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker' + topic_conf1 = PSTopicS3(conn, topic_name1, zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf1.set_config() + # without acks from broker + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable' + topic_conf2 = PSTopicS3(conn, topic_name2, zonegroup, endpoint_args=endpoint_args) + topic_arn2 = topic_conf2.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1, + 'Events': [] + }, + {'Id': notification_name+'_2', 'TopicArn': topic_arn2, + 'Events': ['s3:ObjectCreated:*'] + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket (async) + number_of_objects = 100 + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = 
set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + qmqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver + keys = list(bucket.list()) + print('total number of objects: ' + str(len(keys))) + receiver1.verify_s3_events(keys, exact_match=True) + receiver2.verify_s3_events(keys, exact_match=True) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver 1 for deletions + receiver1.verify_s3_events(keys, exact_match=True, deletions=True) + # check amqp receiver 2 has no deletions + try: + receiver1.verify_s3_events(keys, exact_match=False, deletions=True) + except: + pass + else: + err = 'amqp receiver 2 should have no deletions' + assert False, err + + # cleanup + stop_amqp_receiver(receiver1, task1) + stop_amqp_receiver(receiver2, task2) + s3_notification_conf.del_config() + topic_conf1.del_config() + topic_conf2.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + + +@attr('manual_test') +def test_ps_s3_notification_push_amqp_idleness_check(): + """ test pushing amqp s3 notification and checking for connection idleness """ + return SkipTest("only used in manual testing") + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name1 = bucket_name + TOPIC_SUFFIX + '_1' + + # start amqp receivers + exchange = 'ex1' + task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name1) + task1.start() + + # create two s3 topic + endpoint_address = 'amqp://' + hostname + # with acks from broker + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker' + topic_conf1 = PSTopicS3(conn, topic_name1, zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf1.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1, + 'Events': [] + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket (async) + number_of_objects = 10 + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver + keys = list(bucket.list()) + print('total number of objects: ' + 
str(len(keys))) + receiver1.verify_s3_events(keys, exact_match=True) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver 1 for deletions + receiver1.verify_s3_events(keys, exact_match=True, deletions=True) + + print('waiting for 40sec for checking idleness') + time.sleep(40) + + os.system("netstat -nnp | grep 5672"); + + # do the process of uploading an object and checking for notification again + number_of_objects = 10 + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver + keys = list(bucket.list()) + print('total number of objects: ' + str(len(keys))) + receiver1.verify_s3_events(keys, exact_match=True) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + amqp notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver 1 for deletions + receiver1.verify_s3_events(keys, exact_match=True, deletions=True) + + os.system("netstat -nnp | grep 5672"); + + # cleanup + stop_amqp_receiver(receiver1, task1) + s3_notification_conf.del_config() + topic_conf1.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + + +@attr('kafka_test') +def test_ps_s3_notification_push_kafka_on_master(): + """ test pushing kafka s3 notification on master """ + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + # name is constant for manual testing + topic_name = bucket_name+'_topic' + # create consumer on the topic + + try: + s3_notification_conf = None + topic_conf1 = None + topic_conf2 = None + receiver = None + task, receiver = create_kafka_receiver_thread(topic_name+'_1') + task.start() + + # create s3 topic + endpoint_address = 'kafka://' + kafka_server + # without acks from broker + endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker' + topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf1.set_config() + endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=none' + topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args) + topic_arn2 = topic_conf2.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + 
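        # wire notification '_1' to the topic created with kafka-ack-level=broker
        # and notification '_2' to the topic created with kafka-ack-level=none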
topic_conf_list = [{'Id': notification_name + '_1', 'TopicArn': topic_arn1, + 'Events': [] + }, + {'Id': notification_name + '_2', 'TopicArn': topic_arn2, + 'Events': [] + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket (async) + number_of_objects = 10 + client_threads = [] + etags = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + etag = hashlib.md5(content.encode()).hexdigest() + etags.append(etag) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + keys = list(bucket.list()) + receiver.verify_s3_events(keys, exact_match=True, etags=etags) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + receiver.verify_s3_events(keys, exact_match=True, deletions=True, etags=etags) + except Exception as e: + print(e) + assert False + finally: + # cleanup + if s3_notification_conf is not None: + s3_notification_conf.del_config() + if topic_conf1 is not None: + topic_conf1.del_config() + if topic_conf2 is not None: + topic_conf2.del_config() + # delete the bucket + for key in bucket.list(): + key.delete() + conn.delete_bucket(bucket_name) + if receiver is not None: + stop_kafka_receiver(receiver, task) + + +@attr('http_test') +def test_ps_s3_notification_multi_delete_on_master(): + """ test deletion of multiple keys on master """ + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create random port for the http server + host = get_ip() + port = random.randint(10000, 20000) + # start an http server in a separate thread + number_of_objects = 10 + http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects) + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topic + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectRemoved:*'] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket + client_threads = [] + objects_size = {} + for i in range(number_of_objects): + content = str(os.urandom(randint(1, 1024))) + object_size = len(content) + key = bucket.new_key(str(i)) + 
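        # record each object's size so the ObjectRemoved records can later be
        # checked against it via expected_sizes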
objects_size[key.name] = object_size + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + keys = list(bucket.list()) + + start_time = time.time() + delete_all_objects(conn, bucket_name) + time_diff = time.time() - start_time + print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check http receiver + http_server.verify_s3_events(keys, exact_match=True, deletions=True, expected_sizes=objects_size) + + # cleanup + topic_conf.del_config() + s3_notification_conf.del_config(notification=notification_name) + # delete the bucket + conn.delete_bucket(bucket_name) + http_server.close() + + +@attr('http_test') +def test_ps_s3_notification_push_http_on_master(): + """ test pushing http s3 notification on master """ + hostname = get_ip_http() + conn = connection() + zonegroup = 'default' + + # create random port for the http server + host = get_ip() + port = random.randint(10000, 20000) + # start an http server in a separate thread + number_of_objects = 10 + http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects) + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topic + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket + client_threads = [] + objects_size = {} + start_time = time.time() + for i in range(number_of_objects): + content = str(os.urandom(randint(1, 1024))) + object_size = len(content) + key = bucket.new_key(str(i)) + objects_size[key.name] = object_size + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check http receiver + keys = list(bucket.list()) + http_server.verify_s3_events(keys, exact_match=True, deletions=False, expected_sizes=objects_size) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check http receiver + http_server.verify_s3_events(keys, exact_match=True, deletions=True, expected_sizes=objects_size) + + # cleanup + topic_conf.del_config() + s3_notification_conf.del_config(notification=notification_name) + 
# delete the bucket + conn.delete_bucket(bucket_name) + http_server.close() + + +@attr('http_test') +def test_ps_s3_notification_push_cloudevents_on_master(): + """ test pushing cloudevents notification on master """ + hostname = get_ip_http() + conn = connection() + zonegroup = 'default' + + # create random port for the http server + host = get_ip() + port = random.randint(10000, 20000) + # start an http server in a separate thread + number_of_objects = 10 + http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects, cloudevents=True) + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topic + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address+'&cloudevents=true' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket + client_threads = [] + objects_size = {} + start_time = time.time() + for i in range(number_of_objects): + content = str(os.urandom(randint(1, 1024))) + object_size = len(content) + key = bucket.new_key(str(i)) + objects_size[key.name] = object_size + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check http receiver + keys = list(bucket.list()) + http_server.verify_s3_events(keys, exact_match=True, deletions=False, expected_sizes=objects_size) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check http receiver + http_server.verify_s3_events(keys, exact_match=True, deletions=True, expected_sizes=objects_size) + + # cleanup + topic_conf.del_config() + s3_notification_conf.del_config(notification=notification_name) + # delete the bucket + conn.delete_bucket(bucket_name) + http_server.close() + + +@attr('http_test') +def test_ps_s3_opaque_data_on_master(): + """ test that opaque id set in topic, is sent in notification on master """ + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create random port for the http server + host = get_ip() + port = random.randint(10000, 20000) + # start an http server in a separate thread + number_of_objects = 10 + http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects) + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topic + 
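+    # the opaque data is configured as an attribute of the topic itself; the
+    # gateway is expected to echo it back unmodified in every event record as
+    # 'Records[0].opaqueData', which is what this test verifies below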
endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address + opaque_data = 'http://1.2.3.4:8888' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args, opaque_data=opaque_data) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket + client_threads = [] + start_time = time.time() + content = 'bar' + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check http receiver + keys = list(bucket.list()) + print('total number of objects: ' + str(len(keys))) + events = http_server.get_and_reset_events() + for event in events: + assert_equal(event['Records'][0]['opaqueData'], opaque_data) + + # cleanup + for key in keys: + key.delete() + [thr.join() for thr in client_threads] + topic_conf.del_config() + s3_notification_conf.del_config(notification=notification_name) + # delete the bucket + conn.delete_bucket(bucket_name) + http_server.close() + +@attr('http_test') +def test_ps_s3_lifecycle_on_master(): + """ test that when object is deleted due to lifecycle policy, notification is sent on master """ + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create random port for the http server + host = get_ip() + port = random.randint(10000, 20000) + # start an http server in a separate thread + number_of_objects = 10 + http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects) + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topic + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address + opaque_data = 'http://1.2.3.4:8888' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args, opaque_data=opaque_data) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectLifecycle:Expiration:*'] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket + obj_prefix = 'ooo' + client_threads = [] + start_time = time.time() + content = 'bar' + for i in range(number_of_objects): + key = bucket.new_key(obj_prefix + str(i)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + # create lifecycle policy + client = 
boto3.client('s3', + endpoint_url='http://'+conn.host+':'+str(conn.port), + aws_access_key_id=conn.aws_access_key_id, + aws_secret_access_key=conn.aws_secret_access_key) + yesterday = datetime.date.today() - datetime.timedelta(days=1) + response = client.put_bucket_lifecycle_configuration(Bucket=bucket_name, + LifecycleConfiguration={'Rules': [ + { + 'ID': 'rule1', + 'Expiration': {'Date': yesterday.isoformat()}, + 'Filter': {'Prefix': obj_prefix}, + 'Status': 'Enabled', + } + ] + } + ) + + # start lifecycle processing + admin(['lc', 'process']) + print('wait for 5sec for the messages...') + time.sleep(5) + + # check http receiver does not have messages + keys = list(bucket.list()) + print('total number of objects: ' + str(len(keys))) + event_keys = [] + events = http_server.get_and_reset_events() + for event in events: + assert_equal(event['Records'][0]['eventName'], 'ObjectLifecycle:Expiration:Current') + event_keys.append(event['Records'][0]['s3']['object']['key']) + for key in keys: + key_found = False + for event_key in event_keys: + if event_key == key: + key_found = True + break + if not key_found: + err = 'no lifecycle event found for key: ' + str(key) + log.error(events) + assert False, err + + # cleanup + for key in keys: + key.delete() + [thr.join() for thr in client_threads] + topic_conf.del_config() + s3_notification_conf.del_config(notification=notification_name) + # delete the bucket + conn.delete_bucket(bucket_name) + http_server.close() + + +def ps_s3_creation_triggers_on_master(external_endpoint_address=None, ca_location=None, verify_ssl='true'): + """ test object creation s3 notifications in using put/copy/post on master""" + + if not external_endpoint_address: + hostname = 'localhost' + proc = init_rabbitmq() + if proc is None: + return SkipTest('end2end amqp tests require rabbitmq-server installed') + else: + proc = None + + conn = connection() + hostname = 'localhost' + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # start amqp receiver + exchange = 'ex1' + task, receiver = create_amqp_receiver_thread(exchange, topic_name, external_endpoint_address, ca_location) + task.start() + + # create s3 topic + if external_endpoint_address: + endpoint_address = external_endpoint_address + elif ca_location: + endpoint_address = 'amqps://' + hostname + else: + endpoint_address = 'amqp://' + hostname + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker&verify-ssl='+verify_ssl + if ca_location: + endpoint_args += '&ca-location={}'.format(ca_location) + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name,'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:Put', 's3:ObjectCreated:Copy', 's3:ObjectCreated:CompleteMultipartUpload'] + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + objects_size = {} + # create objects in the bucket using PUT + content = str(os.urandom(randint(1, 1024))) + key_name = 'put' + key = bucket.new_key(key_name) + objects_size[key_name] = len(content) + key.set_contents_from_string(content) + # create objects in the bucket using COPY + key_name = 'copy' + 
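+    # server-side copy of the object uploaded via PUT above; this should emit
+    # an 's3:ObjectCreated:Copy' event, one of the event types configured in
+    # the notification for this test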
bucket.copy_key(key_name, bucket.name, key.name) + objects_size[key_name] = len(content) + + # create objects in the bucket using multi-part upload + fp = tempfile.NamedTemporaryFile(mode='w+b') + content = bytearray(os.urandom(10*1024*1024)) + key_name = 'multipart' + objects_size[key_name] = len(content) + fp.write(content) + fp.flush() + fp.seek(0) + uploader = bucket.initiate_multipart_upload(key_name) + uploader.upload_part_from_file(fp, 1) + uploader.complete_upload() + fp.close() + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver + keys = list(bucket.list()) + receiver.verify_s3_events(keys, exact_match=True, expected_sizes=objects_size) + + # cleanup + stop_amqp_receiver(receiver, task) + s3_notification_conf.del_config() + topic_conf.del_config() + for key in bucket.list(): + key.delete() + # delete the bucket + conn.delete_bucket(bucket_name) + if proc: + clean_rabbitmq(proc) + + +@attr('amqp_test') +def test_ps_s3_creation_triggers_on_master(): + ps_s3_creation_triggers_on_master(external_endpoint_address="amqp://localhost:5672") + + +@attr('amqp_ssl_test') +def test_ps_s3_creation_triggers_on_master_external(): + + from distutils.util import strtobool + + if 'AMQP_EXTERNAL_ENDPOINT' in os.environ: + try: + if strtobool(os.environ['AMQP_VERIFY_SSL']): + verify_ssl = 'true' + else: + verify_ssl = 'false' + except Exception as e: + verify_ssl = 'true' + + ps_s3_creation_triggers_on_master( + external_endpoint_address=os.environ['AMQP_EXTERNAL_ENDPOINT'], + verify_ssl=verify_ssl) + else: + return SkipTest("Set AMQP_EXTERNAL_ENDPOINT to a valid external AMQP endpoint url for this test to run") + + +def generate_private_key(tempdir): + + import datetime + import stat + from cryptography import x509 + from cryptography.x509.oid import NameOID + from cryptography.hazmat.primitives import hashes + from cryptography.hazmat.backends import default_backend + from cryptography.hazmat.primitives import serialization + from cryptography.hazmat.primitives.asymmetric import rsa + + # modify permissions to ensure that the broker user can access them + os.chmod(tempdir, mode=stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH) + CACERTFILE = os.path.join(tempdir, 'ca_certificate.pem') + CERTFILE = os.path.join(tempdir, 'server_certificate.pem') + KEYFILE = os.path.join(tempdir, 'server_key.pem') + + root_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + backend=default_backend() + ) + subject = issuer = x509.Name([ + x509.NameAttribute(NameOID.COUNTRY_NAME, u"UK"), + x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"Oxfordshire"), + x509.NameAttribute(NameOID.LOCALITY_NAME, u"Harwell"), + x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Rosalind Franklin Institute"), + x509.NameAttribute(NameOID.COMMON_NAME, u"RFI CA"), + ]) + root_cert = x509.CertificateBuilder().subject_name( + subject + ).issuer_name( + issuer + ).public_key( + root_key.public_key() + ).serial_number( + x509.random_serial_number() + ).not_valid_before( + datetime.datetime.utcnow() + ).not_valid_after( + datetime.datetime.utcnow() + datetime.timedelta(days=3650) + ).add_extension( + x509.BasicConstraints(ca=True, path_length=None), critical=True + ).sign(root_key, hashes.SHA256(), default_backend()) + with open(CACERTFILE, "wb") as f: + f.write(root_cert.public_bytes(serialization.Encoding.PEM)) + + # Now we want to generate a cert from that root + cert_key = rsa.generate_private_key( + public_exponent=65537, + key_size=2048, + 
backend=default_backend(), + ) + with open(KEYFILE, "wb") as f: + f.write(cert_key.private_bytes( + encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.PKCS8, + encryption_algorithm=serialization.NoEncryption(), + )) + new_subject = x509.Name([ + x509.NameAttribute(NameOID.COUNTRY_NAME, u"UK"), + x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, u"Oxfordshire"), + x509.NameAttribute(NameOID.LOCALITY_NAME, u"Harwell"), + x509.NameAttribute(NameOID.ORGANIZATION_NAME, u"Rosalind Franklin Institute"), + ]) + cert = x509.CertificateBuilder().subject_name( + new_subject + ).issuer_name( + root_cert.issuer + ).public_key( + cert_key.public_key() + ).serial_number( + x509.random_serial_number() + ).not_valid_before( + datetime.datetime.utcnow() + ).not_valid_after( + datetime.datetime.utcnow() + datetime.timedelta(days=30) + ).add_extension( + x509.SubjectAlternativeName([x509.DNSName(u"localhost")]), + critical=False, + ).sign(root_key, hashes.SHA256(), default_backend()) + # Write our certificate out to disk. + with open(CERTFILE, "wb") as f: + f.write(cert.public_bytes(serialization.Encoding.PEM)) + + print("\n\n********private key generated********") + print(CACERTFILE, CERTFILE, KEYFILE) + print("\n\n") + return CACERTFILE, CERTFILE, KEYFILE + + +@attr('amqp_ssl_test') +def test_ps_s3_creation_triggers_on_master_ssl(): + + import textwrap + from tempfile import TemporaryDirectory + + with TemporaryDirectory() as tempdir: + CACERTFILE, CERTFILE, KEYFILE = generate_private_key(tempdir) + RABBITMQ_CONF_FILE = os.path.join(tempdir, 'rabbitmq.config') + with open(RABBITMQ_CONF_FILE, "w") as f: + # use the old style config format to ensure it also runs on older RabbitMQ versions. + f.write(textwrap.dedent(f''' + [ + {{rabbit, [ + {{ssl_listeners, [5671]}}, + {{ssl_options, [{{cacertfile, "{CACERTFILE}"}}, + {{certfile, "{CERTFILE}"}}, + {{keyfile, "{KEYFILE}"}}, + {{verify, verify_peer}}, + {{fail_if_no_peer_cert, false}}]}}]}} + ]. 
+ ''')) + os.environ['RABBITMQ_CONFIG_FILE'] = os.path.splitext(RABBITMQ_CONF_FILE)[0] + + ps_s3_creation_triggers_on_master(ca_location=CACERTFILE) + + del os.environ['RABBITMQ_CONFIG_FILE'] + + +@attr('amqp_test') +def test_http_post_object_upload(): + """ test that uploads object using HTTP POST """ + + import boto3 + from collections import OrderedDict + import requests + + hostname = get_ip() + zonegroup = 'default' + conn = connection() + + endpoint = "http://%s:%d" % (get_config_host(), get_config_port()) + + conn1 = boto3.client(service_name='s3', + aws_access_key_id=get_access_key(), + aws_secret_access_key=get_secret_key(), + endpoint_url=endpoint, + ) + + bucket_name = gen_bucket_name() + topic_name = bucket_name + TOPIC_SUFFIX + + key_name = 'foo.txt' + + resp = conn1.generate_presigned_post(Bucket=bucket_name, Key=key_name,) + + url = resp['url'] + + bucket = conn1.create_bucket(ACL='public-read-write', Bucket=bucket_name) + + # start amqp receivers + exchange = 'ex1' + task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name+'_1') + task1.start() + + # create s3 topics + endpoint_address = 'amqp://' + hostname + endpoint_args = 'push-endpoint=' + endpoint_address + '&amqp-exchange=' + exchange + '&amqp-ack-level=broker' + topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf1.set_config() + + # create s3 notifications + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1, + 'Events': ['s3:ObjectCreated:Post'] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + payload = OrderedDict([("key" , "foo.txt"),("acl" , "public-read"),\ + ("Content-Type" , "text/plain"),('file', ('bar'))]) + + # POST upload + r = requests.post(url, files=payload, verify=True) + assert_equal(r.status_code, 204) + + # check amqp receiver + events = receiver1.get_and_reset_events() + assert_equal(len(events), 1) + + # cleanup + stop_amqp_receiver(receiver1, task1) + s3_notification_conf.del_config() + topic_conf1.del_config() + conn1.delete_object(Bucket=bucket_name, Key=key_name) + # delete the bucket + conn1.delete_bucket(Bucket=bucket_name) + + +@attr('amqp_test') +def test_ps_s3_multipart_on_master(): + """ test multipart object upload on master""" + + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # start amqp receivers + exchange = 'ex1' + task1, receiver1 = create_amqp_receiver_thread(exchange, topic_name+'_1') + task1.start() + task2, receiver2 = create_amqp_receiver_thread(exchange, topic_name+'_2') + task2.start() + task3, receiver3 = create_amqp_receiver_thread(exchange, topic_name+'_3') + task3.start() + + # create s3 topics + endpoint_address = 'amqp://' + hostname + endpoint_args = 'push-endpoint=' + endpoint_address + '&amqp-exchange=' + exchange + '&amqp-ack-level=broker' + topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf1.set_config() + topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args) + topic_arn2 = topic_conf2.set_config() + topic_conf3 = PSTopicS3(conn, topic_name+'_3', zonegroup, endpoint_args=endpoint_args) + topic_arn3 = topic_conf3.set_config() + + # create s3 
notifications + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn1, + 'Events': ['s3:ObjectCreated:*'] + }, + {'Id': notification_name+'_2', 'TopicArn': topic_arn2, + 'Events': ['s3:ObjectCreated:Post'] + }, + {'Id': notification_name+'_3', 'TopicArn': topic_arn3, + 'Events': ['s3:ObjectCreated:CompleteMultipartUpload'] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket using multi-part upload + fp = tempfile.NamedTemporaryFile(mode='w+b') + object_size = 1024 + content = bytearray(os.urandom(object_size)) + fp.write(content) + fp.flush() + fp.seek(0) + uploader = bucket.initiate_multipart_upload('multipart') + uploader.upload_part_from_file(fp, 1) + uploader.complete_upload() + fp.close() + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver + events = receiver1.get_and_reset_events() + assert_equal(len(events), 1) + + events = receiver2.get_and_reset_events() + assert_equal(len(events), 0) + + events = receiver3.get_and_reset_events() + assert_equal(len(events), 1) + assert_equal(events[0]['Records'][0]['eventName'], 'ObjectCreated:CompleteMultipartUpload') + assert_equal(events[0]['Records'][0]['s3']['configurationId'], notification_name+'_3') + assert_equal(events[0]['Records'][0]['s3']['object']['size'], object_size) + assert events[0]['Records'][0]['eventTime'] != '0.000000', 'invalid eventTime' + + # cleanup + stop_amqp_receiver(receiver1, task1) + stop_amqp_receiver(receiver2, task2) + stop_amqp_receiver(receiver3, task3) + s3_notification_conf.del_config() + topic_conf1.del_config() + topic_conf2.del_config() + topic_conf3.del_config() + for key in bucket.list(): + key.delete() + # delete the bucket + conn.delete_bucket(bucket_name) + +@attr('amqp_test') +def test_ps_s3_metadata_filter_on_master(): + """ test s3 notification of metadata on master """ + + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # start amqp receivers + exchange = 'ex1' + task, receiver = create_amqp_receiver_thread(exchange, topic_name) + task.start() + + # create s3 topic + endpoint_address = 'amqp://' + hostname + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + meta_key = 'meta1' + meta_value = 'This is my metadata value' + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'], + 'Filter': { + 'Metadata': { + 'FilterRules': [{'Name': META_PREFIX+meta_key, 'Value': meta_value}] + } + } + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + expected_keys = [] + # create objects in the bucket + key_name = 'foo' + key = bucket.new_key(key_name) + key.set_metadata(meta_key, meta_value) + key.set_contents_from_string('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa') + expected_keys.append(key_name) + + # create objects in the bucket using COPY + key_name = 
'copy_of_foo' + bucket.copy_key(key_name, bucket.name, key.name) + expected_keys.append(key_name) + + # create another objects in the bucket using COPY + # but override the metadata value + key_name = 'another_copy_of_foo' + bucket.copy_key(key_name, bucket.name, key.name, metadata={meta_key: 'kaboom'}) + # this key is not in the expected keys due to the different meta value + + # create objects in the bucket using multi-part upload + fp = tempfile.NamedTemporaryFile(mode='w+b') + chunk_size = 1024*1024*5 # 5MB + object_size = 10*chunk_size + content = bytearray(os.urandom(object_size)) + fp.write(content) + fp.flush() + fp.seek(0) + key_name = 'multipart_foo' + uploader = bucket.initiate_multipart_upload(key_name, + metadata={meta_key: meta_value}) + for i in range(1,5): + uploader.upload_part_from_file(fp, i, size=chunk_size) + fp.seek(i*chunk_size) + uploader.complete_upload() + fp.close() + expected_keys.append(key_name) + + print('wait for 5sec for the messages...') + time.sleep(5) + # check amqp receiver + events = receiver.get_and_reset_events() + assert_equal(len(events), len(expected_keys)) + for event in events: + assert(event['Records'][0]['s3']['object']['key'] in expected_keys) + + # delete objects + for key in bucket.list(): + key.delete() + print('wait for 5sec for the messages...') + time.sleep(5) + # check amqp receiver + events = receiver.get_and_reset_events() + assert_equal(len(events), len(expected_keys)) + for event in events: + assert(event['Records'][0]['s3']['object']['key'] in expected_keys) + + # cleanup + stop_amqp_receiver(receiver, task) + s3_notification_conf.del_config() + topic_conf.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + + +@attr('amqp_test') +def test_ps_s3_metadata_on_master(): + """ test s3 notification of metadata on master """ + + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # start amqp receivers + exchange = 'ex1' + task, receiver = create_amqp_receiver_thread(exchange, topic_name) + task.start() + + # create s3 topic + endpoint_address = 'amqp://' + hostname + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + meta_key = 'meta1' + meta_value = 'This is my metadata value' + meta_prefix = META_PREFIX + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'], + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket + key_name = 'foo' + key = bucket.new_key(key_name) + key.set_metadata(meta_key, meta_value) + key.set_contents_from_string('aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa') + # update the object + another_meta_key = 'meta2' + key.set_metadata(another_meta_key, meta_value) + key.set_contents_from_string('bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb') + + # create objects in the bucket using COPY + key_name = 'copy_of_foo' + bucket.copy_key(key_name, bucket.name, key.name) + + # create objects in the bucket using multi-part upload + fp = tempfile.NamedTemporaryFile(mode='w+b') + chunk_size = 1024*1024*5 # 
5MB + object_size = 10*chunk_size + content = bytearray(os.urandom(object_size)) + fp.write(content) + fp.flush() + fp.seek(0) + key_name = 'multipart_foo' + uploader = bucket.initiate_multipart_upload(key_name, + metadata={meta_key: meta_value}) + for i in range(1,5): + uploader.upload_part_from_file(fp, i, size=chunk_size) + fp.seek(i*chunk_size) + uploader.complete_upload() + fp.close() + + print('wait for 5sec for the messages...') + time.sleep(5) + # check amqp receiver + events = receiver.get_and_reset_events() + for event in events: + value = [x['val'] for x in event['Records'][0]['s3']['object']['metadata'] if x['key'] == META_PREFIX+meta_key] + assert_equal(value[0], meta_value) + + # delete objects + for key in bucket.list(): + key.delete() + print('wait for 5sec for the messages...') + time.sleep(5) + # check amqp receiver + events = receiver.get_and_reset_events() + for event in events: + value = [x['val'] for x in event['Records'][0]['s3']['object']['metadata'] if x['key'] == META_PREFIX+meta_key] + assert_equal(value[0], meta_value) + + # cleanup + stop_amqp_receiver(receiver, task) + s3_notification_conf.del_config() + topic_conf.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + + +@attr('amqp_test') +def test_ps_s3_tags_on_master(): + """ test s3 notification of tags on master """ + + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # start amqp receiver + exchange = 'ex1' + task, receiver = create_amqp_receiver_thread(exchange, topic_name) + task.start() + + # create s3 topic + endpoint_address = 'amqp://' + hostname + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=routable' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name,'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*', 's3:ObjectRemoved:*'], + 'Filter': { + 'Tags': { + 'FilterRules': [{'Name': 'hello', 'Value': 'world'}, {'Name': 'ka', 'Value': 'boom'}] + } + } + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + expected_keys = [] + # create objects in the bucket with tags + # key 1 has all the tags in the filter + tags = 'hello=world&ka=boom&hello=helloworld' + key_name1 = 'key1' + put_object_tagging(conn, bucket_name, key_name1, tags) + expected_keys.append(key_name1) + # key 2 has an additional tag not in the filter + tags = 'hello=world&foo=bar&ka=boom&hello=helloworld' + key_name = 'key2' + put_object_tagging(conn, bucket_name, key_name, tags) + expected_keys.append(key_name) + # key 3 has no tags + key_name3 = 'key3' + key = bucket.new_key(key_name3) + key.set_contents_from_string('bar') + # key 4 has the wrong of the multi value tags + tags = 'hello=helloworld&ka=boom' + key_name = 'key4' + put_object_tagging(conn, bucket_name, key_name, tags) + # key 5 has the right of the multi value tags + tags = 'hello=world&ka=boom' + key_name = 'key5' + put_object_tagging(conn, bucket_name, key_name, tags) + expected_keys.append(key_name) + # key 6 is missing a tag + tags = 'hello=world' + key_name = 'key6' + put_object_tagging(conn, bucket_name, key_name, tags) + # create 
objects in the bucket using COPY + key_name = 'copy_of_'+key_name1 + bucket.copy_key(key_name, bucket.name, key_name1) + expected_keys.append(key_name) + + print('wait for 5sec for the messages...') + time.sleep(5) + event_count = 0 + expected_tags1 = [{'key': 'hello', 'val': 'world'}, {'key': 'hello', 'val': 'helloworld'}, {'key': 'ka', 'val': 'boom'}] + expected_tags1 = sorted(expected_tags1, key=lambda k: k['key']+k['val']) + for event in receiver.get_and_reset_events(): + key = event['Records'][0]['s3']['object']['key'] + if (key == key_name1): + obj_tags = sorted(event['Records'][0]['s3']['object']['tags'], key=lambda k: k['key']+k['val']) + assert_equal(obj_tags, expected_tags1) + event_count += 1 + assert(key in expected_keys) + + assert_equal(event_count, len(expected_keys)) + + # delete the objects + for key in bucket.list(): + key.delete() + print('wait for 5sec for the messages...') + time.sleep(5) + event_count = 0 + # check amqp receiver + for event in receiver.get_and_reset_events(): + key = event['Records'][0]['s3']['object']['key'] + if (key == key_name1): + obj_tags = sorted(event['Records'][0]['s3']['object']['tags'], key=lambda k: k['key']+k['val']) + assert_equal(obj_tags, expected_tags1) + event_count += 1 + assert(key in expected_keys) + + assert(event_count == len(expected_keys)) + + # cleanup + stop_amqp_receiver(receiver, task) + s3_notification_conf.del_config() + topic_conf.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + +@attr('amqp_test') +def test_ps_s3_versioning_on_master(): + """ test s3 notification of object versions """ + + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + bucket.configure_versioning(True) + topic_name = bucket_name + TOPIC_SUFFIX + + # start amqp receiver + exchange = 'ex1' + task, receiver = create_amqp_receiver_thread(exchange, topic_name) + task.start() + + # create s3 topic + endpoint_address = 'amqp://' + hostname + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket + key_name = 'foo' + key = bucket.new_key(key_name) + key.set_contents_from_string('hello') + ver1 = key.version_id + key.set_contents_from_string('world') + ver2 = key.version_id + copy_of_key = bucket.copy_key('copy_of_foo', bucket.name, key_name, src_version_id=ver1) + ver3 = copy_of_key.version_id + versions = [ver1, ver2, ver3] + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver + events = receiver.get_and_reset_events() + num_of_versions = 0 + for event_list in events: + for event in event_list['Records']: + assert event['s3']['object']['key'] in (key_name, copy_of_key.name) + version = event['s3']['object']['versionId'] + num_of_versions += 1 + if version not in versions: + print('version mismatch: '+version+' not in: '+str(versions)) + # TODO: copy_key() does not return the version of the copied object + #assert False + else: + print('version ok: '+version+' in: 
'+str(versions)) + + assert_equal(num_of_versions, 3) + + # cleanup + stop_amqp_receiver(receiver, task) + s3_notification_conf.del_config() + topic_conf.del_config() + # delete the bucket + bucket.delete_key(copy_of_key, version_id=ver3) + bucket.delete_key(key.name, version_id=ver2) + bucket.delete_key(key.name, version_id=ver1) + #conn.delete_bucket(bucket_name) + + +@attr('amqp_test') +def test_ps_s3_versioned_deletion_on_master(): + """ test s3 notification of deletion markers on master """ + + hostname = get_ip() + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + bucket.configure_versioning(True) + topic_name = bucket_name + TOPIC_SUFFIX + + # start amqp receiver + exchange = 'ex1' + task, receiver = create_amqp_receiver_thread(exchange, topic_name) + task.start() + + # create s3 topic + endpoint_address = 'amqp://' + hostname + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange=' + exchange +'&amqp-ack-level=broker' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name+'_1', 'TopicArn': topic_arn, + 'Events': ['s3:ObjectRemoved:*'] + }, + {'Id': notification_name+'_2', 'TopicArn': topic_arn, + 'Events': ['s3:ObjectRemoved:DeleteMarkerCreated'] + }, + {'Id': notification_name+'_3', 'TopicArn': topic_arn, + 'Events': ['s3:ObjectRemoved:Delete'] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket + key = bucket.new_key('foo') + content = str(os.urandom(512)) + size1 = len(content) + key.set_contents_from_string(content) + ver1 = key.version_id + content = str(os.urandom(511)) + size2 = len(content) + key.set_contents_from_string(content) + ver2 = key.version_id + # create delete marker (non versioned deletion) + delete_marker_key = bucket.delete_key(key.name) + versions = [ver1, ver2, delete_marker_key.version_id] + + time.sleep(1) + + # versioned deletion + bucket.delete_key(key.name, version_id=ver2) + bucket.delete_key(key.name, version_id=ver1) + + print('wait for 5sec for the messages...') + time.sleep(5) + + # check amqp receiver + events = receiver.get_and_reset_events() + delete_events = 0 + delete_marker_create_events = 0 + for event_list in events: + for event in event_list['Records']: + version = event['s3']['object']['versionId'] + size = event['s3']['object']['size'] + if version not in versions: + print('version mismatch: '+version+' not in: '+str(versions)) + assert False + else: + print('version ok: '+version+' in: '+str(versions)) + if event['eventName'] == 'ObjectRemoved:Delete': + delete_events += 1 + assert size in [size1, size2] + assert event['s3']['configurationId'] in [notification_name+'_1', notification_name+'_3'] + if event['eventName'] == 'ObjectRemoved:DeleteMarkerCreated': + delete_marker_create_events += 1 + assert size == size2 + assert event['s3']['configurationId'] in [notification_name+'_1', notification_name+'_2'] + + # 2 key versions were deleted + # notified over the same topic via 2 notifications (1,3) + assert_equal(delete_events, 2*2) + # 1 deletion marker was created + # notified over the same topic over 2 notifications (1,2) + assert_equal(delete_marker_create_events, 1*2) + + # cleanup + 
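+    # the delete marker created above is itself a version of the object and
+    # must be removed as well, since a versioned bucket has to be completely
+    # empty before it can be deleted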
delete_marker_key.delete() + stop_amqp_receiver(receiver, task) + s3_notification_conf.del_config() + topic_conf.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + + +@attr('manual_test') +def test_ps_s3_persistent_cleanup(): + """ test reservation cleanup after gateway crash """ + return SkipTest("only used in manual testing") + conn = connection() + zonegroup = 'default' + + # create random port for the http server + host = get_ip() + port = random.randint(10000, 20000) + # start an http server in a separate thread + number_of_objects = 200 + http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects) + + gw = conn + + # create bucket + bucket_name = gen_bucket_name() + bucket = gw.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topic + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true' + topic_conf = PSTopicS3(gw, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:Put'] + }] + s3_notification_conf = PSNotificationS3(gw, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + # stop gateway while clients are sending + os.system("killall -9 radosgw"); + print('wait for 10 sec for before restarting the gateway') + time.sleep(10) + # TODO: start the radosgw + [thr.join() for thr in client_threads] + + keys = list(bucket.list()) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + # check http receiver + events = http_server.get_and_reset_events() + + print(str(len(events) ) + " events found out of " + str(number_of_objects)) + + # make sure that things are working now + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + keys = list(bucket.list()) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + print('wait for 180 sec for reservations to be stale before queue deletion') + time.sleep(180) + + # check http receiver + events = http_server.get_and_reset_events() + + print(str(len(events)) + " events found out of " + str(number_of_objects)) + + # cleanup + s3_notification_conf.del_config() + topic_conf.del_config() + gw.delete_bucket(bucket_name) + http_server.close() + + +@attr('manual_test') +def test_ps_s3_persistent_notification_pushback(): + """ test pushing persistent notification pushback """ + return SkipTest("only 
used in manual testing") + conn = connection() + zonegroup = 'default' + + # create random port for the http server + host = get_ip() + port = random.randint(10000, 20000) + # start an http server in a separate thread + http_server = StreamingHTTPServer(host, port, num_workers=10, delay=0.5) + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create s3 topic + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true' + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn, + 'Events': [] + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket (async) + for j in range(100): + number_of_objects = randint(500, 1000) + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(j)+'-'+str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + time_diff = time.time() - start_time + print('average time for creation + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + keys = list(bucket.list()) + + delay = 30 + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + count = 0 + for key in bucket.list(): + count += 1 + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + if count%100 == 0: + [thr.join() for thr in client_threads] + time_diff = time.time() - start_time + print('average time for deletion + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + client_threads = [] + start_time = time.time() + + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + + # cleanup + s3_notification_conf.del_config() + topic_conf.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + time.sleep(delay) + http_server.close() + + +@attr('kafka_test') +def test_ps_s3_notification_kafka_idle_behaviour(): + """ test pushing kafka s3 notification idle behaviour check """ + # TODO convert this test to actual running test by changing + # os.system call to verify the process idleness + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + # name is constant for manual testing + topic_name = bucket_name+'_topic' + # create consumer on the topic + + task, receiver = create_kafka_receiver_thread(topic_name+'_1') + task.start() + + # create s3 topic + endpoint_address = 'kafka://' + kafka_server + # with acks from broker + endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker' + topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf1.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name + 
'_1', 'TopicArn': topic_arn1, + 'Events': [] + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket (async) + number_of_objects = 10 + client_threads = [] + etags = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + etag = hashlib.md5(content.encode()).hexdigest() + etags.append(etag) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + keys = list(bucket.list()) + receiver.verify_s3_events(keys, exact_match=True, etags=etags) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + receiver.verify_s3_events(keys, exact_match=True, deletions=True, etags=etags) + + is_idle = False + + while not is_idle: + print('waiting for 10sec for checking idleness') + time.sleep(10) + cmd = "netstat -nnp | grep 9092 | grep radosgw" + proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True) + out = proc.communicate()[0] + if len(out) == 0: + is_idle = True + else: + print("radosgw<->kafka connection is not idle") + print(out.decode('utf-8')) + + # do the process of uploading an object and checking for notification again + number_of_objects = 10 + client_threads = [] + etags = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + etag = hashlib.md5(content.encode()).hexdigest() + etags.append(etag) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + keys = list(bucket.list()) + receiver.verify_s3_events(keys, exact_match=True, etags=etags) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for 5sec for the messages...') + time.sleep(5) + receiver.verify_s3_events(keys, exact_match=True, deletions=True, etags=etags) + + # cleanup + s3_notification_conf.del_config() + topic_conf1.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + stop_kafka_receiver(receiver, task) + + +@attr('modification_required') +def 
test_ps_s3_persistent_gateways_recovery(): + """ test gateway recovery of persistent notifications """ + return SkipTest('This test requires two gateways.') + + conn = connection() + zonegroup = 'default' + # create random port for the http server + host = get_ip() + port = random.randint(10000, 20000) + # start an http server in a separate thread + number_of_objects = 10 + http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects) + gw1 = conn + gw2 = connection2() + # create bucket + bucket_name = gen_bucket_name() + bucket = gw1.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + # create two s3 topics + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true' + topic_conf1 = PSTopicS3(gw1, topic_name+'_1', zonegroup, endpoint_args=endpoint_args+'&OpaqueData=fromgw1') + topic_arn1 = topic_conf1.set_config() + topic_conf2 = PSTopicS3(gw2, topic_name+'_2', zonegroup, endpoint_args=endpoint_args+'&OpaqueData=fromgw2') + topic_arn2 = topic_conf2.set_config() + # create two s3 notifications + notification_name = bucket_name + NOTIFICATION_SUFFIX+'_1' + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn1, + 'Events': ['s3:ObjectCreated:Put'] + }] + s3_notification_conf1 = PSNotificationS3(gw1, bucket_name, topic_conf_list) + response, status = s3_notification_conf1.set_config() + assert_equal(status/100, 2) + notification_name = bucket_name + NOTIFICATION_SUFFIX+'_2' + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn2, + 'Events': ['s3:ObjectRemoved:Delete'] + }] + s3_notification_conf2 = PSNotificationS3(gw2, bucket_name, topic_conf_list) + response, status = s3_notification_conf2.set_config() + assert_equal(status/100, 2) + # stop gateway 2 + print('stopping gateway2...') + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + keys = list(bucket.list()) + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + print('wait for 60 sec for before restarting the gateway') + time.sleep(60) + # check http receiver + events = http_server.get_and_reset_events() + for key in keys: + creations = 0 + deletions = 0 + for event in events: + if event['Records'][0]['eventName'] == 'ObjectCreated:Put' and \ + key.name == event['Records'][0]['s3']['object']['key']: + creations += 1 + elif event['Records'][0]['eventName'] == 'ObjectRemoved:Delete' and \ + key.name == event['Records'][0]['s3']['object']['key']: + deletions += 1 + assert_equal(creations, 1) + assert_equal(deletions, 1) + # cleanup + s3_notification_conf1.del_config() + topic_conf1.del_config() + gw1.delete_bucket(bucket_name) + time.sleep(10) + s3_notification_conf2.del_config() + topic_conf2.del_config() + http_server.close() + + +@attr('modification_required') +def test_ps_s3_persistent_multiple_gateways(): + """ test pushing persistent notification via two gateways """ + return SkipTest('This test requires two gateways.') + + conn = connection() + zonegroup = 'default' + # create random port for the http server + host = get_ip() + port = 
random.randint(10000, 20000) + # start an http server in a separate thread + number_of_objects = 10 + http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects) + gw1 = conn + gw2 = connection2() + # create bucket + bucket_name = gen_bucket_name() + bucket1 = gw1.create_bucket(bucket_name) + bucket2 = gw2.get_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + # create two s3 topics + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true' + topic1_opaque = 'fromgw1' + topic_conf1 = PSTopicS3(gw1, topic_name+'_1', zonegroup, endpoint_args=endpoint_args+'&OpaqueData='+topic1_opaque) + topic_arn1 = topic_conf1.set_config() + topic2_opaque = 'fromgw2' + topic_conf2 = PSTopicS3(gw2, topic_name+'_2', zonegroup, endpoint_args=endpoint_args+'&OpaqueData='+topic2_opaque) + topic_arn2 = topic_conf2.set_config() + # create two s3 notifications + notification_name = bucket_name + NOTIFICATION_SUFFIX+'_1' + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn1, + 'Events': [] + }] + s3_notification_conf1 = PSNotificationS3(gw1, bucket_name, topic_conf_list) + response, status = s3_notification_conf1.set_config() + assert_equal(status/100, 2) + notification_name = bucket_name + NOTIFICATION_SUFFIX+'_2' + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn2, + 'Events': [] + }] + s3_notification_conf2 = PSNotificationS3(gw2, bucket_name, topic_conf_list) + response, status = s3_notification_conf2.set_config() + assert_equal(status/100, 2) + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket1.new_key('gw1_'+str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + key = bucket2.new_key('gw2_'+str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + keys = list(bucket1.list()) + delay = 30 + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + events = http_server.get_and_reset_events() + for key in keys: + topic1_count = 0 + topic2_count = 0 + for event in events: + if event['Records'][0]['eventName'] == 'ObjectCreated:Put' and \ + key.name == event['Records'][0]['s3']['object']['key'] and \ + topic1_opaque == event['Records'][0]['opaqueData']: + topic1_count += 1 + elif event['Records'][0]['eventName'] == 'ObjectCreated:Put' and \ + key.name == event['Records'][0]['s3']['object']['key'] and \ + topic2_opaque == event['Records'][0]['opaqueData']: + topic2_count += 1 + assert_equal(topic1_count, 1) + assert_equal(topic2_count, 1) + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket1.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + events = http_server.get_and_reset_events() + for key in keys: + topic1_count = 0 + topic2_count = 0 + for event in events: + if event['Records'][0]['eventName'] == 'ObjectRemoved:Delete' and \ + key.name == event['Records'][0]['s3']['object']['key'] and \ + topic1_opaque == event['Records'][0]['opaqueData']: + topic1_count += 1 + elif event['Records'][0]['eventName'] == 'ObjectRemoved:Delete' 
and \ + key.name == event['Records'][0]['s3']['object']['key'] and \ + topic2_opaque == event['Records'][0]['opaqueData']: + topic2_count += 1 + assert_equal(topic1_count, 1) + assert_equal(topic2_count, 1) + # cleanup + s3_notification_conf1.del_config() + topic_conf1.del_config() + s3_notification_conf2.del_config() + topic_conf2.del_config() + gw1.delete_bucket(bucket_name) + http_server.close() + + +@attr('http_test') +def test_ps_s3_persistent_multiple_endpoints(): + """ test pushing persistent notification when one of the endpoints has error """ + conn = connection() + zonegroup = 'default' + + # create random port for the http server + host = get_ip() + port = random.randint(10000, 20000) + # start an http server in a separate thread + number_of_objects = 10 + http_server = StreamingHTTPServer(host, port, num_workers=number_of_objects) + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + # create two s3 topics + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true' + topic_conf1 = PSTopicS3(conn, topic_name+'_1', zonegroup, endpoint_args=endpoint_args) + topic_arn1 = topic_conf1.set_config() + endpoint_address = 'http://kaboom:9999' + endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true' + topic_conf2 = PSTopicS3(conn, topic_name+'_2', zonegroup, endpoint_args=endpoint_args) + topic_arn2 = topic_conf2.set_config() + + # create two s3 notifications + notification_name = bucket_name + NOTIFICATION_SUFFIX+'_1' + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn1, + 'Events': [] + }] + s3_notification_conf1 = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf1.set_config() + assert_equal(status/100, 2) + notification_name = bucket_name + NOTIFICATION_SUFFIX+'_2' + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn2, + 'Events': [] + }] + s3_notification_conf2 = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf2.set_config() + assert_equal(status/100, 2) + + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + keys = list(bucket.list()) + + delay = 30 + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + + http_server.verify_s3_events(keys, exact_match=False, deletions=False) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + + http_server.verify_s3_events(keys, exact_match=False, deletions=True) + + # cleanup + s3_notification_conf1.del_config() + topic_conf1.del_config() + s3_notification_conf2.del_config() + topic_conf2.del_config() + conn.delete_bucket(bucket_name) + http_server.close() + +def persistent_notification(endpoint_type): + """ test pushing persistent notification """ + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + 
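+    # the receiver and endpoint args below are chosen per endpoint_type
+    # (http/amqp/kafka); all variants set 'persistent=true', so the gateway
+    # queues notifications and delivers them asynchronously, hence the longer
+    # waits before verification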
topic_name = bucket_name + TOPIC_SUFFIX + + receiver = {} + host = get_ip() + if endpoint_type == 'http': + # create random port for the http server + host = get_ip_http() + port = random.randint(10000, 20000) + # start an http server in a separate thread + receiver = StreamingHTTPServer(host, port, num_workers=10) + endpoint_address = 'http://'+host+':'+str(port) + endpoint_args = 'push-endpoint='+endpoint_address+'&persistent=true' + # the http server does not guarantee order, so duplicates are expected + exact_match = False + elif endpoint_type == 'amqp': + # start amqp receiver + exchange = 'ex1' + task, receiver = create_amqp_receiver_thread(exchange, topic_name) + task.start() + endpoint_address = 'amqp://' + host + endpoint_args = 'push-endpoint='+endpoint_address+'&amqp-exchange='+exchange+'&amqp-ack-level=broker'+'&persistent=true' + # amqp broker guarantee ordering + exact_match = True + elif endpoint_type == 'kafka': + # start amqp receiver + task, receiver = create_kafka_receiver_thread(topic_name) + task.start() + endpoint_address = 'kafka://' + host + endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker'+'&persistent=true' + # amqp broker guarantee ordering + exact_match = True + else: + return SkipTest('Unknown endpoint type: ' + endpoint_type) + + + # create s3 topic + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn, + 'Events': [] + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket (async) + number_of_objects = 100 + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + keys = list(bucket.list()) + + delay = 40 + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + + receiver.verify_s3_events(keys, exact_match=exact_match, deletions=False) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + + receiver.verify_s3_events(keys, exact_match=exact_match, deletions=True) + + # cleanup + s3_notification_conf.del_config() + topic_conf.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + if endpoint_type == 'http': + receiver.close() + else: + stop_amqp_receiver(receiver, task) + + +@attr('http_test') +def test_ps_s3_persistent_notification_http(): + """ test pushing persistent notification http """ + persistent_notification('http') + + +@attr('amqp_test') +def 
test_ps_s3_persistent_notification_amqp(): + """ test pushing persistent notification amqp """ + persistent_notification('amqp') + + +@attr('kafka_test') +def test_ps_s3_persistent_notification_kafka(): + """ test pushing persistent notification kafka """ + persistent_notification('kafka') + + +def random_string(length): + import string + letters = string.ascii_letters + return ''.join(random.choice(letters) for i in range(length)) + + +@attr('amqp_test') +def test_ps_s3_persistent_notification_large(): + """ test pushing persistent notification of large notifications """ + + conn = connection() + zonegroup = 'default' + + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + topic_name = bucket_name + TOPIC_SUFFIX + + receiver = {} + host = get_ip() + # start amqp receiver + exchange = 'ex1' + task, receiver = create_amqp_receiver_thread(exchange, topic_name) + task.start() + endpoint_address = 'amqp://' + host + opaque_data = random_string(1024*2) + endpoint_args = 'push-endpoint='+endpoint_address+'&OpaqueData='+opaque_data+'&amqp-exchange='+exchange+'&amqp-ack-level=broker'+'&persistent=true' + # amqp broker guarantee ordering + exact_match = True + + # create s3 topic + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn, + 'Events': [] + }] + + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + response, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + + # create objects in the bucket (async) + number_of_objects = 100 + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key_value = random_string(63) + key = bucket.new_key(key_value) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for creation + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + keys = list(bucket.list()) + + delay = 40 + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + + receiver.verify_s3_events(keys, exact_match=exact_match, deletions=False) + + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + + time_diff = time.time() - start_time + print('average time for deletion + async http notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + + print('wait for '+str(delay)+'sec for the messages...') + time.sleep(delay) + + receiver.verify_s3_events(keys, exact_match=exact_match, deletions=True) + + # cleanup + s3_notification_conf.del_config() + topic_conf.del_config() + # delete the bucket + conn.delete_bucket(bucket_name) + stop_amqp_receiver(receiver, task) + + +@attr('modification_required') +def test_ps_s3_topic_update(): + """ test updating topic associated with a notification""" + return SkipTest('This test is yet to be modified.') + + conn = connection() + ps_zone = None + bucket_name = gen_bucket_name() + topic_name = bucket_name+TOPIC_SUFFIX + # create amqp topic + 
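# (illustrative sketch, not part of the test suite) The verification loops in
# the tests above that check per-topic OpaqueData repeat the same three-way
# match on event name, object key and OpaqueData. The same check can be
# written once; the helper name count_matching_events is hypothetical:
def count_matching_events(events, event_name, key_name, opaque_data):
    """Count received notifications matching event name, object key and OpaqueData."""
    return sum(1 for e in events
               if e['Records'][0]['eventName'] == event_name and
               e['Records'][0]['s3']['object']['key'] == key_name and
               e['Records'][0]['opaqueData'] == opaque_data)
# e.g. assert_equal(count_matching_events(events, 'ObjectCreated:Put', key.name, topic1_opaque), 1)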
hostname = get_ip() + exchange = 'ex1' + amqp_task, receiver = create_amqp_receiver_thread(exchange, topic_name) + amqp_task.start() + #topic_conf = PSTopic(ps_zone.conn, topic_name,endpoint='amqp://' + hostname,endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none') + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none') + + topic_arn = topic_conf.set_config() + #result, status = topic_conf.set_config() + #assert_equal(status/100, 2) + parsed_result = json.loads(result) + topic_arn = parsed_result['arn'] + # get topic + result, _ = topic_conf.get_config() + # verify topic content + parsed_result = json.loads(result) + assert_equal(parsed_result['topic']['name'], topic_name) + assert_equal(parsed_result['topic']['dest']['push_endpoint'], topic_conf.parameters['push-endpoint']) + # create http server + port = random.randint(10000, 20000) + # start an http server in a separate thread + http_server = StreamingHTTPServer(hostname, port) + # create bucket on the first of the rados zones + bucket = conn.create_bucket(bucket_name) + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*'] + }] + s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + # create objects in the bucket + number_of_objects = 10 + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + key.set_contents_from_string('bar') + # wait for sync + #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name) + keys = list(bucket.list()) + # TODO: use exact match + receiver.verify_s3_events(keys, exact_match=False) + # update the same topic with new endpoint + #topic_conf = PSTopic(ps_zone.conn, topic_name,endpoint='http://'+ hostname + ':' + str(port)) + topic_conf = PSTopicS3(conn, topic_name, endpoint_args='http://'+ hostname + ':' + str(port)) + _, status = topic_conf.set_config() + assert_equal(status/100, 2) + # get topic + result, _ = topic_conf.get_config() + # verify topic content + parsed_result = json.loads(result) + assert_equal(parsed_result['topic']['name'], topic_name) + assert_equal(parsed_result['topic']['dest']['push_endpoint'], topic_conf.parameters['push-endpoint']) + # delete current objects and create new objects in the bucket + for key in bucket.list(): + key.delete() + for i in range(number_of_objects): + key = bucket.new_key(str(i+100)) + key.set_contents_from_string('bar') + # wait for sync + #zone_meta_checkpoint(ps_zone.zone) + #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name) + keys = list(bucket.list()) + # verify that notifications are still sent to amqp + # TODO: use exact match + receiver.verify_s3_events(keys, exact_match=False) + # update notification to update the endpoint from the topic + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn, + 'Events': ['s3:ObjectCreated:*'] + }] + s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + # delete current objects and create new objects in the bucket + for key in bucket.list(): + key.delete() + for i in range(number_of_objects): + key = bucket.new_key(str(i+200)) + key.set_contents_from_string('bar') + # wait for sync + #zone_meta_checkpoint(ps_zone.zone) + 
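# (illustrative sketch) The zone_meta_checkpoint()/zone_bucket_checkpoint()
# calls commented out around here appear to be left over from the multisite
# variant of this test; in this single-cluster suite the tests fall back to
# fixed sleeps instead. A generic polling helper of the kind that could
# replace such fixed delays (the name wait_until is hypothetical and unused here):
def wait_until(predicate, timeout=60, interval=5):
    """Poll predicate() until it returns True or the timeout expires."""
    import time
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False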
#zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name) + keys = list(bucket.list()) + # check that updates switched to http + # TODO: use exact match + http_server.verify_s3_events(keys, exact_match=False) + # cleanup + # delete objects from the bucket + stop_amqp_receiver(receiver, amqp_task) + for key in bucket.list(): + key.delete() + s3_notification_conf.del_config() + topic_conf.del_config() + conn.delete_bucket(bucket_name) + http_server.close() + + +@attr('modification_required') +def test_ps_s3_notification_update(): + """ test updating the topic of a notification""" + return SkipTest('This test is yet to be modified.') + + hostname = get_ip() + conn = connection() + ps_zone = None + bucket_name = gen_bucket_name() + topic_name1 = bucket_name+'amqp'+TOPIC_SUFFIX + topic_name2 = bucket_name+'http'+TOPIC_SUFFIX + zonegroup = 'default' + # create topics + # start amqp receiver in a separate thread + exchange = 'ex1' + amqp_task, receiver = create_amqp_receiver_thread(exchange, topic_name1) + amqp_task.start() + # create random port for the http server + http_port = random.randint(10000, 20000) + # start an http server in a separate thread + http_server = StreamingHTTPServer(hostname, http_port) + #topic_conf1 = PSTopic(ps_zone.conn, topic_name1,endpoint='amqp://' + hostname,endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none') + topic_conf1 = PSTopicS3(conn, topic_name1, zonegroup, endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none') + result, status = topic_conf1.set_config() + parsed_result = json.loads(result) + topic_arn1 = parsed_result['arn'] + assert_equal(status/100, 2) + #topic_conf2 = PSTopic(ps_zone.conn, topic_name2,endpoint='http://'+hostname+':'+str(http_port)) + topic_conf2 = PSTopicS3(conn, topic_name2, endpoint_args='http://'+hostname+':'+str(http_port)) + result, status = topic_conf2.set_config() + parsed_result = json.loads(result) + topic_arn2 = parsed_result['arn'] + assert_equal(status/100, 2) + # create bucket on the first of the rados zones + bucket = conn.create_bucket(bucket_name) + # wait for sync + #zone_meta_checkpoint(ps_zone.zone) + # create s3 notification with topic1 + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn1, + 'Events': ['s3:ObjectCreated:*'] + }] + s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + # create objects in the bucket + number_of_objects = 10 + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + key.set_contents_from_string('bar') + # wait for sync + #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name) + keys = list(bucket.list()) + # TODO: use exact match + receiver.verify_s3_events(keys, exact_match=False); + # update notification to use topic2 + topic_conf_list = [{'Id': notification_name, + 'TopicArn': topic_arn2, + 'Events': ['s3:ObjectCreated:*'] + }] + s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + # delete current objects and create new objects in the bucket + for key in bucket.list(): + key.delete() + for i in range(number_of_objects): + key = bucket.new_key(str(i+100)) + key.set_contents_from_string('bar') + # wait for sync + #zone_meta_checkpoint(ps_zone.zone) + #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name) + keys = 
list(bucket.list()) + # check that updates switched to http + # TODO: use exact match + http_server.verify_s3_events(keys, exact_match=False) + # cleanup + # delete objects from the bucket + stop_amqp_receiver(receiver, amqp_task) + for key in bucket.list(): + key.delete() + s3_notification_conf.del_config() + topic_conf1.del_config() + topic_conf2.del_config() + conn.delete_bucket(bucket_name) + http_server.close() + + +@attr('modification_required') +def test_ps_s3_multiple_topics_notification(): + """ test notification creation with multiple topics""" + return SkipTest('This test is yet to be modified.') + + hostname = get_ip() + zonegroup = 'default' + conn = connection() + ps_zone = None + bucket_name = gen_bucket_name() + topic_name1 = bucket_name+'amqp'+TOPIC_SUFFIX + topic_name2 = bucket_name+'http'+TOPIC_SUFFIX + # create topics + # start amqp receiver in a separate thread + exchange = 'ex1' + amqp_task, receiver = create_amqp_receiver_thread(exchange, topic_name1) + amqp_task.start() + # create random port for the http server + http_port = random.randint(10000, 20000) + # start an http server in a separate thread + http_server = StreamingHTTPServer(hostname, http_port) + #topic_conf1 = PSTopic(ps_zone.conn, topic_name1,endpoint='amqp://' + hostname,endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none') + topic_conf1 = PSTopicS3(conn, topic_name1, zonegroup, endpoint_args='amqp-exchange=' + exchange + '&amqp-ack-level=none') + result, status = topic_conf1.set_config() + parsed_result = json.loads(result) + topic_arn1 = parsed_result['arn'] + assert_equal(status/100, 2) + #topic_conf2 = PSTopic(ps_zone.conn, topic_name2,endpoint='http://'+hostname+':'+str(http_port)) + topic_conf2 = PSTopicS3(conn, topic_name2, zonegroup, endpoint_args='http://'+hostname+':'+str(http_port)) + result, status = topic_conf2.set_config() + parsed_result = json.loads(result) + topic_arn2 = parsed_result['arn'] + assert_equal(status/100, 2) + # create bucket on the first of the rados zones + bucket = conn.create_bucket(bucket_name) + # wait for sync + #zone_meta_checkpoint(ps_zone.zone) + # create s3 notification + notification_name1 = bucket_name + NOTIFICATION_SUFFIX + '_1' + notification_name2 = bucket_name + NOTIFICATION_SUFFIX + '_2' + topic_conf_list = [ + { + 'Id': notification_name1, + 'TopicArn': topic_arn1, + 'Events': ['s3:ObjectCreated:*'] + }, + { + 'Id': notification_name2, + 'TopicArn': topic_arn2, + 'Events': ['s3:ObjectCreated:*'] + }] + s3_notification_conf = PSNotificationS3(ps_zone.conn, bucket_name, topic_conf_list) + _, status = s3_notification_conf.set_config() + assert_equal(status/100, 2) + result, _ = s3_notification_conf.get_config() + assert_equal(len(result['TopicConfigurations']), 2) + assert_equal(result['TopicConfigurations'][0]['Id'], notification_name1) + assert_equal(result['TopicConfigurations'][1]['Id'], notification_name2) + # get auto-generated subscriptions + sub_conf1 = PSSubscription(ps_zone.conn, notification_name1, + topic_name1) + _, status = sub_conf1.get_config() + assert_equal(status/100, 2) + sub_conf2 = PSSubscription(ps_zone.conn, notification_name2, + topic_name2) + _, status = sub_conf2.get_config() + assert_equal(status/100, 2) + # create objects in the bucket + number_of_objects = 10 + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + key.set_contents_from_string('bar') + # wait for sync + #zone_bucket_checkpoint(ps_zone.zone, master_zone.zone, bucket_name) + # get the events from both of the subscription + result, _ = 
sub_conf1.get_events() + records = json.loads(result) + for record in records['Records']: + log.debug(record) + keys = list(bucket.list()) + # TODO: use exact match + verify_s3_records_by_elements(records, keys, exact_match=False) + receiver.verify_s3_events(keys, exact_match=False) + result, _ = sub_conf2.get_events() + parsed_result = json.loads(result) + for record in parsed_result['Records']: + log.debug(record) + keys = list(bucket.list()) + # TODO: use exact match + verify_s3_records_by_elements(records, keys, exact_match=False) + http_server.verify_s3_events(keys, exact_match=False) + # cleanup + stop_amqp_receiver(receiver, amqp_task) + s3_notification_conf.del_config() + topic_conf1.del_config() + topic_conf2.del_config() + # delete objects from the bucket + for key in bucket.list(): + key.delete() + conn.delete_bucket(bucket_name) + http_server.close() + + +def kafka_security(security_type): + """ test pushing kafka s3 notification securly to master """ + conn = connection() + zonegroup = 'default' + # create bucket + bucket_name = gen_bucket_name() + bucket = conn.create_bucket(bucket_name) + # name is constant for manual testing + topic_name = bucket_name+'_topic' + # create s3 topic + if security_type == 'SSL_SASL': + endpoint_address = 'kafka://alice:alice-secret@' + kafka_server + ':9094' + elif security_type == 'SSL': + endpoint_address = 'kafka://' + kafka_server + ':9093' + else: + assert False, 'unknown security method '+security_type + + KAFKA_DIR = os.environ['KAFKA_DIR'] + endpoint_args = 'push-endpoint='+endpoint_address+'&kafka-ack-level=broker&use-ssl=true&ca-location='+KAFKA_DIR+"/y-ca.crt" + + topic_conf = PSTopicS3(conn, topic_name, zonegroup, endpoint_args=endpoint_args) + + # create consumer on the topic + task, receiver = create_kafka_receiver_thread(topic_name) + task.start() + + topic_arn = topic_conf.set_config() + # create s3 notification + notification_name = bucket_name + NOTIFICATION_SUFFIX + topic_conf_list = [{'Id': notification_name, 'TopicArn': topic_arn, + 'Events': [] + }] + s3_notification_conf = PSNotificationS3(conn, bucket_name, topic_conf_list) + s3_notification_conf.set_config() + # create objects in the bucket (async) + number_of_objects = 10 + client_threads = [] + start_time = time.time() + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + content = str(os.urandom(1024*1024)) + thr = threading.Thread(target = set_contents_from_string, args=(key, content,)) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + time_diff = time.time() - start_time + print('average time for creation + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + try: + print('wait for 5sec for the messages...') + time.sleep(5) + keys = list(bucket.list()) + receiver.verify_s3_events(keys, exact_match=True) + # delete objects from the bucket + client_threads = [] + start_time = time.time() + for key in bucket.list(): + thr = threading.Thread(target = key.delete, args=()) + thr.start() + client_threads.append(thr) + [thr.join() for thr in client_threads] + time_diff = time.time() - start_time + print('average time for deletion + kafka notification is: ' + str(time_diff*1000/number_of_objects) + ' milliseconds') + print('wait for 5sec for the messages...') + time.sleep(5) + receiver.verify_s3_events(keys, exact_match=True, deletions=True) + except Exception as err: + assert False, str(err) + finally: + # cleanup + s3_notification_conf.del_config() + topic_conf.del_config() + # delete the 
bucket + for key in bucket.list(): + key.delete() + conn.delete_bucket(bucket_name) + stop_kafka_receiver(receiver, task) + + +@attr('kafka_ssl_test') +def test_ps_s3_notification_push_kafka_security_ssl(): + kafka_security('SSL') + + +@attr('kafka_ssl_test') +def test_ps_s3_notification_push_kafka_security_ssl_sasl(): + kafka_security('SSL_SASL') + diff --git a/src/test/rgw/kafka_stub.cc b/src/test/rgw/kafka_stub.cc new file mode 100644 index 000000000..6125a94cb --- /dev/null +++ b/src/test/rgw/kafka_stub.cc @@ -0,0 +1,68 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <librdkafka/rdkafka.h> + +const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt) { + return ""; +} + +rd_kafka_resp_err_t rd_kafka_last_error() { + return rd_kafka_resp_err_t(); +} + +const char *rd_kafka_err2str(rd_kafka_resp_err_t err) { + return ""; +} + +rd_kafka_conf_t *rd_kafka_conf_new() { + return nullptr; +} + +rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf, + const char *name, + const char *value, + char *errstr, size_t errstr_size) { + return rd_kafka_conf_res_t(); +} + +void rd_kafka_conf_set_dr_msg_cb(rd_kafka_conf_t *conf, + void (*dr_msg_cb) (rd_kafka_t *rk, + const rd_kafka_message_t * + rkmessage, + void *opaque)) {} + +void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque) {} + +rd_kafka_t *rd_kafka_new(rd_kafka_type_t type, rd_kafka_conf_t *conf, + char *errstr, size_t errstr_size) { + return nullptr; +} + +void rd_kafka_conf_destroy(rd_kafka_conf_t *conf) {} + +rd_kafka_resp_err_t rd_kafka_flush (rd_kafka_t *rk, int timeout_ms) { + return rd_kafka_resp_err_t(); +} + +void rd_kafka_destroy(rd_kafka_t *rk) {} + +rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk, const char *topic, + rd_kafka_topic_conf_t *conf) { + return nullptr; +} + +int rd_kafka_produce(rd_kafka_topic_t *rkt, int32_t partition, + int msgflags, + void *payload, size_t len, + const void *key, size_t keylen, + void *msg_opaque) { + return 0; +} + +int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms) { + return 0; +} + +void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt) {} + diff --git a/src/test/rgw/rgw_cr_test.cc b/src/test/rgw/rgw_cr_test.cc new file mode 100644 index 000000000..dc5d25d23 --- /dev/null +++ b/src/test/rgw/rgw_cr_test.cc @@ -0,0 +1,343 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab ft=cpp + +#include <cerrno> +#include <iostream> +#include <sstream> +#include <string> + +#include <fmt/format.h> + +#include "include/rados/librados.hpp" + +#include "common/common_init.h" +#include "common/config.h" +#include "common/ceph_argparse.h" +#include "common/debug.h" + +#include "rgw_coroutine.h" +#include "rgw_cr_rados.h" +#include "rgw_sal.h" +#include "rgw_sal_rados.h" + +#include "gtest/gtest.h" + +using namespace std::literals; + +static constexpr auto dout_subsys = ceph_subsys_rgw; + +static rgw::sal::RadosStore* store = nullptr; + +static const DoutPrefixProvider* dpp() { + struct GlobalPrefix : public DoutPrefixProvider { + CephContext *get_cct() const override { return g_ceph_context; } + unsigned get_subsys() const override { return dout_subsys; } + std::ostream& gen_prefix(std::ostream& out) const override { return out; } + }; + static GlobalPrefix global_dpp; + return &global_dpp; +} + +class StoreDestructor { + rgw::sal::Driver* driver; +public: + explicit StoreDestructor(rgw::sal::RadosStore* _s) : driver(_s) {} + ~StoreDestructor() { + 
DriverManager::close_storage(store); + } +}; + +struct TempPool { + inline static uint64_t num = 0; + std::string name = + fmt::format("{}-{}-{}", ::time(nullptr), ::getpid(),num++); + + TempPool() { + auto r = store->getRados()->get_rados_handle()->pool_create(name.c_str()); + assert(r == 0); + } + + ~TempPool() { + auto r = store->getRados()->get_rados_handle()->pool_delete(name.c_str()); + assert(r == 0); + } + + operator rgw_pool() { + return { name }; + } + + operator librados::IoCtx() { + librados::IoCtx ioctx; + auto r = store->getRados()->get_rados_handle()->ioctx_create(name.c_str(), + ioctx); + assert(r == 0); + return ioctx; + } +}; + +int run(RGWCoroutine* cr) { + RGWCoroutinesManager cr_mgr{store->ctx(), + store->getRados()->get_cr_registry()}; + std::list<RGWCoroutinesStack *> stacks; + auto stack = new RGWCoroutinesStack(store->ctx(), &cr_mgr); + stack->call(cr); + stacks.push_back(stack); + return cr_mgr.run(dpp(), stacks); +} + +TEST(ReadAttrs, Unfiltered) { + TempPool pool; + ceph::bufferlist bl; + auto dummy = "Dummy attribute value"s; + encode(dummy, bl); + const std::map<std::string, ceph::bufferlist> ref_attrs{ + { "foo"s, bl }, { "bar"s, bl }, { "baz"s, bl } + }; + auto oid = "object"s; + { + librados::IoCtx ioctx(pool); + librados::ObjectWriteOperation op; + op.setxattr("foo", bl); + op.setxattr("bar", bl); + op.setxattr("baz", bl); + auto r = ioctx.operate(oid, &op); + ASSERT_EQ(0, r); + } + std::map<std::string, ceph::bufferlist> attrs; + auto r = run(new RGWSimpleRadosReadAttrsCR(dpp(), store, {pool, oid}, &attrs, + true)); + ASSERT_EQ(0, r); + ASSERT_EQ(ref_attrs, attrs); +} + +TEST(ReadAttrs, Filtered) { + TempPool pool; + ceph::bufferlist bl; + auto dummy = "Dummy attribute value"s; + encode(dummy, bl); + const std::map<std::string, ceph::bufferlist> ref_attrs{ + { RGW_ATTR_PREFIX "foo"s, bl }, + { RGW_ATTR_PREFIX "bar"s, bl }, + { RGW_ATTR_PREFIX "baz"s, bl } + }; + auto oid = "object"s; + { + librados::IoCtx ioctx(pool); + librados::ObjectWriteOperation op; + op.setxattr(RGW_ATTR_PREFIX "foo", bl); + op.setxattr(RGW_ATTR_PREFIX "bar", bl); + op.setxattr(RGW_ATTR_PREFIX "baz", bl); + op.setxattr("oneOfTheseThingsIsNotLikeTheOthers", bl); + auto r = ioctx.operate(oid, &op); + ASSERT_EQ(0, r); + } + std::map<std::string, ceph::bufferlist> attrs; + auto r = run(new RGWSimpleRadosReadAttrsCR(dpp(), store, {pool, oid}, &attrs, + false)); + ASSERT_EQ(0, r); + ASSERT_EQ(ref_attrs, attrs); +} + +TEST(Read, Dne) { + TempPool pool; + std::string result; + auto r = run(new RGWSimpleRadosReadCR(dpp(), store, {pool, "doesnotexist"}, + &result, false)); + ASSERT_EQ(-ENOENT, r); +} + +TEST(Read, Read) { + TempPool pool; + auto data = "I am test data!"sv; + auto oid = "object"s; + { + bufferlist bl; + encode(data, bl); + librados::IoCtx ioctx(pool); + auto r = ioctx.write_full(oid, bl); + ASSERT_EQ(0, r); + } + std::string result; + auto r = run(new RGWSimpleRadosReadCR(dpp(), store, {pool, oid}, &result, + false)); + ASSERT_EQ(0, r); + ASSERT_EQ(result, data); +} + +TEST(Read, ReadVersion) { + TempPool pool; + auto data = "I am test data!"sv; + auto oid = "object"s; + RGWObjVersionTracker wobjv; + { + bufferlist bl; + encode(data, bl); + librados::IoCtx ioctx(pool); + librados::ObjectWriteOperation op; + wobjv.generate_new_write_ver(store->ctx()); + wobjv.prepare_op_for_write(&op); + op.write_full(bl); + auto r = ioctx.operate(oid, &op); + EXPECT_EQ(0, r); + wobjv.apply_write(); + } + RGWObjVersionTracker robjv; + std::string result; + auto r = run(new 
RGWSimpleRadosReadCR(dpp(), store, {pool, oid}, &result, + false, &robjv)); + ASSERT_EQ(0, r); + ASSERT_EQ(result, data); + data = "I am NEW test data!"; + { + bufferlist bl; + encode(data, bl); + librados::IoCtx ioctx(pool); + librados::ObjectWriteOperation op; + wobjv.generate_new_write_ver(store->ctx()); + wobjv.prepare_op_for_write(&op); + op.write_full(bl); + r = ioctx.operate(oid, &op); + EXPECT_EQ(0, r); + wobjv.apply_write(); + } + result.clear(); + r = run(new RGWSimpleRadosReadCR(dpp(), store, {pool, oid}, &result, false, + &robjv)); + ASSERT_EQ(-ECANCELED, r); + ASSERT_TRUE(result.empty()); + + robjv.clear(); + r = run(new RGWSimpleRadosReadCR(dpp(), store, {pool, oid}, &result, false, + &robjv)); + ASSERT_EQ(0, r); + ASSERT_EQ(result, data); + ASSERT_EQ(wobjv.read_version, robjv.read_version); +} + +TEST(Write, Exclusive) { + TempPool pool; + auto oid = "object"s; + { + bufferlist bl; + bl.append("I'm some data!"s); + librados::IoCtx ioctx(pool); + auto r = ioctx.write_full(oid, bl); + ASSERT_EQ(0, r); + } + auto r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid}, + "I am some DIFFERENT data!"s, nullptr, + true)); + ASSERT_EQ(-EEXIST, r); +} + +TEST(Write, Write) { + TempPool pool; + auto oid = "object"s; + auto data = "I'm some data!"s; + auto r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid}, + data, nullptr, true)); + ASSERT_EQ(0, r); + bufferlist bl; + librados::IoCtx ioctx(pool); + ioctx.read(oid, bl, 0, 0); + ASSERT_EQ(0, r); + std::string result; + decode(result, bl); + ASSERT_EQ(data, result); +} + +TEST(Write, ObjV) { + TempPool pool; + auto oid = "object"s; + RGWObjVersionTracker objv; + objv.generate_new_write_ver(store->ctx()); + auto r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid}, + "I'm some data!"s, &objv, + true)); + RGWObjVersionTracker interfering_objv(objv); + r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid}, + "I'm some newer, better data!"s, + &interfering_objv, false)); + ASSERT_EQ(0, r); + r = run(new RGWSimpleRadosWriteCR(dpp(), store, {pool, oid}, + "I'm some treacherous, obsolete data!"s, + &objv, false)); + ASSERT_EQ(-ECANCELED, r); +} + +TEST(WriteAttrs, Attrs) { + TempPool pool; + auto oid = "object"s; + bufferlist bl; + bl.append("I'm some data."); + std::map<std::string, bufferlist> wrattrs { + { "foo", bl }, { "bar", bl }, { "baz", bl } + }; + auto r = run(new RGWSimpleRadosWriteAttrsCR(dpp(), store, {pool, oid}, + wrattrs, nullptr, true)); + ASSERT_EQ(0, r); + std::map<std::string, bufferlist> rdattrs; + librados::IoCtx ioctx(pool); + r = ioctx.getxattrs(oid, rdattrs); + ASSERT_EQ(0, r); + ASSERT_EQ(wrattrs, rdattrs); +} + +TEST(WriteAttrs, Empty) { + TempPool pool; + auto oid = "object"s; + bufferlist bl; + std::map<std::string, bufferlist> wrattrs { + { "foo", bl }, { "bar", bl }, { "baz", bl } + }; + // With an empty bufferlist all attributes should be skipped. 
+ auto r = run(new RGWSimpleRadosWriteAttrsCR(dpp(), store, {pool, oid}, + wrattrs, nullptr, true)); + ASSERT_EQ(0, r); + std::map<std::string, bufferlist> rdattrs; + librados::IoCtx ioctx(pool); + r = ioctx.getxattrs(oid, rdattrs); + ASSERT_EQ(0, r); + ASSERT_TRUE(rdattrs.empty()); +} + +int main(int argc, const char **argv) +{ + auto args = argv_to_vec(argc, argv); + auto cct = rgw_global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, + CODE_ENVIRONMENT_UTILITY, 0); + + // for region -> zonegroup conversion (must happen before common_init_finish()) + if (!g_conf()->rgw_region.empty() && g_conf()->rgw_zonegroup.empty()) { + g_conf().set_val_or_die("rgw_zonegroup", g_conf()->rgw_region.c_str()); + } + + /* common_init_finish needs to be called after g_conf().set_val() */ + common_init_finish(g_ceph_context); + + + DriverManager::Config cfg = DriverManager::get_config(true, g_ceph_context); + + store = static_cast<rgw::sal::RadosStore*>( + DriverManager::get_storage(dpp(), + g_ceph_context, + cfg, + false, + false, + false, + false, + false, + true, + false)); + if (!store) { + std::cerr << "couldn't init storage provider" << std::endl; + return 5; //EIO + } + StoreDestructor store_destructor(static_cast<rgw::sal::RadosStore*>(store)); + + std::string pool{"rgw_cr_test"}; + store->getRados()->create_pool(dpp(), pool); + + testing::InitGoogleTest(); + return RUN_ALL_TESTS(); +} diff --git a/src/test/rgw/rgw_multi/__init__.py b/src/test/rgw/rgw_multi/__init__.py new file mode 100644 index 000000000..e69de29bb --- /dev/null +++ b/src/test/rgw/rgw_multi/__init__.py diff --git a/src/test/rgw/rgw_multi/conn.py b/src/test/rgw/rgw_multi/conn.py new file mode 100644 index 000000000..59bc2fdd3 --- /dev/null +++ b/src/test/rgw/rgw_multi/conn.py @@ -0,0 +1,41 @@ +import boto +import boto.s3.connection +import boto.iam.connection + +def get_gateway_connection(gateway, credentials): + """ connect to the given gateway """ + if gateway.connection is None: + gateway.connection = boto.connect_s3( + aws_access_key_id = credentials.access_key, + aws_secret_access_key = credentials.secret, + host = gateway.host, + port = gateway.port, + is_secure = False, + calling_format = boto.s3.connection.OrdinaryCallingFormat()) + return gateway.connection + +def get_gateway_secure_connection(gateway, credentials): + """ secure connect to the given gateway """ + if gateway.ssl_port == 0: + return None + if gateway.secure_connection is None: + gateway.secure_connection = boto.connect_s3( + aws_access_key_id = credentials.access_key, + aws_secret_access_key = credentials.secret, + host = gateway.host, + port = gateway.ssl_port, + is_secure = True, + validate_certs=False, + calling_format = boto.s3.connection.OrdinaryCallingFormat()) + return gateway.secure_connection + +def get_gateway_iam_connection(gateway, credentials): + """ connect to iam api of the given gateway """ + if gateway.iam_connection is None: + gateway.iam_connection = boto.connect_iam( + aws_access_key_id = credentials.access_key, + aws_secret_access_key = credentials.secret, + host = gateway.host, + port = gateway.port, + is_secure = False) + return gateway.iam_connection diff --git a/src/test/rgw/rgw_multi/multisite.py b/src/test/rgw/rgw_multi/multisite.py new file mode 100644 index 000000000..5d4dcd1aa --- /dev/null +++ b/src/test/rgw/rgw_multi/multisite.py @@ -0,0 +1,407 @@ +from abc import ABCMeta, abstractmethod +from io import StringIO + +import json + +from .conn import get_gateway_connection, get_gateway_iam_connection, get_gateway_secure_connection + 
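# (illustrative sketch, not part of this module) Cluster and Gateway below are
# abstract interfaces; the test runner supplies concrete implementations before
# the tests are initialized. A minimal Cluster-like implementation that shells
# out to radosgw-admin could look as follows; the class name LocalCluster is
# hypothetical, it assumes radosgw-admin is on PATH, and it ignores the
# read_only/debug/stdin hints that callers may pass:
import subprocess

class LocalCluster:
    """ run radosgw-admin against a locally running cluster """
    def admin(self, args=None, check_retcode=True, **kwargs):
        cmd = ['radosgw-admin'] + (args or [])
        p = subprocess.run(cmd, capture_output=True, text=True)
        if check_retcode:
            assert p.returncode == 0, p.stderr
        return p.stdout, p.returncode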
+class Cluster: + """ interface to run commands against a distinct ceph cluster """ + __metaclass__ = ABCMeta + + @abstractmethod + def admin(self, args = None, **kwargs): + """ execute a radosgw-admin command """ + pass + +class Gateway: + """ interface to control a single radosgw instance """ + __metaclass__ = ABCMeta + + def __init__(self, host = None, port = None, cluster = None, zone = None, ssl_port = 0): + self.host = host + self.port = port + self.cluster = cluster + self.zone = zone + self.connection = None + self.secure_connection = None + self.ssl_port = ssl_port + self.iam_connection = None + + @abstractmethod + def start(self, args = []): + """ start the gateway with the given args """ + pass + + @abstractmethod + def stop(self): + """ stop the gateway """ + pass + + def endpoint(self): + return 'http://%s:%d' % (self.host, self.port) + +class SystemObject: + """ interface for system objects, represented in json format and + manipulated with radosgw-admin commands """ + __metaclass__ = ABCMeta + + def __init__(self, data = None, uuid = None): + self.data = data + self.id = uuid + if data: + self.load_from_json(data) + + @abstractmethod + def build_command(self, command): + """ return the command line for the given command, including arguments + to specify this object """ + pass + + @abstractmethod + def load_from_json(self, data): + """ update internal state based on json data """ + pass + + def command(self, cluster, cmd, args = None, **kwargs): + """ run the given command and return the output and retcode """ + args = self.build_command(cmd) + (args or []) + return cluster.admin(args, **kwargs) + + def json_command(self, cluster, cmd, args = None, **kwargs): + """ run the given command, parse the output and return the resulting + data and retcode """ + s, r = self.command(cluster, cmd, args or [], **kwargs) + if r == 0: + data = json.loads(s) + self.load_from_json(data) + self.data = data + return self.data, r + + # mixins for supported commands + class Create(object): + def create(self, cluster, args = None, **kwargs): + """ create the object with the given arguments """ + return self.json_command(cluster, 'create', args, **kwargs) + + class Delete(object): + def delete(self, cluster, args = None, **kwargs): + """ delete the object """ + # not json_command() because delete has no output + _, r = self.command(cluster, 'delete', args, **kwargs) + if r == 0: + self.data = None + return r + + class Get(object): + def get(self, cluster, args = None, **kwargs): + """ read the object from storage """ + kwargs['read_only'] = True + return self.json_command(cluster, 'get', args, **kwargs) + + class Set(object): + def set(self, cluster, data, args = None, **kwargs): + """ set the object by json """ + kwargs['stdin'] = StringIO(json.dumps(data)) + return self.json_command(cluster, 'set', args, **kwargs) + + class Modify(object): + def modify(self, cluster, args = None, **kwargs): + """ modify the object with the given arguments """ + return self.json_command(cluster, 'modify', args, **kwargs) + + class CreateDelete(Create, Delete): pass + class GetSet(Get, Set): pass + +class Zone(SystemObject, SystemObject.CreateDelete, SystemObject.GetSet, SystemObject.Modify): + def __init__(self, name, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None): + self.name = name + self.zonegroup = zonegroup + self.cluster = cluster + self.gateways = gateways or [] + super(Zone, self).__init__(data, zone_id) + + def zone_arg(self): + """ command-line argument to specify this 
zone """ + return ['--rgw-zone', self.name] + + def zone_args(self): + """ command-line arguments to specify this zone/zonegroup/realm """ + args = self.zone_arg() + if self.zonegroup: + args += self.zonegroup.zonegroup_args() + return args + + def build_command(self, command): + """ build a command line for the given command and args """ + return ['zone', command] + self.zone_args() + + def load_from_json(self, data): + """ load the zone from json """ + self.id = data['id'] + self.name = data['name'] + + def start(self, args = None): + """ start all gateways """ + for g in self.gateways: + g.start(args) + + def stop(self): + """ stop all gateways """ + for g in self.gateways: + g.stop() + + def period(self): + return self.zonegroup.period if self.zonegroup else None + + def realm(self): + return self.zonegroup.realm() if self.zonegroup else None + + def is_read_only(self): + return False + + def tier_type(self): + raise NotImplementedError + + def syncs_from(self, zone_name): + return zone_name != self.name + + def has_buckets(self): + return True + + def has_roles(self): + return True + + def get_conn(self, credentials): + return ZoneConn(self, credentials) # not implemented, but can be used + +class ZoneConn(object): + def __init__(self, zone, credentials): + self.zone = zone + self.name = zone.name + """ connect to the zone's first gateway """ + if isinstance(credentials, list): + self.credentials = credentials[0] + else: + self.credentials = credentials + + if self.zone.gateways is not None: + self.conn = get_gateway_connection(self.zone.gateways[0], self.credentials) + self.secure_conn = get_gateway_secure_connection(self.zone.gateways[0], self.credentials) + + self.iam_conn = get_gateway_iam_connection(self.zone.gateways[0], self.credentials) + + # create connections for the rest of the gateways (if exist) + for gw in list(self.zone.gateways): + get_gateway_connection(gw, self.credentials) + get_gateway_secure_connection(gw, self.credentials) + + get_gateway_iam_connection(gw, self.credentials) + + + def get_connection(self): + return self.conn + + def get_iam_connection(self): + return self.iam_conn + + def get_bucket(self, bucket_name, credentials): + raise NotImplementedError + + def check_bucket_eq(self, zone, bucket_name): + raise NotImplementedError + +class ZoneGroup(SystemObject, SystemObject.CreateDelete, SystemObject.GetSet, SystemObject.Modify): + def __init__(self, name, period = None, data = None, zonegroup_id = None, zones = None, master_zone = None): + self.name = name + self.period = period + self.zones = zones or [] + self.master_zone = master_zone + super(ZoneGroup, self).__init__(data, zonegroup_id) + self.rw_zones = [] + self.ro_zones = [] + self.zones_by_type = {} + for z in self.zones: + if z.is_read_only(): + self.ro_zones.append(z) + else: + self.rw_zones.append(z) + + def zonegroup_arg(self): + """ command-line argument to specify this zonegroup """ + return ['--rgw-zonegroup', self.name] + + def zonegroup_args(self): + """ command-line arguments to specify this zonegroup/realm """ + args = self.zonegroup_arg() + realm = self.realm() + if realm: + args += realm.realm_arg() + return args + + def build_command(self, command): + """ build a command line for the given command and args """ + return ['zonegroup', command] + self.zonegroup_args() + + def zone_by_id(self, zone_id): + """ return the matching zone by id """ + for zone in self.zones: + if zone.id == zone_id: + return zone + return None + + def load_from_json(self, data): + """ load the zonegroup from 
json """ + self.id = data['id'] + self.name = data['name'] + master_id = data['master_zone'] + if not self.master_zone or master_id != self.master_zone.id: + self.master_zone = self.zone_by_id(master_id) + + def add(self, cluster, zone, args = None, **kwargs): + """ add an existing zone to the zonegroup """ + args = zone.zone_arg() + (args or []) + data, r = self.json_command(cluster, 'add', args, **kwargs) + if r == 0: + zone.zonegroup = self + self.zones.append(zone) + return data, r + + def remove(self, cluster, zone, args = None, **kwargs): + """ remove an existing zone from the zonegroup """ + args = zone.zone_arg() + (args or []) + data, r = self.json_command(cluster, 'remove', args, **kwargs) + if r == 0: + zone.zonegroup = None + self.zones.remove(zone) + return data, r + + def realm(self): + return self.period.realm if self.period else None + +class Period(SystemObject, SystemObject.Get): + def __init__(self, realm = None, data = None, period_id = None, zonegroups = None, master_zonegroup = None): + self.realm = realm + self.zonegroups = zonegroups or [] + self.master_zonegroup = master_zonegroup + super(Period, self).__init__(data, period_id) + + def zonegroup_by_id(self, zonegroup_id): + """ return the matching zonegroup by id """ + for zonegroup in self.zonegroups: + if zonegroup.id == zonegroup_id: + return zonegroup + return None + + def build_command(self, command): + """ build a command line for the given command and args """ + return ['period', command] + + def load_from_json(self, data): + """ load the period from json """ + self.id = data['id'] + master_id = data['master_zonegroup'] + if not self.master_zonegroup or master_id != self.master_zonegroup.id: + self.master_zonegroup = self.zonegroup_by_id(master_id) + + def update(self, zone, args = None, **kwargs): + """ run 'radosgw-admin period update' on the given zone """ + assert(zone.cluster) + args = zone.zone_args() + (args or []) + if kwargs.pop('commit', False): + args.append('--commit') + return self.json_command(zone.cluster, 'update', args, **kwargs) + + def commit(self, zone, args = None, **kwargs): + """ run 'radosgw-admin period commit' on the given zone """ + assert(zone.cluster) + args = zone.zone_args() + (args or []) + return self.json_command(zone.cluster, 'commit', args, **kwargs) + +class Realm(SystemObject, SystemObject.CreateDelete, SystemObject.GetSet): + def __init__(self, name, period = None, data = None, realm_id = None): + self.name = name + self.current_period = period + super(Realm, self).__init__(data, realm_id) + + def realm_arg(self): + """ return the command-line arguments that specify this realm """ + return ['--rgw-realm', self.name] + + def build_command(self, command): + """ build a command line for the given command and args """ + return ['realm', command] + self.realm_arg() + + def load_from_json(self, data): + """ load the realm from json """ + self.id = data['id'] + + def pull(self, cluster, gateway, credentials, args = [], **kwargs): + """ pull an existing realm from the given gateway """ + args += ['--url', gateway.endpoint()] + args += credentials.credential_args() + return self.json_command(cluster, 'pull', args, **kwargs) + + def master_zonegroup(self): + """ return the current period's master zonegroup """ + if self.current_period is None: + return None + return self.current_period.master_zonegroup + + def meta_master_zone(self): + """ return the current period's metadata master zone """ + zonegroup = self.master_zonegroup() + if zonegroup is None: + return None + return 
zonegroup.master_zone + +class Credentials: + def __init__(self, access_key, secret): + self.access_key = access_key + self.secret = secret + + def credential_args(self): + return ['--access-key', self.access_key, '--secret', self.secret] + +class User(SystemObject): + def __init__(self, uid, data = None, name = None, credentials = None, tenant = None): + self.name = name + self.credentials = credentials or [] + self.tenant = tenant + super(User, self).__init__(data, uid) + + def user_arg(self): + """ command-line argument to specify this user """ + args = ['--uid', self.id] + if self.tenant: + args += ['--tenant', self.tenant] + return args + + def build_command(self, command): + """ build a command line for the given command and args """ + return ['user', command] + self.user_arg() + + def load_from_json(self, data): + """ load the user from json """ + self.id = data['user_id'] + self.name = data['display_name'] + self.credentials = [Credentials(k['access_key'], k['secret_key']) for k in data['keys']] + + def create(self, zone, args = None, **kwargs): + """ create the user with the given arguments """ + assert(zone.cluster) + args = zone.zone_args() + (args or []) + return self.json_command(zone.cluster, 'create', args, **kwargs) + + def info(self, zone, args = None, **kwargs): + """ read the user from storage """ + assert(zone.cluster) + args = zone.zone_args() + (args or []) + kwargs['read_only'] = True + return self.json_command(zone.cluster, 'info', args, **kwargs) + + def delete(self, zone, args = None, **kwargs): + """ delete the user """ + assert(zone.cluster) + args = zone.zone_args() + (args or []) + return self.command(zone.cluster, 'delete', args, **kwargs) diff --git a/src/test/rgw/rgw_multi/tests.py b/src/test/rgw/rgw_multi/tests.py new file mode 100644 index 000000000..156fac12e --- /dev/null +++ b/src/test/rgw/rgw_multi/tests.py @@ -0,0 +1,2861 @@ +import json +import random +import string +import sys +import time +import logging +import errno +import dateutil.parser + +from itertools import combinations +from itertools import zip_longest +from io import StringIO + +import boto +import boto.s3.connection +from boto.s3.website import WebsiteConfiguration +from boto.s3.cors import CORSConfiguration + +from nose.tools import eq_ as eq +from nose.tools import assert_not_equal, assert_equal +from nose.plugins.attrib import attr +from nose.plugins.skip import SkipTest + +from .multisite import Zone, ZoneGroup, Credentials + +from .conn import get_gateway_connection +from .tools import assert_raises + +class Config: + """ test configuration """ + def __init__(self, **kwargs): + # by default, wait up to 5 minutes before giving up on a sync checkpoint + self.checkpoint_retries = kwargs.get('checkpoint_retries', 60) + self.checkpoint_delay = kwargs.get('checkpoint_delay', 5) + # allow some time for realm reconfiguration after changing master zone + self.reconfigure_delay = kwargs.get('reconfigure_delay', 5) + self.tenant = kwargs.get('tenant', '') + +# rgw multisite tests, written against the interfaces provided in rgw_multi. 
+# these tests must be initialized and run by another module that provides +# implementations of these interfaces by calling init_multi() +realm = None +user = None +config = None +def init_multi(_realm, _user, _config=None): + global realm + realm = _realm + global user + user = _user + global config + config = _config or Config() + realm_meta_checkpoint(realm) + +def get_user(): + return user.id if user is not None else '' + +def get_tenant(): + return config.tenant if config is not None and config.tenant is not None else '' + +def get_realm(): + return realm + +log = logging.getLogger('rgw_multi.tests') + +num_buckets = 0 +run_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6)) + +num_roles = 0 + +def get_zone_connection(zone, credentials): + """ connect to the zone's first gateway """ + if isinstance(credentials, list): + credentials = credentials[0] + return get_gateway_connection(zone.gateways[0], credentials) + +def mdlog_list(zone, period = None): + cmd = ['mdlog', 'list'] + if period: + cmd += ['--period', period] + (mdlog_json, _) = zone.cluster.admin(cmd, read_only=True) + return json.loads(mdlog_json) + +def mdlog_autotrim(zone): + zone.cluster.admin(['mdlog', 'autotrim']) + +def datalog_list(zone, args = None): + cmd = ['datalog', 'list'] + (args or []) + (datalog_json, _) = zone.cluster.admin(cmd, read_only=True) + return json.loads(datalog_json) + +def datalog_status(zone): + cmd = ['datalog', 'status'] + (datalog_json, _) = zone.cluster.admin(cmd, read_only=True) + return json.loads(datalog_json) + +def datalog_autotrim(zone): + zone.cluster.admin(['datalog', 'autotrim']) + +def bilog_list(zone, bucket, args = None): + cmd = ['bilog', 'list', '--bucket', bucket] + (args or []) + cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else [] + bilog, _ = zone.cluster.admin(cmd, read_only=True) + return json.loads(bilog) + +def bilog_autotrim(zone, args = None): + zone.cluster.admin(['bilog', 'autotrim'] + (args or [])) + +def bucket_layout(zone, bucket, args = None): + (bl_output,_) = zone.cluster.admin(['bucket', 'layout', '--bucket', bucket] + (args or [])) + return json.loads(bl_output) + +def parse_meta_sync_status(meta_sync_status_json): + log.debug('current meta sync status=%s', meta_sync_status_json) + sync_status = json.loads(meta_sync_status_json) + + sync_info = sync_status['sync_status']['info'] + global_sync_status = sync_info['status'] + num_shards = sync_info['num_shards'] + period = sync_info['period'] + realm_epoch = sync_info['realm_epoch'] + + sync_markers=sync_status['sync_status']['markers'] + log.debug('sync_markers=%s', sync_markers) + assert(num_shards == len(sync_markers)) + + markers={} + for i in range(num_shards): + # get marker, only if it's an incremental marker for the same realm epoch + if realm_epoch > sync_markers[i]['val']['realm_epoch'] or sync_markers[i]['val']['state'] == 0: + markers[i] = '' + else: + markers[i] = sync_markers[i]['val']['marker'] + + return period, realm_epoch, num_shards, markers + +def meta_sync_status(zone): + for _ in range(config.checkpoint_retries): + cmd = ['metadata', 'sync', 'status'] + zone.zone_args() + meta_sync_status_json, retcode = zone.cluster.admin(cmd, check_retcode=False, read_only=True) + if retcode == 0: + return parse_meta_sync_status(meta_sync_status_json) + assert(retcode == 2) # ENOENT + time.sleep(config.checkpoint_delay) + + assert False, 'failed to read metadata sync status for zone=%s' % zone.name + +def meta_master_log_status(master_zone): + cmd = 
['mdlog', 'status'] + master_zone.zone_args() + mdlog_status_json, retcode = master_zone.cluster.admin(cmd, read_only=True) + mdlog_status = json.loads(mdlog_status_json) + + markers = {i: s['marker'] for i, s in enumerate(mdlog_status)} + log.debug('master meta markers=%s', markers) + return markers + +def compare_meta_status(zone, log_status, sync_status): + if len(log_status) != len(sync_status): + log.error('len(log_status)=%d, len(sync_status)=%d', len(log_status), len(sync_status)) + return False + + msg = '' + for i, l, s in zip(log_status, log_status.values(), sync_status.values()): + if l > s: + if len(msg): + msg += ', ' + msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s + + if len(msg) > 0: + log.warning('zone %s behind master: %s', zone.name, msg) + return False + + return True + +def zone_meta_checkpoint(zone, meta_master_zone = None, master_status = None): + if not meta_master_zone: + meta_master_zone = zone.realm().meta_master_zone() + if not master_status: + master_status = meta_master_log_status(meta_master_zone) + + current_realm_epoch = realm.current_period.data['realm_epoch'] + + log.info('starting meta checkpoint for zone=%s', zone.name) + + for _ in range(config.checkpoint_retries): + period, realm_epoch, num_shards, sync_status = meta_sync_status(zone) + if realm_epoch < current_realm_epoch: + log.warning('zone %s is syncing realm epoch=%d, behind current realm epoch=%d', + zone.name, realm_epoch, current_realm_epoch) + else: + log.debug('log_status=%s', master_status) + log.debug('sync_status=%s', sync_status) + if compare_meta_status(zone, master_status, sync_status): + log.info('finish meta checkpoint for zone=%s', zone.name) + return + + time.sleep(config.checkpoint_delay) + assert False, 'failed meta checkpoint for zone=%s' % zone.name + +def zonegroup_meta_checkpoint(zonegroup, meta_master_zone = None, master_status = None): + if not meta_master_zone: + meta_master_zone = zonegroup.realm().meta_master_zone() + if not master_status: + master_status = meta_master_log_status(meta_master_zone) + + for zone in zonegroup.zones: + if zone == meta_master_zone: + continue + zone_meta_checkpoint(zone, meta_master_zone, master_status) + +def realm_meta_checkpoint(realm): + log.info('meta checkpoint') + + meta_master_zone = realm.meta_master_zone() + master_status = meta_master_log_status(meta_master_zone) + + for zonegroup in realm.current_period.zonegroups: + zonegroup_meta_checkpoint(zonegroup, meta_master_zone, master_status) + +def parse_data_sync_status(data_sync_status_json): + log.debug('current data sync status=%s', data_sync_status_json) + sync_status = json.loads(data_sync_status_json) + + global_sync_status=sync_status['sync_status']['info']['status'] + num_shards=sync_status['sync_status']['info']['num_shards'] + + sync_markers=sync_status['sync_status']['markers'] + log.debug('sync_markers=%s', sync_markers) + assert(num_shards == len(sync_markers)) + + markers={} + for i in range(num_shards): + markers[i] = sync_markers[i]['val']['marker'] + + return (num_shards, markers) + +def data_sync_status(target_zone, source_zone): + if target_zone == source_zone: + return None + + for _ in range(config.checkpoint_retries): + cmd = ['data', 'sync', 'status'] + target_zone.zone_args() + cmd += ['--source-zone', source_zone.name] + data_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True) + if retcode == 0: + return parse_data_sync_status(data_sync_status_json) + + assert(retcode == 2) # ENOENT + 
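# retcode 2 (ENOENT) is tolerated here: it usually just means that data sync
# has not been initialized on the target zone yet, so the loop sleeps and
# retries below. Once a status is returned, the caller compares per-shard
# markers; a zone counts as caught up when every sync marker compares
# lexicographically >= the master's log marker for that shard, e.g.
# (marker values are made up):
#   master = {0: '00000000010.100.5', 1: '00000000007.80.5'}
#   synced = {0: '00000000010.100.5', 1: '00000000008.90.5'}
#   caught_up = all(synced[i] >= m for i, m in master.items())  # True
# (see compare_data_status()/compare_meta_status())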
time.sleep(config.checkpoint_delay) + + assert False, 'failed to read data sync status for target_zone=%s source_zone=%s' % \ + (target_zone.name, source_zone.name) + +def bucket_sync_status(target_zone, source_zone, bucket_name): + if target_zone == source_zone: + return None + + cmd = ['bucket', 'sync', 'markers'] + target_zone.zone_args() + cmd += ['--source-zone', source_zone.name] + cmd += ['--bucket', bucket_name] + cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else [] + while True: + bucket_sync_status_json, retcode = target_zone.cluster.admin(cmd, check_retcode=False, read_only=True) + if retcode == 0: + break + + assert(retcode == 2) # ENOENT + + sync_status = json.loads(bucket_sync_status_json) + + markers={} + for entry in sync_status: + val = entry['val'] + pos = val['inc_marker']['position'].split('#')[-1] # get rid of shard id; e.g., 6#00000000002.132.3 -> 00000000002.132.3 + markers[entry['key']] = pos + + return markers + +def data_source_log_status(source_zone): + source_cluster = source_zone.cluster + cmd = ['datalog', 'status'] + source_zone.zone_args() + datalog_status_json, retcode = source_cluster.admin(cmd, read_only=True) + datalog_status = json.loads(datalog_status_json) + + markers = {i: s['marker'] for i, s in enumerate(datalog_status)} + log.debug('data markers for zone=%s markers=%s', source_zone.name, markers) + return markers + +def bucket_source_log_status(source_zone, bucket_name): + cmd = ['bilog', 'status'] + source_zone.zone_args() + cmd += ['--bucket', bucket_name] + cmd += ['--tenant', config.tenant, '--uid', user.name] if config.tenant else [] + source_cluster = source_zone.cluster + bilog_status_json, retcode = source_cluster.admin(cmd, read_only=True) + bilog_status = json.loads(bilog_status_json) + + m={} + markers={} + try: + m = bilog_status['markers'] + except: + pass + + for s in m: + key = s['key'] + val = s['val'] + markers[key] = val + + log.debug('bilog markers for zone=%s bucket=%s markers=%s', source_zone.name, bucket_name, markers) + return markers + +def compare_data_status(target_zone, source_zone, log_status, sync_status): + if len(log_status) != len(sync_status): + log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status)) + return False + + msg = '' + for i, l, s in zip(log_status, log_status.values(), sync_status.values()): + if l > s: + if len(msg): + msg += ', ' + msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s + + if len(msg) > 0: + log.warning('data of zone %s behind zone %s: %s', target_zone.name, source_zone.name, msg) + return False + + return True + +def compare_bucket_status(target_zone, source_zone, bucket_name, log_status, sync_status): + if len(log_status) != len(sync_status): + log.error('len(log_status)=%d len(sync_status)=%d', len(log_status), len(sync_status)) + return False + + msg = '' + for i, l, s in zip(log_status, log_status.values(), sync_status.values()): + if l > s: + if len(msg): + msg += ', ' + msg += 'shard=' + str(i) + ' master=' + l + ' target=' + s + + if len(msg) > 0: + log.warning('bucket %s zone %s behind zone %s: %s', bucket_name, target_zone.name, source_zone.name, msg) + return False + + return True + +def zone_data_checkpoint(target_zone, source_zone): + if not target_zone.syncs_from(source_zone.name): + return + + log_status = data_source_log_status(source_zone) + log.info('starting data checkpoint for target_zone=%s source_zone=%s', target_zone.name, source_zone.name) + + for _ in range(config.checkpoint_retries): + num_shards, 
sync_status = data_sync_status(target_zone, source_zone) + + log.debug('log_status=%s', log_status) + log.debug('sync_status=%s', sync_status) + + if compare_data_status(target_zone, source_zone, log_status, sync_status): + log.info('finished data checkpoint for target_zone=%s source_zone=%s', + target_zone.name, source_zone.name) + return + time.sleep(config.checkpoint_delay) + + assert False, 'failed data checkpoint for target_zone=%s source_zone=%s' % \ + (target_zone.name, source_zone.name) + +def zonegroup_data_checkpoint(zonegroup_conns): + for source_conn in zonegroup_conns.rw_zones: + for target_conn in zonegroup_conns.zones: + if source_conn.zone == target_conn.zone: + continue + log.debug('data checkpoint: source=%s target=%s', source_conn.zone.name, target_conn.zone.name) + zone_data_checkpoint(target_conn.zone, source_conn.zone) + +def zone_bucket_checkpoint(target_zone, source_zone, bucket_name): + if not target_zone.syncs_from(source_zone.name): + return + + cmd = ['bucket', 'sync', 'checkpoint'] + cmd += ['--bucket', bucket_name, '--source-zone', source_zone.name] + retry_delay_ms = config.checkpoint_delay * 1000 + timeout_sec = config.checkpoint_retries * config.checkpoint_delay + cmd += ['--retry-delay-ms', str(retry_delay_ms), '--timeout-sec', str(timeout_sec)] + cmd += target_zone.zone_args() + target_zone.cluster.admin(cmd, debug_rgw=1) + +def zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name): + for source_conn in zonegroup_conns.rw_zones: + for target_conn in zonegroup_conns.zones: + if source_conn.zone == target_conn.zone: + continue + log.debug('bucket checkpoint: source=%s target=%s bucket=%s', source_conn.zone.name, target_conn.zone.name, bucket_name) + zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket_name) + for source_conn, target_conn in combinations(zonegroup_conns.zones, 2): + if target_conn.zone.has_buckets(): + target_conn.check_bucket_eq(source_conn, bucket_name) + +def set_master_zone(zone): + zone.modify(zone.cluster, ['--master']) + zonegroup = zone.zonegroup + zonegroup.period.update(zone, commit=True) + zonegroup.master_zone = zone + log.info('Set master zone=%s, waiting %ds for reconfiguration..', zone.name, config.reconfigure_delay) + time.sleep(config.reconfigure_delay) + +def set_sync_from_all(zone, flag): + s = 'true' if flag else 'false' + zone.modify(zone.cluster, ['--sync-from-all={}'.format(s)]) + zonegroup = zone.zonegroup + zonegroup.period.update(zone, commit=True) + log.info('Set sync_from_all flag on zone %s to %s', zone.name, s) + time.sleep(config.reconfigure_delay) + +def set_redirect_zone(zone, redirect_zone): + id_str = redirect_zone.id if redirect_zone else '' + zone.modify(zone.cluster, ['--redirect-zone={}'.format(id_str)]) + zonegroup = zone.zonegroup + zonegroup.period.update(zone, commit=True) + log.info('Set redirect_zone zone %s to "%s"', zone.name, id_str) + time.sleep(config.reconfigure_delay) + +def enable_bucket_sync(zone, bucket_name): + cmd = ['bucket', 'sync', 'enable', '--bucket', bucket_name] + zone.zone_args() + zone.cluster.admin(cmd) + +def disable_bucket_sync(zone, bucket_name): + cmd = ['bucket', 'sync', 'disable', '--bucket', bucket_name] + zone.zone_args() + zone.cluster.admin(cmd) + +def check_buckets_sync_status_obj_not_exist(zone, buckets): + for _ in range(config.checkpoint_retries): + cmd = ['log', 'list'] + zone.zone_arg() + log_list, ret = zone.cluster.admin(cmd, check_retcode=False, read_only=True) + for bucket in buckets: + if log_list.find(':'+bucket+":") >= 0: + break + 
else: + return + time.sleep(config.checkpoint_delay) + assert False + +def gen_bucket_name(): + global num_buckets + + num_buckets += 1 + return run_prefix + '-' + str(num_buckets) + +def gen_role_name(): + global num_roles + + num_roles += 1 + return "roles" + '-' + run_prefix + '-' + str(num_roles) + +class ZonegroupConns: + def __init__(self, zonegroup): + self.zonegroup = zonegroup + self.zones = [] + self.ro_zones = [] + self.rw_zones = [] + self.master_zone = None + + for z in zonegroup.zones: + zone_conn = z.get_conn(user.credentials) + self.zones.append(zone_conn) + if z.is_read_only(): + self.ro_zones.append(zone_conn) + else: + self.rw_zones.append(zone_conn) + + if z == zonegroup.master_zone: + self.master_zone = zone_conn + +def check_all_buckets_exist(zone_conn, buckets): + if not zone_conn.zone.has_buckets(): + return True + + for b in buckets: + try: + zone_conn.get_bucket(b) + except: + log.critical('zone %s does not contain bucket %s', zone_conn.zone.name, b) + return False + + return True + +def check_all_buckets_dont_exist(zone_conn, buckets): + if not zone_conn.zone.has_buckets(): + return True + + for b in buckets: + try: + zone_conn.get_bucket(b) + except: + continue + + log.critical('zone %s contains bucket %s', zone.zone, b) + return False + + return True + +def create_role_per_zone(zonegroup_conns, roles_per_zone = 1): + roles = [] + zone_role = [] + for zone in zonegroup_conns.rw_zones: + for i in range(roles_per_zone): + role_name = gen_role_name() + log.info('create role zone=%s name=%s', zone.name, role_name) + policy_document = "{\"Version\":\"2012-10-17\",\"Statement\":[{\"Effect\":\"Allow\",\"Principal\":{\"AWS\":[\"arn:aws:iam:::user/testuser\"]},\"Action\":[\"sts:AssumeRole\"]}]}" + role = zone.create_role("", role_name, policy_document, "") + roles.append(role_name) + zone_role.append((zone, role)) + + return roles, zone_role + +def create_bucket_per_zone(zonegroup_conns, buckets_per_zone = 1): + buckets = [] + zone_bucket = [] + for zone in zonegroup_conns.rw_zones: + for i in range(buckets_per_zone): + bucket_name = gen_bucket_name() + log.info('create bucket zone=%s name=%s', zone.name, bucket_name) + bucket = zone.create_bucket(bucket_name) + buckets.append(bucket_name) + zone_bucket.append((zone, bucket)) + + return buckets, zone_bucket + +def create_bucket_per_zone_in_realm(): + buckets = [] + zone_bucket = [] + for zonegroup in realm.current_period.zonegroups: + zg_conn = ZonegroupConns(zonegroup) + b, z = create_bucket_per_zone(zg_conn) + buckets.extend(b) + zone_bucket.extend(z) + return buckets, zone_bucket + +def test_bucket_create(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, _ = create_bucket_per_zone(zonegroup_conns) + zonegroup_meta_checkpoint(zonegroup) + + for zone in zonegroup_conns.zones: + assert check_all_buckets_exist(zone, buckets) + +def test_bucket_recreate(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, _ = create_bucket_per_zone(zonegroup_conns) + zonegroup_meta_checkpoint(zonegroup) + + + for zone in zonegroup_conns.zones: + assert check_all_buckets_exist(zone, buckets) + + # recreate buckets on all zones, make sure they weren't removed + for zone in zonegroup_conns.rw_zones: + for bucket_name in buckets: + bucket = zone.create_bucket(bucket_name) + + for zone in zonegroup_conns.zones: + assert check_all_buckets_exist(zone, buckets) + + zonegroup_meta_checkpoint(zonegroup) + + for zone in zonegroup_conns.zones: + assert 
check_all_buckets_exist(zone, buckets) + +def test_bucket_remove(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + zonegroup_meta_checkpoint(zonegroup) + + for zone in zonegroup_conns.zones: + assert check_all_buckets_exist(zone, buckets) + + for zone, bucket_name in zone_bucket: + zone.conn.delete_bucket(bucket_name) + + zonegroup_meta_checkpoint(zonegroup) + + for zone in zonegroup_conns.zones: + assert check_all_buckets_dont_exist(zone, buckets) + +def get_bucket(zone, bucket_name): + return zone.conn.get_bucket(bucket_name) + +def get_key(zone, bucket_name, obj_name): + b = get_bucket(zone, bucket_name) + return b.get_key(obj_name) + +def new_key(zone, bucket_name, obj_name): + b = get_bucket(zone, bucket_name) + return b.new_key(obj_name) + +def check_bucket_eq(zone_conn1, zone_conn2, bucket): + if zone_conn2.zone.has_buckets(): + zone_conn2.check_bucket_eq(zone_conn1, bucket.name) + +def check_role_eq(zone_conn1, zone_conn2, role): + if zone_conn2.zone.has_roles(): + zone_conn2.check_role_eq(zone_conn1, role['create_role_response']['create_role_result']['role']['role_name']) + +def test_object_sync(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + objnames = [ 'myobj', '_myobj', ':', '&' ] + content = 'asdasd' + + # don't wait for meta sync just yet + for zone, bucket_name in zone_bucket: + for objname in objnames: + k = new_key(zone, bucket_name, objname) + k.set_contents_from_string(content) + + zonegroup_meta_checkpoint(zonegroup) + + for source_conn, bucket in zone_bucket: + for target_conn in zonegroup_conns.zones: + if source_conn.zone == target_conn.zone: + continue + + zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name) + check_bucket_eq(source_conn, target_conn, bucket) + +def test_object_delete(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + objname = 'myobj' + content = 'asdasd' + + # don't wait for meta sync just yet + for zone, bucket in zone_bucket: + k = new_key(zone, bucket, objname) + k.set_contents_from_string(content) + + zonegroup_meta_checkpoint(zonegroup) + + # check object exists + for source_conn, bucket in zone_bucket: + for target_conn in zonegroup_conns.zones: + if source_conn.zone == target_conn.zone: + continue + + zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name) + check_bucket_eq(source_conn, target_conn, bucket) + + # check object removal + for source_conn, bucket in zone_bucket: + k = get_key(source_conn, bucket, objname) + k.delete() + for target_conn in zonegroup_conns.zones: + if source_conn.zone == target_conn.zone: + continue + + zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name) + check_bucket_eq(source_conn, target_conn, bucket) + +def get_latest_object_version(key): + for k in key.bucket.list_versions(key.name): + if k.is_latest: + return k + return None + +def test_versioned_object_incremental_sync(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + # enable versioning + for _, bucket in zone_bucket: + bucket.configure_versioning(True) + + zonegroup_meta_checkpoint(zonegroup) + + # upload a dummy object to each bucket and wait for sync. 
this forces each + # bucket to finish a full sync and switch to incremental + for source_conn, bucket in zone_bucket: + new_key(source_conn, bucket, 'dummy').set_contents_from_string('') + for target_conn in zonegroup_conns.zones: + if source_conn.zone == target_conn.zone: + continue + zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name) + + for _, bucket in zone_bucket: + # create and delete multiple versions of an object from each zone + for zone_conn in zonegroup_conns.rw_zones: + obj = 'obj-' + zone_conn.name + k = new_key(zone_conn, bucket, obj) + + k.set_contents_from_string('version1') + log.debug('version1 id=%s', k.version_id) + # don't delete version1 - this tests that the initial version + # doesn't get squashed into later versions + + # create and delete the following object versions to test that + # the operations don't race with each other during sync + k.set_contents_from_string('version2') + log.debug('version2 id=%s', k.version_id) + k.bucket.delete_key(obj, version_id=k.version_id) + + k.set_contents_from_string('version3') + log.debug('version3 id=%s', k.version_id) + k.bucket.delete_key(obj, version_id=k.version_id) + + for _, bucket in zone_bucket: + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + for _, bucket in zone_bucket: + # overwrite the acls to test that metadata-only entries are applied + for zone_conn in zonegroup_conns.rw_zones: + obj = 'obj-' + zone_conn.name + k = new_key(zone_conn, bucket.name, obj) + v = get_latest_object_version(k) + v.make_public() + + for _, bucket in zone_bucket: + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + +def test_concurrent_versioned_object_incremental_sync(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + zone = zonegroup_conns.rw_zones[0] + + # create a versioned bucket + bucket = zone.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + bucket.configure_versioning(True) + + zonegroup_meta_checkpoint(zonegroup) + + # upload a dummy object and wait for sync. 
this forces each zone to finish + # a full sync and switch to incremental + new_key(zone, bucket, 'dummy').set_contents_from_string('') + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + # create several concurrent versions on each zone and let them race to sync + obj = 'obj' + for i in range(10): + for zone_conn in zonegroup_conns.rw_zones: + k = new_key(zone_conn, bucket, obj) + k.set_contents_from_string('version1') + log.debug('zone=%s version=%s', zone_conn.zone.name, k.version_id) + + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + zonegroup_data_checkpoint(zonegroup_conns) + +def test_version_suspended_incremental_sync(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + + zone = zonegroup_conns.rw_zones[0] + + # create a non-versioned bucket + bucket = zone.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + + # upload an initial object + key1 = new_key(zone, bucket, 'obj') + key1.set_contents_from_string('') + log.debug('created initial version id=%s', key1.version_id) + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + # enable versioning + bucket.configure_versioning(True) + zonegroup_meta_checkpoint(zonegroup) + + # re-upload the object as a new version + key2 = new_key(zone, bucket, 'obj') + key2.set_contents_from_string('') + log.debug('created new version id=%s', key2.version_id) + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + # suspend versioning + bucket.configure_versioning(False) + zonegroup_meta_checkpoint(zonegroup) + + # re-upload the object as a 'null' version + key3 = new_key(zone, bucket, 'obj') + key3.set_contents_from_string('') + log.debug('created null version id=%s', key3.version_id) + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + +def test_delete_marker_full_sync(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + # enable versioning + for _, bucket in zone_bucket: + bucket.configure_versioning(True) + zonegroup_meta_checkpoint(zonegroup) + + for zone, bucket in zone_bucket: + # upload an initial object + key1 = new_key(zone, bucket, 'obj') + key1.set_contents_from_string('') + + # create a delete marker + key2 = new_key(zone, bucket, 'obj') + key2.delete() + + # wait for full sync + for _, bucket in zone_bucket: + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + +def test_suspended_delete_marker_full_sync(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + # enable/suspend versioning + for _, bucket in zone_bucket: + bucket.configure_versioning(True) + bucket.configure_versioning(False) + zonegroup_meta_checkpoint(zonegroup) + + for zone, bucket in zone_bucket: + # upload an initial object + key1 = new_key(zone, bucket, 'obj') + key1.set_contents_from_string('') + + # create a delete marker + key2 = new_key(zone, bucket, 'obj') + key2.delete() + + # wait for full sync + for _, bucket in zone_bucket: + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + +def test_bucket_versioning(): + buckets, zone_bucket = create_bucket_per_zone_in_realm() + for _, bucket in zone_bucket: + bucket.configure_versioning(True) + res = bucket.get_versioning_status() + key = 'Versioning' + assert(key in res and res[key] == 'Enabled') + +def test_bucket_acl(): + buckets, zone_bucket = 
create_bucket_per_zone_in_realm() + for _, bucket in zone_bucket: + assert(len(bucket.get_acl().acl.grants) == 1) # single grant on owner + bucket.set_acl('public-read') + assert(len(bucket.get_acl().acl.grants) == 2) # new grant on AllUsers + +def test_bucket_cors(): + buckets, zone_bucket = create_bucket_per_zone_in_realm() + for _, bucket in zone_bucket: + cors_cfg = CORSConfiguration() + cors_cfg.add_rule(['DELETE'], 'https://www.example.com', allowed_header='*', max_age_seconds=3000) + bucket.set_cors(cors_cfg) + assert(bucket.get_cors().to_xml() == cors_cfg.to_xml()) + +def test_bucket_delete_notempty(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + zonegroup_meta_checkpoint(zonegroup) + + for zone_conn, bucket_name in zone_bucket: + # upload an object to each bucket on its own zone + conn = zone_conn.get_connection() + bucket = conn.get_bucket(bucket_name) + k = bucket.new_key('foo') + k.set_contents_from_string('bar') + # attempt to delete the bucket before this object can sync + try: + conn.delete_bucket(bucket_name) + except boto.exception.S3ResponseError as e: + assert(e.error_code == 'BucketNotEmpty') + continue + assert False # expected 409 BucketNotEmpty + + # assert that each bucket still exists on the master + c1 = zonegroup_conns.master_zone.conn + for _, bucket_name in zone_bucket: + assert c1.get_bucket(bucket_name) + +def test_multi_period_incremental_sync(): + zonegroup = realm.master_zonegroup() + if len(zonegroup.zones) < 3: + raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.") + + # periods to include in mdlog comparison + mdlog_periods = [realm.current_period.id] + + # create a bucket in each zone + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + zonegroup_meta_checkpoint(zonegroup) + + z1, z2, z3 = zonegroup.zones[0:3] + assert(z1 == zonegroup.master_zone) + + # kill zone 3 gateways to freeze sync status to incremental in first period + z3.stop() + + # change master to zone 2 -> period 2 + set_master_zone(z2) + mdlog_periods += [realm.current_period.id] + + for zone_conn, _ in zone_bucket: + if zone_conn.zone == z3: + continue + bucket_name = gen_bucket_name() + log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name) + bucket = zone_conn.conn.create_bucket(bucket_name) + buckets.append(bucket_name) + + # wait for zone 1 to sync + zone_meta_checkpoint(z1) + + # change master back to zone 1 -> period 3 + set_master_zone(z1) + mdlog_periods += [realm.current_period.id] + + for zone_conn, bucket_name in zone_bucket: + if zone_conn.zone == z3: + continue + bucket_name = gen_bucket_name() + log.info('create bucket zone=%s name=%s', zone_conn.name, bucket_name) + zone_conn.conn.create_bucket(bucket_name) + buckets.append(bucket_name) + + # restart zone 3 gateway and wait for sync + z3.start() + zonegroup_meta_checkpoint(zonegroup) + + # verify that we end up with the same objects + for bucket_name in buckets: + for source_conn, _ in zone_bucket: + for target_conn in zonegroup_conns.zones: + if source_conn.zone == target_conn.zone: + continue + + if target_conn.zone.has_buckets(): + target_conn.check_bucket_eq(source_conn, bucket_name) + + # verify that mdlogs are not empty and match for each period + for period in mdlog_periods: + master_mdlog = mdlog_list(z1, period) + assert len(master_mdlog) > 0 + for zone in zonegroup.zones: + if 
zone == z1: + continue + mdlog = mdlog_list(zone, period) + assert len(mdlog) == len(master_mdlog) + + # autotrim mdlogs for master zone + mdlog_autotrim(z1) + + # autotrim mdlogs for peers + for zone in zonegroup.zones: + if zone == z1: + continue + mdlog_autotrim(zone) + + # verify that mdlogs are empty for each period + for period in mdlog_periods: + for zone in zonegroup.zones: + mdlog = mdlog_list(zone, period) + assert len(mdlog) == 0 + +def test_datalog_autotrim(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + # upload an object to each zone to generate a datalog entry + for zone, bucket in zone_bucket: + k = new_key(zone, bucket.name, 'key') + k.set_contents_from_string('body') + + # wait for metadata and data sync to catch up + zonegroup_meta_checkpoint(zonegroup) + zonegroup_data_checkpoint(zonegroup_conns) + + # trim each datalog + for zone, _ in zone_bucket: + # read max markers for each shard + status = datalog_status(zone.zone) + + datalog_autotrim(zone.zone) + + for shard_id, shard_status in enumerate(status): + try: + before_trim = dateutil.parser.isoparse(shard_status['last_update']) + except: # empty timestamps look like "0.000000" and will fail here + continue + entries = datalog_list(zone.zone, ['--shard-id', str(shard_id), '--max-entries', '1']) + if not len(entries): + continue + after_trim = dateutil.parser.isoparse(entries[0]['timestamp']) + assert before_trim < after_trim, "any datalog entries must be newer than trim" + +def test_multi_zone_redirect(): + zonegroup = realm.master_zonegroup() + if len(zonegroup.rw_zones) < 2: + raise SkipTest("test_multi_period_incremental_sync skipped. Requires 3 or more zones in master zonegroup.") + + zonegroup_conns = ZonegroupConns(zonegroup) + (zc1, zc2) = zonegroup_conns.rw_zones[0:2] + + z1, z2 = (zc1.zone, zc2.zone) + + set_sync_from_all(z2, False) + + # create a bucket on the first zone + bucket_name = gen_bucket_name() + log.info('create bucket zone=%s name=%s', z1.name, bucket_name) + bucket = zc1.conn.create_bucket(bucket_name) + obj = 'testredirect' + + key = bucket.new_key(obj) + data = 'A'*512 + key.set_contents_from_string(data) + + zonegroup_meta_checkpoint(zonegroup) + + # try to read object from second zone (should fail) + bucket2 = get_bucket(zc2, bucket_name) + assert_raises(boto.exception.S3ResponseError, bucket2.get_key, obj) + + set_redirect_zone(z2, z1) + + key2 = bucket2.get_key(obj) + + eq(data, key2.get_contents_as_string(encoding='ascii')) + + key = bucket.new_key(obj) + + for x in ['a', 'b', 'c', 'd']: + data = x*512 + key.set_contents_from_string(data) + eq(data, key2.get_contents_as_string(encoding='ascii')) + + # revert config changes + set_sync_from_all(z2, True) + set_redirect_zone(z2, None) + +def test_zonegroup_remove(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + if len(zonegroup.zones) < 2: + raise SkipTest("test_zonegroup_remove skipped. 
Requires 2 or more zones in master zonegroup.") + + zonegroup_meta_checkpoint(zonegroup) + z1, z2 = zonegroup.zones[0:2] + c1, c2 = (z1.cluster, z2.cluster) + + # get admin credentials out of existing zone + system_key = z1.data['system_key'] + admin_creds = Credentials(system_key['access_key'], system_key['secret_key']) + + # create a new zone in zonegroup on c2 and commit + zone = Zone('remove', zonegroup, c2) + zone.create(c2, admin_creds.credential_args()) + zonegroup.zones.append(zone) + zonegroup.period.update(zone, commit=True) + + zonegroup.remove(c1, zone) + + # another 'zonegroup remove' should fail with ENOENT + _, retcode = zonegroup.remove(c1, zone, check_retcode=False) + assert(retcode == 2) # ENOENT + + # delete the new zone + zone.delete(c2) + + # validate the resulting period + zonegroup.period.update(z1, commit=True) + + +def test_zg_master_zone_delete(): + + master_zg = realm.master_zonegroup() + master_zone = master_zg.master_zone + + assert(len(master_zg.zones) >= 1) + master_cluster = master_zg.zones[0].cluster + + rm_zg = ZoneGroup('remove_zg') + rm_zg.create(master_cluster) + + rm_zone = Zone('remove', rm_zg, master_cluster) + rm_zone.create(master_cluster) + master_zg.period.update(master_zone, commit=True) + + + rm_zone.delete(master_cluster) + # Period update: This should now fail as the zone will be the master zone + # in that zg + _, retcode = master_zg.period.update(master_zone, check_retcode=False) + assert(retcode == errno.EINVAL) + + # Proceed to delete the zonegroup as well, previous period now does not + # contain a dangling master_zone, this must succeed + rm_zg.delete(master_cluster) + master_zg.period.update(master_zone, commit=True) + +def test_set_bucket_website(): + buckets, zone_bucket = create_bucket_per_zone_in_realm() + for _, bucket in zone_bucket: + website_cfg = WebsiteConfiguration(suffix='index.html',error_key='error.html') + try: + bucket.set_website_configuration(website_cfg) + except boto.exception.S3ResponseError as e: + if e.error_code == 'MethodNotAllowed': + raise SkipTest("test_set_bucket_website skipped. 
Requires rgw_enable_static_website = 1.") + assert(bucket.get_website_configuration_with_xml()[1] == website_cfg.to_xml()) + +def test_set_bucket_policy(): + policy = '''{ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": "*" + }] +}''' + buckets, zone_bucket = create_bucket_per_zone_in_realm() + for _, bucket in zone_bucket: + bucket.set_policy(policy) + assert(bucket.get_policy().decode('ascii') == policy) + +@attr('bucket_sync_disable') +def test_bucket_sync_disable(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + zonegroup_meta_checkpoint(zonegroup) + + for bucket_name in buckets: + disable_bucket_sync(realm.meta_master_zone(), bucket_name) + + for zone in zonegroup.zones: + check_buckets_sync_status_obj_not_exist(zone, buckets) + + zonegroup_data_checkpoint(zonegroup_conns) + +@attr('bucket_sync_disable') +def test_bucket_sync_enable_right_after_disable(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + objnames = ['obj1', 'obj2', 'obj3', 'obj4'] + content = 'asdasd' + + for zone, bucket in zone_bucket: + for objname in objnames: + k = new_key(zone, bucket.name, objname) + k.set_contents_from_string(content) + + zonegroup_meta_checkpoint(zonegroup) + + for bucket_name in buckets: + zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name) + + for bucket_name in buckets: + disable_bucket_sync(realm.meta_master_zone(), bucket_name) + enable_bucket_sync(realm.meta_master_zone(), bucket_name) + + objnames_2 = ['obj5', 'obj6', 'obj7', 'obj8'] + + for zone, bucket in zone_bucket: + for objname in objnames_2: + k = new_key(zone, bucket.name, objname) + k.set_contents_from_string(content) + + for bucket_name in buckets: + zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name) + + zonegroup_data_checkpoint(zonegroup_conns) + +@attr('bucket_sync_disable') +def test_bucket_sync_disable_enable(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + objnames = [ 'obj1', 'obj2', 'obj3', 'obj4' ] + content = 'asdasd' + + for zone, bucket in zone_bucket: + for objname in objnames: + k = new_key(zone, bucket.name, objname) + k.set_contents_from_string(content) + + zonegroup_meta_checkpoint(zonegroup) + + for bucket_name in buckets: + zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name) + + for bucket_name in buckets: + disable_bucket_sync(realm.meta_master_zone(), bucket_name) + + zonegroup_meta_checkpoint(zonegroup) + + objnames_2 = [ 'obj5', 'obj6', 'obj7', 'obj8' ] + + for zone, bucket in zone_bucket: + for objname in objnames_2: + k = new_key(zone, bucket.name, objname) + k.set_contents_from_string(content) + + for bucket_name in buckets: + enable_bucket_sync(realm.meta_master_zone(), bucket_name) + + for bucket_name in buckets: + zonegroup_bucket_checkpoint(zonegroup_conns, bucket_name) + + zonegroup_data_checkpoint(zonegroup_conns) + +def test_multipart_object_sync(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns) + + _, bucket = zone_bucket[0] + + # initiate a multipart upload + upload = bucket.initiate_multipart_upload('MULTIPART') + mp = boto.s3.multipart.MultiPartUpload(bucket) + mp.key_name = upload.key_name + mp.id = upload.id + part_size = 5 
* 1024 * 1024 # 5M min part size + mp.upload_part_from_file(StringIO('a' * part_size), 1) + mp.upload_part_from_file(StringIO('b' * part_size), 2) + mp.upload_part_from_file(StringIO('c' * part_size), 3) + mp.upload_part_from_file(StringIO('d' * part_size), 4) + mp.complete_upload() + + zonegroup_meta_checkpoint(zonegroup) + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + +def test_encrypted_object_sync(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + + if len(zonegroup.rw_zones) < 2: + raise SkipTest("test_zonegroup_remove skipped. Requires 2 or more zones in master zonegroup.") + + (zone1, zone2) = zonegroup_conns.rw_zones[0:2] + + # create a bucket on the first zone + bucket_name = gen_bucket_name() + log.info('create bucket zone=%s name=%s', zone1.name, bucket_name) + bucket = zone1.conn.create_bucket(bucket_name) + + # upload an object with sse-c encryption + sse_c_headers = { + 'x-amz-server-side-encryption-customer-algorithm': 'AES256', + 'x-amz-server-side-encryption-customer-key': 'pO3upElrwuEXSoFwCfnZPdSsmt/xWeFa0N9KgDijwVs=', + 'x-amz-server-side-encryption-customer-key-md5': 'DWygnHRtgiJ77HCm+1rvHw==' + } + key = bucket.new_key('testobj-sse-c') + data = 'A'*512 + key.set_contents_from_string(data, headers=sse_c_headers) + + # upload an object with sse-kms encryption + sse_kms_headers = { + 'x-amz-server-side-encryption': 'aws:kms', + # testkey-1 must be present in 'rgw crypt s3 kms encryption keys' (vstart.sh adds this) + 'x-amz-server-side-encryption-aws-kms-key-id': 'testkey-1', + } + key = bucket.new_key('testobj-sse-kms') + key.set_contents_from_string(data, headers=sse_kms_headers) + + # wait for the bucket metadata and data to sync + zonegroup_meta_checkpoint(zonegroup) + zone_bucket_checkpoint(zone2.zone, zone1.zone, bucket_name) + + # read the encrypted objects from the second zone + bucket2 = get_bucket(zone2, bucket_name) + key = bucket2.get_key('testobj-sse-c', headers=sse_c_headers) + eq(data, key.get_contents_as_string(headers=sse_c_headers, encoding='ascii')) + + key = bucket2.get_key('testobj-sse-kms') + eq(data, key.get_contents_as_string(encoding='ascii')) + +def test_bucket_index_log_trim(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + + zone = zonegroup_conns.rw_zones[0] + + # create a test bucket, upload some objects, and wait for sync + def make_test_bucket(): + name = gen_bucket_name() + log.info('create bucket zone=%s name=%s', zone.name, name) + bucket = zone.conn.create_bucket(name) + for objname in ('a', 'b', 'c', 'd'): + k = new_key(zone, name, objname) + k.set_contents_from_string('foo') + zonegroup_meta_checkpoint(zonegroup) + zonegroup_bucket_checkpoint(zonegroup_conns, name) + return bucket + + # create a 'cold' bucket + cold_bucket = make_test_bucket() + + # trim with max-buckets=0 to clear counters for cold bucket. 
this should + # prevent it from being considered 'active' by the next autotrim + bilog_autotrim(zone.zone, [ + '--rgw-sync-log-trim-max-buckets', '0', + ]) + + # create an 'active' bucket + active_bucket = make_test_bucket() + + # trim with max-buckets=1 min-cold-buckets=0 to trim active bucket only + bilog_autotrim(zone.zone, [ + '--rgw-sync-log-trim-max-buckets', '1', + '--rgw-sync-log-trim-min-cold-buckets', '0', + ]) + + # verify active bucket has empty bilog + active_bilog = bilog_list(zone.zone, active_bucket.name) + assert(len(active_bilog) == 0) + + # verify cold bucket has nonempty bilog + cold_bilog = bilog_list(zone.zone, cold_bucket.name) + assert(len(cold_bilog) > 0) + + # trim with min-cold-buckets=999 to trim all buckets + bilog_autotrim(zone.zone, [ + '--rgw-sync-log-trim-max-buckets', '999', + '--rgw-sync-log-trim-min-cold-buckets', '999', + ]) + + # verify cold bucket has empty bilog + cold_bilog = bilog_list(zone.zone, cold_bucket.name) + assert(len(cold_bilog) == 0) + +def test_bucket_reshard_index_log_trim(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + + zone = zonegroup_conns.rw_zones[0] + + # create a test bucket, upload some objects, and wait for sync + def make_test_bucket(): + name = gen_bucket_name() + log.info('create bucket zone=%s name=%s', zone.name, name) + bucket = zone.conn.create_bucket(name) + for objname in ('a', 'b', 'c', 'd'): + k = new_key(zone, name, objname) + k.set_contents_from_string('foo') + zonegroup_meta_checkpoint(zonegroup) + zonegroup_bucket_checkpoint(zonegroup_conns, name) + return bucket + + # create a 'test' bucket + test_bucket = make_test_bucket() + + # checking bucket layout before resharding + json_obj_1 = bucket_layout(zone.zone, test_bucket.name) + assert(len(json_obj_1['layout']['logs']) == 1) + + first_gen = json_obj_1['layout']['current_index']['gen'] + + before_reshard_bilog = bilog_list(zone.zone, test_bucket.name, ['--gen', str(first_gen)]) + assert(len(before_reshard_bilog) == 4) + + # Resharding the bucket + zone.zone.cluster.admin(['bucket', 'reshard', + '--bucket', test_bucket.name, + '--num-shards', '3', + '--yes-i-really-mean-it']) + + # checking bucket layout after 1st resharding + json_obj_2 = bucket_layout(zone.zone, test_bucket.name) + assert(len(json_obj_2['layout']['logs']) == 2) + + second_gen = json_obj_2['layout']['current_index']['gen'] + + after_reshard_bilog = bilog_list(zone.zone, test_bucket.name, ['--gen', str(second_gen)]) + assert(len(after_reshard_bilog) == 0) + + # upload more objects + for objname in ('e', 'f', 'g', 'h'): + k = new_key(zone, test_bucket.name, objname) + k.set_contents_from_string('foo') + zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name) + + # Resharding the bucket again + zone.zone.cluster.admin(['bucket', 'reshard', + '--bucket', test_bucket.name, + '--num-shards', '3', + '--yes-i-really-mean-it']) + + # checking bucket layout after 2nd resharding + json_obj_3 = bucket_layout(zone.zone, test_bucket.name) + assert(len(json_obj_3['layout']['logs']) == 3) + + zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name) + + bilog_autotrim(zone.zone) + + # checking bucket layout after 1st bilog autotrim + json_obj_4 = bucket_layout(zone.zone, test_bucket.name) + assert(len(json_obj_4['layout']['logs']) == 2) + + bilog_autotrim(zone.zone) + + # checking bucket layout after 2nd bilog autotrim + json_obj_5 = bucket_layout(zone.zone, test_bucket.name) + assert(len(json_obj_5['layout']['logs']) == 1) + + bilog_autotrim(zone.zone) + 
+ # upload more objects + for objname in ('i', 'j', 'k', 'l'): + k = new_key(zone, test_bucket.name, objname) + k.set_contents_from_string('foo') + zonegroup_bucket_checkpoint(zonegroup_conns, test_bucket.name) + + # verify the bucket has non-empty bilog + test_bilog = bilog_list(zone.zone, test_bucket.name) + assert(len(test_bilog) > 0) + +@attr('bucket_reshard') +def test_bucket_reshard_incremental(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + zone = zonegroup_conns.rw_zones[0] + + # create a bucket + bucket = zone.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + + # upload some objects + for objname in ('a', 'b', 'c', 'd'): + k = new_key(zone, bucket.name, objname) + k.set_contents_from_string('foo') + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + # reshard in each zone + for z in zonegroup_conns.rw_zones: + z.zone.cluster.admin(['bucket', 'reshard', + '--bucket', bucket.name, + '--num-shards', '3', + '--yes-i-really-mean-it']) + + # upload more objects + for objname in ('e', 'f', 'g', 'h'): + k = new_key(zone, bucket.name, objname) + k.set_contents_from_string('foo') + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + +@attr('bucket_reshard') +def test_bucket_reshard_full(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + zone = zonegroup_conns.rw_zones[0] + + # create a bucket + bucket = zone.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + + # stop gateways in other zones so we can force the bucket to full sync + for z in zonegroup_conns.rw_zones[1:]: + z.zone.stop() + + # use try-finally to restart gateways even if something fails + try: + # upload some objects + for objname in ('a', 'b', 'c', 'd'): + k = new_key(zone, bucket.name, objname) + k.set_contents_from_string('foo') + + # reshard on first zone + zone.zone.cluster.admin(['bucket', 'reshard', + '--bucket', bucket.name, + '--num-shards', '3', + '--yes-i-really-mean-it']) + + # upload more objects + for objname in ('e', 'f', 'g', 'h'): + k = new_key(zone, bucket.name, objname) + k.set_contents_from_string('foo') + finally: + for z in zonegroup_conns.rw_zones[1:]: + z.zone.start() + + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + +def test_bucket_creation_time(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + + zonegroup_meta_checkpoint(zonegroup) + + zone_buckets = [zone.get_connection().get_all_buckets() for zone in zonegroup_conns.rw_zones] + for z1, z2 in combinations(zone_buckets, 2): + for a, b in zip(z1, z2): + eq(a.name, b.name) + eq(a.creation_date, b.creation_date) + +def get_bucket_shard_objects(zone, num_shards): + """ + Get one object for each shard of the bucket index log + """ + cmd = ['bucket', 'shard', 'objects'] + zone.zone_args() + cmd += ['--num-shards', str(num_shards)] + shardobjs_json, ret = zone.cluster.admin(cmd, read_only=True) + assert ret == 0 + shardobjs = json.loads(shardobjs_json) + return shardobjs['objs'] + +def write_most_shards(zone, bucket_name, num_shards): + """ + Write one object to most (but not all) bucket index shards. 
+ """ + objs = get_bucket_shard_objects(zone.zone, num_shards) + random.shuffle(objs) + del objs[-(len(objs)//10):] + for obj in objs: + k = new_key(zone, bucket_name, obj) + k.set_contents_from_string('foo') + +def reshard_bucket(zone, bucket_name, num_shards): + """ + Reshard a bucket + """ + cmd = ['bucket', 'reshard'] + zone.zone_args() + cmd += ['--bucket', bucket_name] + cmd += ['--num-shards', str(num_shards)] + cmd += ['--yes-i-really-mean-it'] + zone.cluster.admin(cmd) + +def get_obj_names(zone, bucket_name, maxobjs): + """ + Get names of objects in a bucket. + """ + cmd = ['bucket', 'list'] + zone.zone_args() + cmd += ['--bucket', bucket_name] + cmd += ['--max-entries', str(maxobjs)] + objs_json, _ = zone.cluster.admin(cmd, read_only=True) + objs = json.loads(objs_json) + return [o['name'] for o in objs] + +def bucket_keys_eq(zone1, zone2, bucket_name): + """ + Ensure that two buckets have the same keys, but get the lists through + radosgw-admin rather than S3 so it can be used when radosgw isn't running. + Only works for buckets of 10,000 objects since the tests calling it don't + need more, and the output from bucket list doesn't have an obvious marker + with which to continue. + """ + keys1 = get_obj_names(zone1, bucket_name, 10000) + keys2 = get_obj_names(zone2, bucket_name, 10000) + for key1, key2 in zip_longest(keys1, keys2): + if key1 is None: + log.critical('key=%s is missing from zone=%s', key1.name, + zone1.name) + assert False + if key2 is None: + log.critical('key=%s is missing from zone=%s', key2.name, + zone2.name) + assert False + +@attr('bucket_reshard') +def test_bucket_sync_run_basic_incremental(): + """ + Create several generations of objects, then run bucket sync + run to ensure they're all processed. + """ + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + primary = zonegroup_conns.rw_zones[0] + + # create a bucket write objects to it and wait for them to sync, ensuring + # we are in incremental. + bucket = primary.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + write_most_shards(primary, bucket.name, 11) + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + try: + # stop gateways in other zones so we can rely on bucket sync run + for secondary in zonegroup_conns.rw_zones[1:]: + secondary.zone.stop() + + # build up multiple generations each with some objects written to + # them. + generations = [17, 19, 23, 29, 31, 37] + for num_shards in generations: + reshard_bucket(primary.zone, bucket.name, num_shards) + write_most_shards(primary, bucket.name, num_shards) + + # bucket sync run on every secondary + for secondary in zonegroup_conns.rw_zones[1:]: + cmd = ['bucket', 'sync', 'run'] + secondary.zone.zone_args() + cmd += ['--bucket', bucket.name, '--source-zone', primary.name] + secondary.zone.cluster.admin(cmd) + + bucket_keys_eq(primary.zone, secondary.zone, bucket.name) + + finally: + # Restart so bucket_checkpoint can actually fetch things from the + # secondaries. Put this in a finally block so they restart even on + # error. + for secondary in zonegroup_conns.rw_zones[1:]: + secondary.zone.start() + + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + +def trash_bucket(zone, bucket_name): + """ + Remove objects through radosgw-admin, zapping bilog to prevent the deletes + from replicating. 
+ """ + objs = get_obj_names(zone, bucket_name, 10000) + # Delete the objects + for obj in objs: + cmd = ['object', 'rm'] + zone.zone_args() + cmd += ['--bucket', bucket_name] + cmd += ['--object', obj] + zone.cluster.admin(cmd) + + # Zap the bilog + cmd = ['bilog', 'trim'] + zone.zone_args() + cmd += ['--bucket', bucket_name] + zone.cluster.admin(cmd) + +@attr('bucket_reshard') +def test_zap_init_bucket_sync_run(): + """ + Create several generations of objects, trash them, then run bucket sync init + and bucket sync run. + """ + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + primary = zonegroup_conns.rw_zones[0] + + bucket = primary.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + + # Write zeroth generation + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * 11}') + k.set_contents_from_string('foo') + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + # Write several more generations + generations = [17, 19, 23, 29, 31, 37] + for num_shards in generations: + reshard_bucket(primary.zone, bucket.name, num_shards) + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * num_shards}') + k.set_contents_from_string('foo') + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + + # Stop gateways, trash bucket, init, sync, and restart for every secondary + for secondary in zonegroup_conns.rw_zones[1:]: + try: + secondary.zone.stop() + + trash_bucket(secondary.zone, bucket.name) + + cmd = ['bucket', 'sync', 'init'] + secondary.zone.zone_args() + cmd += ['--bucket', bucket.name] + cmd += ['--source-zone', primary.name] + secondary.zone.cluster.admin(cmd) + + cmd = ['bucket', 'sync', 'run'] + secondary.zone.zone_args() + cmd += ['--bucket', bucket.name, '--source-zone', primary.name] + secondary.zone.cluster.admin(cmd) + + bucket_keys_eq(primary.zone, secondary.zone, bucket.name) + + finally: + # Do this as a finally so we bring the zone back up even on error. + secondary.zone.start() + + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + +def test_role_sync(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + roles, zone_role = create_role_per_zone(zonegroup_conns) + + zonegroup_meta_checkpoint(zonegroup) + + for source_conn, role in zone_role: + for target_conn in zonegroup_conns.zones: + if source_conn.zone == target_conn.zone: + continue + + check_role_eq(source_conn, target_conn, role) + +@attr('data_sync_init') +def test_bucket_full_sync_after_data_sync_init(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + primary = zonegroup_conns.rw_zones[0] + secondary = zonegroup_conns.rw_zones[1] + + bucket = primary.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + + try: + # stop secondary zone before it starts a bucket full sync + secondary.zone.stop() + + # write some objects that don't sync yet + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * 11}') + k.set_contents_from_string('foo') + + cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args() + cmd += ['--source-zone', primary.name] + secondary.zone.cluster.admin(cmd) + finally: + # Do this as a finally so we bring the zone back up even on error. 
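+ # NOTE (editorial aside, illustrative only -- not part of the upstream change):
+ # 'data sync init' above clears the secondary's per-source data sync status,
+ # so the restarted gateway begins data sync from scratch, rediscovers the
+ # bucket, and pulls the objects written while it was down via a bucket full
+ # sync. A hedged, read-only way to watch that progress by hand (presumably
+ # the same query behind the data_sync_status() helper used earlier in this
+ # file) could look like:
+ #   cmd = ['data', 'sync', 'status'] + secondary.zone.zone_args()
+ #   cmd += ['--source-zone', primary.name]
+ #   secondary.zone.cluster.admin(cmd, read_only=True)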
+ secondary.zone.start() + + # expect all objects to replicate via 'bucket full sync' + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + zonegroup_data_checkpoint(zonegroup_conns) + +@attr('data_sync_init') +@attr('bucket_reshard') +def test_resharded_bucket_full_sync_after_data_sync_init(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + primary = zonegroup_conns.rw_zones[0] + secondary = zonegroup_conns.rw_zones[1] + + bucket = primary.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + + try: + # stop secondary zone before it starts a bucket full sync + secondary.zone.stop() + + # Write zeroth generation + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * 11}') + k.set_contents_from_string('foo') + + # Write several more generations + generations = [17, 19, 23, 29, 31, 37] + for num_shards in generations: + reshard_bucket(primary.zone, bucket.name, num_shards) + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * num_shards}') + k.set_contents_from_string('foo') + + cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args() + cmd += ['--source-zone', primary.name] + secondary.zone.cluster.admin(cmd) + finally: + # Do this as a finally so we bring the zone back up even on error. + secondary.zone.start() + + # expect all objects to replicate via 'bucket full sync' + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + zonegroup_data_checkpoint(zonegroup_conns) + +@attr('data_sync_init') +def test_bucket_incremental_sync_after_data_sync_init(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + primary = zonegroup_conns.rw_zones[0] + secondary = zonegroup_conns.rw_zones[1] + + bucket = primary.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + + # upload a dummy object and wait for sync. this forces each zone to finish + # a full sync and switch to incremental + k = new_key(primary, bucket, 'dummy') + k.set_contents_from_string('foo') + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + try: + # stop secondary zone before it syncs the rest + secondary.zone.stop() + + # Write more objects to primary + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * 11}') + k.set_contents_from_string('foo') + + cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args() + cmd += ['--source-zone', primary.name] + secondary.zone.cluster.admin(cmd) + finally: + # Do this as a finally so we bring the zone back up even on error. 
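+ # NOTE (editorial aside, not part of the upstream change): 'data sync init'
+ # only resets the per-source datalog markers; the bucket's own sync status is
+ # stored separately and already records incremental mode (the dummy object
+ # forced full sync to finish earlier), so the restarted gateway is expected
+ # to replay just the new bilog entries rather than re-running a full sync.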
+ secondary.zone.start() + + # expect remaining objects to replicate via 'bucket incremental sync' + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + zonegroup_data_checkpoint(zonegroup_conns) + +@attr('data_sync_init') +@attr('bucket_reshard') +def test_resharded_bucket_incremental_sync_latest_after_data_sync_init(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + primary = zonegroup_conns.rw_zones[0] + secondary = zonegroup_conns.rw_zones[1] + + bucket = primary.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + + # Write zeroth generation to primary + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * 11}') + k.set_contents_from_string('foo') + + # Write several more generations + generations = [17, 19, 23, 29, 31, 37] + for num_shards in generations: + reshard_bucket(primary.zone, bucket.name, num_shards) + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * num_shards}') + k.set_contents_from_string('foo') + + # wait for the secondary to catch up to the latest gen + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + try: + # stop secondary zone before it syncs the rest + secondary.zone.stop() + + # write some more objects to the last gen + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * generations[-1]}') + k.set_contents_from_string('foo') + + cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args() + cmd += ['--source-zone', primary.name] + secondary.zone.cluster.admin(cmd) + finally: + # Do this as a finally so we bring the zone back up even on error. + secondary.zone.start() + + # expect remaining objects in last gen to replicate via 'bucket incremental sync' + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + zonegroup_data_checkpoint(zonegroup_conns) + +@attr('data_sync_init') +@attr('bucket_reshard') +def test_resharded_bucket_incremental_sync_oldest_after_data_sync_init(): + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + primary = zonegroup_conns.rw_zones[0] + secondary = zonegroup_conns.rw_zones[1] + + bucket = primary.create_bucket(gen_bucket_name()) + log.debug('created bucket=%s', bucket.name) + zonegroup_meta_checkpoint(zonegroup) + + # Write zeroth generation to primary + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * 11}') + k.set_contents_from_string('foo') + + # wait for the secondary to catch up + zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name) + + try: + # stop secondary zone before it syncs later generations + secondary.zone.stop() + + # Write several more generations + generations = [17, 19, 23, 29, 31, 37] + for num_shards in generations: + reshard_bucket(primary.zone, bucket.name, num_shards) + for obj in range(1, 6): + k = new_key(primary, bucket.name, f'obj{obj * num_shards}') + k.set_contents_from_string('foo') + + cmd = ['data', 'sync', 'init'] + secondary.zone.zone_args() + cmd += ['--source-zone', primary.name] + secondary.zone.cluster.admin(cmd) + finally: + # Do this as a finally so we bring the zone back up even on error. 
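+ # NOTE (editorial aside, not part of the upstream change): here the secondary
+ # last synced the pre-reshard (oldest) generation, so after the sync-status
+ # reset it presumably has to work through each newer generation's index log
+ # in turn; the checkpoint below only passes once every generation, including
+ # the latest, has been consumed.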
+        secondary.zone.start()
+
+    # expect all generations to replicate via 'bucket incremental sync'
+    zonegroup_bucket_checkpoint(zonegroup_conns, bucket.name)
+    zonegroup_data_checkpoint(zonegroup_conns)
+
+def sync_info(cluster, bucket = None):
+    cmd = ['sync', 'info']
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to get sync info'
+
+    return json.loads(result_json)
+
+def get_sync_policy(cluster, bucket = None):
+    cmd = ['sync', 'policy', 'get']
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to get sync policy'
+
+    return json.loads(result_json)
+
+def create_sync_policy_group(cluster, group, status = "allowed", bucket = None):
+    cmd = ['sync', 'group', 'create', '--group-id', group, '--status', status]
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to create sync policy group id=%s, bucket=%s' % (group, bucket)
+    return json.loads(result_json)
+
+def set_sync_policy_group_status(cluster, group, status, bucket = None):
+    cmd = ['sync', 'group', 'modify', '--group-id', group, '--status', status]
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to set sync policy group id=%s, bucket=%s' % (group, bucket)
+    return json.loads(result_json)
+
+def get_sync_policy_group(cluster, group, bucket = None):
+    cmd = ['sync', 'group', 'get', '--group-id', group]
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to get sync policy group id=%s, bucket=%s' % (group, bucket)
+    return json.loads(result_json)
+
+def remove_sync_policy_group(cluster, group, bucket = None):
+    cmd = ['sync', 'group', 'remove', '--group-id', group]
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to remove sync policy group id=%s, bucket=%s' % (group, bucket)
+    return json.loads(result_json)
+
+def create_sync_group_flow_symmetrical(cluster, group, flow_id, zones, bucket = None):
+    cmd = ['sync', 'group', 'flow', 'create', '--group-id', group, '--flow-id', flow_id, '--flow-type', 'symmetrical', '--zones=%s' % zones]
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to create sync group flow symmetrical groupid=%s, flow_id=%s, zones=%s, bucket=%s' % (group, flow_id, zones, bucket)
+    return json.loads(result_json)
+
+def create_sync_group_flow_directional(cluster, group, flow_id, src_zones, dest_zones, bucket = None):
+    cmd = ['sync', 'group', 'flow', 'create', '--group-id', group, '--flow-id', flow_id, '--flow-type', 'directional', '--source-zone=%s' % src_zones, '--dest-zone=%s' % dest_zones]
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to create sync group flow directional groupid=%s, flow_id=%s, src_zones=%s, dest_zones=%s, bucket=%s' % (group, flow_id, src_zones, dest_zones, bucket)
+    return json.loads(result_json)
+
+def remove_sync_group_flow_symmetrical(cluster, group, flow_id, zones = None, bucket = None):
+    cmd = ['sync', 'group', 'flow', 'remove', '--group-id', group, '--flow-id', flow_id, '--flow-type', 'symmetrical']
+    if zones:
+        cmd += ['--zones=%s' % zones]
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to remove sync group flow symmetrical groupid=%s, flow_id=%s, zones=%s, bucket=%s' % (group, flow_id, zones, bucket)
+    return json.loads(result_json)
+
+def remove_sync_group_flow_directional(cluster, group, flow_id, src_zones, dest_zones, bucket = None):
+    cmd = ['sync', 'group', 'flow', 'remove', '--group-id', group, '--flow-id', flow_id, '--flow-type', 'directional', '--source-zone=%s' % src_zones, '--dest-zone=%s' % dest_zones]
+    if bucket:
+        cmd += ['--bucket', bucket]
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to remove sync group flow directional groupid=%s, flow_id=%s, src_zones=%s, dest_zones=%s, bucket=%s' % (group, flow_id, src_zones, dest_zones, bucket)
+    return json.loads(result_json)
+
+def create_sync_group_pipe(cluster, group, pipe_id, src_zones, dest_zones, bucket = None, args = []):
+    cmd = ['sync', 'group', 'pipe', 'create', '--group-id', group, '--pipe-id', pipe_id, '--source-zones=%s' % src_zones, '--dest-zones=%s' % dest_zones]
+    if bucket:
+        b_args = '--bucket=' + bucket
+        cmd.append(b_args)
+    if args:
+        cmd += args
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        assert False, 'failed to create sync group pipe groupid=%s, pipe_id=%s, src_zones=%s, dest_zones=%s, bucket=%s' % (group, pipe_id, src_zones, dest_zones, bucket)
+    return json.loads(result_json)
+
+def remove_sync_group_pipe(cluster, group, pipe_id, bucket = None, args = None):
+    cmd = ['sync', 'group', 'pipe', 'remove', '--group-id', group, '--pipe-id', pipe_id]
+    if bucket:
+        b_args = '--bucket=' + bucket
+        cmd.append(b_args)
+    if args:
+        cmd.append(args)
+    (result_json, retcode) = cluster.admin(cmd)
+    if retcode != 0:
+        # note: this function has no src_zones/dest_zones arguments, so the
+        # failure message only reports the values it actually has
+        assert False, 'failed to remove sync group pipe groupid=%s, pipe_id=%s, bucket=%s' % (group, pipe_id, bucket)
+    return json.loads(result_json)
+
+def create_zone_bucket(zone):
+    b_name = gen_bucket_name()
+    log.info('create bucket zone=%s name=%s', zone.name, b_name)
+    bucket = zone.create_bucket(b_name)
+    return bucket
+
+def create_object(zone_conn, bucket, objname, content):
+    k = new_key(zone_conn, bucket.name, objname)
+    k.set_contents_from_string(content)
+
+def create_objects(zone_conn, bucket, obj_arr, content):
+    for objname in obj_arr:
+        create_object(zone_conn, bucket, objname, content)
+
+def check_object_exists(bucket, objname, content = None):
+    k = bucket.get_key(objname)
+    assert_not_equal(k, None)
+    if (content != None):
+        assert_equal(k.get_contents_as_string(encoding='ascii'), content)
+
+def check_objects_exist(bucket, obj_arr, content = None):
+    for objname in obj_arr:
+        check_object_exists(bucket, objname, content)
+
+def check_object_not_exists(bucket, objname):
+    k = bucket.get_key(objname)
+    assert_equal(k, None)
+
+def check_objects_not_exist(bucket, obj_arr):
+    for objname in obj_arr:
+        check_object_not_exists(bucket, objname)
+
+@attr('sync_policy')
+def test_sync_policy_config_zonegroup():
+    """
+    test_sync_policy_config_zonegroup:
+        test configuration of all sync commands
+    """
+    zonegroup = realm.master_zonegroup()
+    zonegroup_meta_checkpoint(zonegroup)
+
+    zonegroup_conns = ZonegroupConns(zonegroup)
+    z1, z2 = zonegroup.zones[0:2]
+    c1, c2 = (z1.cluster, z2.cluster)
+
+    zones = z1.name+","+z2.name
+
+    c1.admin(['sync', 'policy', 'get'])
+
+    # (a) zonegroup level
+    create_sync_policy_group(c1, "sync-group")
+    set_sync_policy_group_status(c1, "sync-group", "enabled")
+    get_sync_policy_group(c1, "sync-group")
+
+    get_sync_policy(c1)
+
+    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones)
+    create_sync_group_flow_directional(c1, "sync-group", "sync-flow2", z1.name, z2.name)
+
+    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
+    get_sync_policy_group(c1, "sync-group")
+
+    zonegroup.period.update(z1, commit=True)
+
+    # (b) bucket level
+    zc1, zc2 = zonegroup_conns.zones[0:2]
+    bucket = create_zone_bucket(zc1)
+    bucket_name = bucket.name
+
+    create_sync_policy_group(c1, "sync-bucket", "allowed", bucket_name)
+    set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucket_name)
+    get_sync_policy_group(c1, "sync-bucket", bucket_name)
+
+    get_sync_policy(c1, bucket_name)
+
+    create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flow1", zones, bucket_name)
+    create_sync_group_flow_directional(c1, "sync-bucket", "sync-flow2", z1.name, z2.name, bucket_name)
+
+    create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zones, zones, bucket_name)
+    get_sync_policy_group(c1, "sync-bucket", bucket_name)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    remove_sync_group_pipe(c1, "sync-bucket", "sync-pipe", bucket_name)
+    remove_sync_group_flow_directional(c1, "sync-bucket", "sync-flow2", z1.name, z2.name, bucket_name)
+    remove_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flow1", zones, bucket_name)
+    remove_sync_policy_group(c1, "sync-bucket", bucket_name)
+
+    get_sync_policy(c1, bucket_name)
+
+    zonegroup_meta_checkpoint(zonegroup)
+
+    remove_sync_group_pipe(c1, "sync-group", "sync-pipe")
+    remove_sync_group_flow_directional(c1, "sync-group", "sync-flow2", z1.name, z2.name)
+    remove_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1")
+    remove_sync_policy_group(c1, "sync-group")
+
+    get_sync_policy(c1)
+
+    zonegroup.period.update(z1, commit=True)
+
+    return
+
+@attr('sync_policy')
+def test_sync_flow_symmetrical_zonegroup_all():
+    """
+    test_sync_flow_symmetrical_zonegroup_all:
+        allows sync from all the zones to all other zones (default case)
+    """
+
+    zonegroup = realm.master_zonegroup()
+    zonegroup_meta_checkpoint(zonegroup)
+
+    zonegroup_conns = ZonegroupConns(zonegroup)
+
+    (zoneA, zoneB) = zonegroup.zones[0:2]
+    (zcA, zcB) = zonegroup_conns.zones[0:2]
+
+    c1 = zoneA.cluster
+
+    c1.admin(['sync', 'policy', 'get'])
+
+    zones = zoneA.name + ',' + zoneB.name
+    create_sync_policy_group(c1, "sync-group")
+    create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones)
+    create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones)
+    set_sync_policy_group_status(c1, "sync-group", "enabled")
+
+    zonegroup.period.update(zoneA, commit=True)
+    get_sync_policy(c1)
+
+    objnames = [ 'obj1', 'obj2' ]
+    content = 'asdasd'
+    buckets = []
+
+    # create bucket & object in all zones
+    bucketA = create_zone_bucket(zcA)
+    buckets.append(bucketA)
+    create_object(zcA, bucketA, objnames[0], content)
+
+    bucketB = create_zone_bucket(zcB)
+    buckets.append(bucketB)
+    create_object(zcB, bucketB, objnames[1], content)
+
+    zonegroup_meta_checkpoint(zonegroup)
+    # 'zonegroup_data_checkpoint' currently fails for the zones not
+    # allowed to sync. So as a workaround, data checkpoint is done
+    # for only the ones configured.
+ zone_data_checkpoint(zoneB, zoneA) + + # verify if objects are synced accross the zone + bucket = get_bucket(zcB, bucketA.name) + check_object_exists(bucket, objnames[0], content) + + bucket = get_bucket(zcA, bucketB.name) + check_object_exists(bucket, objnames[1], content) + + remove_sync_policy_group(c1, "sync-group") + return + +@attr('sync_policy') +def test_sync_flow_symmetrical_zonegroup_select(): + """ + test_sync_flow_symmetrical_zonegroup_select: + allow sync between zoneA & zoneB + verify zoneC doesnt sync the data + """ + + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + + if len(zonegroup.zones) < 3: + raise SkipTest("test_sync_flow_symmetrical_zonegroup_select skipped. Requires 3 or more zones in master zonegroup.") + + zonegroup_meta_checkpoint(zonegroup) + + (zoneA, zoneB, zoneC) = zonegroup.zones[0:3] + (zcA, zcB, zcC) = zonegroup_conns.zones[0:3] + + c1 = zoneA.cluster + + # configure sync policy + zones = zoneA.name + ',' + zoneB.name + c1.admin(['sync', 'policy', 'get']) + create_sync_policy_group(c1, "sync-group") + create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow", zones) + create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones) + set_sync_policy_group_status(c1, "sync-group", "enabled") + + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + buckets = [] + content = 'asdasd' + + # create bucketA & objects in zoneA + objnamesA = [ 'obj1', 'obj2', 'obj3' ] + bucketA = create_zone_bucket(zcA) + buckets.append(bucketA) + create_objects(zcA, bucketA, objnamesA, content) + + # create bucketB & objects in zoneB + objnamesB = [ 'obj4', 'obj5', 'obj6' ] + bucketB = create_zone_bucket(zcB) + buckets.append(bucketB) + create_objects(zcB, bucketB, objnamesB, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + zone_data_checkpoint(zoneA, zoneB) + + # verify if objnamesA synced to only zoneB but not zoneC + bucket = get_bucket(zcB, bucketA.name) + check_objects_exist(bucket, objnamesA, content) + + bucket = get_bucket(zcC, bucketA.name) + check_objects_not_exist(bucket, objnamesA) + + # verify if objnamesB synced to only zoneA but not zoneC + bucket = get_bucket(zcA, bucketB.name) + check_objects_exist(bucket, objnamesB, content) + + bucket = get_bucket(zcC, bucketB.name) + check_objects_not_exist(bucket, objnamesB) + + remove_sync_policy_group(c1, "sync-group") + return + +@attr('sync_policy') +def test_sync_flow_directional_zonegroup_select(): + """ + test_sync_flow_directional_zonegroup_select: + allow sync from only zoneA to zoneB + + verify that data doesn't get synced to zoneC and + zoneA shouldn't sync data from zoneB either + """ + + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + + if len(zonegroup.zones) < 3: + raise SkipTest("test_sync_flow_symmetrical_zonegroup_select skipped. 
Requires 3 or more zones in master zonegroup.") + + zonegroup_meta_checkpoint(zonegroup) + + (zoneA, zoneB, zoneC) = zonegroup.zones[0:3] + (zcA, zcB, zcC) = zonegroup_conns.zones[0:3] + + c1 = zoneA.cluster + + # configure sync policy + zones = zoneA.name + ',' + zoneB.name + c1.admin(['sync', 'policy', 'get']) + create_sync_policy_group(c1, "sync-group") + create_sync_group_flow_directional(c1, "sync-group", "sync-flow", zoneA.name, zoneB.name) + create_sync_group_pipe(c1, "sync-group", "sync-pipe", zoneA.name, zoneB.name) + set_sync_policy_group_status(c1, "sync-group", "enabled") + + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + buckets = [] + content = 'asdasd' + + # create bucketA & objects in zoneA + objnamesA = [ 'obj1', 'obj2', 'obj3' ] + bucketA = create_zone_bucket(zcA) + buckets.append(bucketA) + create_objects(zcA, bucketA, objnamesA, content) + + # create bucketB & objects in zoneB + objnamesB = [ 'obj4', 'obj5', 'obj6' ] + bucketB = create_zone_bucket(zcB) + buckets.append(bucketB) + create_objects(zcB, bucketB, objnamesB, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + # verify if objnamesA synced to only zoneB but not zoneC + bucket = get_bucket(zcB, bucketA.name) + check_objects_exist(bucket, objnamesA, content) + + bucket = get_bucket(zcC, bucketA.name) + check_objects_not_exist(bucket, objnamesA) + + # verify if objnamesB are not synced to either zoneA or zoneC + bucket = get_bucket(zcA, bucketB.name) + check_objects_not_exist(bucket, objnamesB) + + bucket = get_bucket(zcC, bucketB.name) + check_objects_not_exist(bucket, objnamesB) + + """ + verify the same at bucketA level + configure another policy at bucketA level with src and dest + zones specified to zoneA and zoneB resp. + + verify zoneA bucketA syncs to zoneB BucketA but not viceversa. 
+ """ + # reconfigure zonegroup pipe & flow + remove_sync_group_pipe(c1, "sync-group", "sync-pipe") + remove_sync_group_flow_directional(c1, "sync-group", "sync-flow", zoneA.name, zoneB.name) + create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones) + create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones) + + # change state to allowed + set_sync_policy_group_status(c1, "sync-group", "allowed") + + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + # configure sync policy for only bucketA and enable it + create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name) + create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name) + args = ['--source-bucket=*', '--dest-bucket=*'] + create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zoneA.name, zoneB.name, bucketA.name, args) + set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name) + + get_sync_policy(c1, bucketA.name) + + zonegroup_meta_checkpoint(zonegroup) + + # create objects in bucketA in zoneA and zoneB + objnamesC = [ 'obj7', 'obj8', 'obj9' ] + objnamesD = [ 'obj10', 'obj11', 'obj12' ] + create_objects(zcA, bucketA, objnamesC, content) + create_objects(zcB, bucketA, objnamesD, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + # verify that objnamesC are synced to bucketA in zoneB + bucket = get_bucket(zcB, bucketA.name) + check_objects_exist(bucket, objnamesC, content) + + # verify that objnamesD are not synced to bucketA in zoneA + bucket = get_bucket(zcA, bucketA.name) + check_objects_not_exist(bucket, objnamesD) + + remove_sync_policy_group(c1, "sync-bucket", bucketA.name) + remove_sync_policy_group(c1, "sync-group") + return + +@attr('sync_policy') +def test_sync_single_bucket(): + """ + test_sync_single_bucket: + Allow data sync for only bucketA but not for other buckets via + below 2 methods + + (a) zonegroup: symmetrical flow but configure pipe for only bucketA. 
+ (b) bucket level: configure policy for bucketA + """ + + zonegroup = realm.master_zonegroup() + zonegroup_meta_checkpoint(zonegroup) + + zonegroup_conns = ZonegroupConns(zonegroup) + + (zoneA, zoneB) = zonegroup.zones[0:2] + (zcA, zcB) = zonegroup_conns.zones[0:2] + + c1 = zoneA.cluster + + c1.admin(['sync', 'policy', 'get']) + + zones = zoneA.name + ',' + zoneB.name + get_sync_policy(c1) + + objnames = [ 'obj1', 'obj2', 'obj3' ] + content = 'asdasd' + buckets = [] + + # create bucketA & bucketB in zoneA + bucketA = create_zone_bucket(zcA) + buckets.append(bucketA) + bucketB = create_zone_bucket(zcA) + buckets.append(bucketB) + + zonegroup_meta_checkpoint(zonegroup) + + """ + Method (a): configure pipe for only bucketA + """ + # configure sync policy & pipe for only bucketA + create_sync_policy_group(c1, "sync-group") + create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones) + args = ['--source-bucket=' + bucketA.name, '--dest-bucket=' + bucketA.name] + + create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones, None, args) + set_sync_policy_group_status(c1, "sync-group", "enabled") + get_sync_policy(c1) + zonegroup.period.update(zoneA, commit=True) + + sync_info(c1) + + # create objects in bucketA & bucketB + create_objects(zcA, bucketA, objnames, content) + create_object(zcA, bucketB, objnames, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + # verify if bucketA objects are synced + bucket = get_bucket(zcB, bucketA.name) + check_objects_exist(bucket, objnames, content) + + # bucketB objects should not be synced + bucket = get_bucket(zcB, bucketB.name) + check_objects_not_exist(bucket, objnames) + + + """ + Method (b): configure policy at only bucketA level + """ + # reconfigure group pipe + remove_sync_group_pipe(c1, "sync-group", "sync-pipe") + create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones) + + # change state to allowed + set_sync_policy_group_status(c1, "sync-group", "allowed") + + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + + # configure sync policy for only bucketA and enable it + create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name) + create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name) + create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zones, zones, bucketA.name) + set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name) + + get_sync_policy(c1, bucketA.name) + + # create object in bucketA + create_object(zcA, bucketA, objnames[2], content) + + # create object in bucketA too + create_object(zcA, bucketB, objnames[2], content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + # verify if bucketA objects are synced + bucket = get_bucket(zcB, bucketA.name) + check_object_exists(bucket, objnames[2], content) + + # bucketB objects should not be synced + bucket = get_bucket(zcB, bucketB.name) + check_object_not_exists(bucket, objnames[2]) + + remove_sync_policy_group(c1, "sync-bucket", bucketA.name) + remove_sync_policy_group(c1, "sync-group") + return + +@attr('sync_policy') +def test_sync_different_buckets(): + """ + test_sync_different_buckets: + sync zoneA bucketA to zoneB bucketB via below methods + + (a) zonegroup: directional flow but configure pipe for zoneA bucketA to zoneB bucketB + (b) bucket: configure another policy at bucketA level with pipe set to + another bucket(bucketB) in target zone. 
+ + sync zoneA bucketA from zoneB bucketB + (c) configure another policy at bucketA level with pipe set from + another bucket(bucketB) in source zone. + + """ + + zonegroup = realm.master_zonegroup() + zonegroup_meta_checkpoint(zonegroup) + + zonegroup_conns = ZonegroupConns(zonegroup) + + (zoneA, zoneB) = zonegroup.zones[0:2] + (zcA, zcB) = zonegroup_conns.zones[0:2] + zones = zoneA.name + ',' + zoneB.name + + c1 = zoneA.cluster + + c1.admin(['sync', 'policy', 'get']) + + objnames = [ 'obj1', 'obj2' ] + objnamesB = [ 'obj3', 'obj4' ] + content = 'asdasd' + buckets = [] + + # create bucketA & bucketB in zoneA + bucketA = create_zone_bucket(zcA) + buckets.append(bucketA) + bucketB = create_zone_bucket(zcA) + buckets.append(bucketB) + + zonegroup_meta_checkpoint(zonegroup) + + """ + Method (a): zonegroup - configure pipe for only bucketA + """ + # configure pipe from zoneA bucketA to zoneB bucketB + create_sync_policy_group(c1, "sync-group") + create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones) + args = ['--source-bucket=' + bucketA.name, '--dest-bucket=' + bucketB.name] + create_sync_group_pipe(c1, "sync-group", "sync-pipe", zoneA.name, zoneB.name, None, args) + set_sync_policy_group_status(c1, "sync-group", "enabled") + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + # create objects in bucketA + create_objects(zcA, bucketA, objnames, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + # verify that objects are synced to bucketB in zoneB + # but not to bucketA + bucket = get_bucket(zcB, bucketA.name) + check_objects_not_exist(bucket, objnames) + + bucket = get_bucket(zcB, bucketB.name) + check_objects_exist(bucket, objnames, content) + """ + Method (b): configure policy at only bucketA level with pipe + set to bucketB in target zone + """ + + remove_sync_group_pipe(c1, "sync-group", "sync-pipe") + create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones) + + # change state to allowed + set_sync_policy_group_status(c1, "sync-group", "allowed") + + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + # configure sync policy for only bucketA and enable it + create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name) + create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name) + args = ['--source-bucket=*', '--dest-bucket=' + bucketB.name] + create_sync_group_pipe(c1, "sync-bucket", "sync-pipeA", zones, zones, bucketA.name, args) + set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name) + + get_sync_policy(c1, bucketA.name) + + objnamesC = [ 'obj5', 'obj6' ] + + zonegroup_meta_checkpoint(zonegroup) + # create objects in bucketA + create_objects(zcA, bucketA, objnamesC, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + """ + # verify that objects are synced to bucketB in zoneB + # but not to bucketA + """ + bucket = get_bucket(zcB, bucketA.name) + check_objects_not_exist(bucket, objnamesC) + + bucket = get_bucket(zcB, bucketB.name) + check_objects_exist(bucket, objnamesC, content) + + remove_sync_policy_group(c1, "sync-bucket", bucketA.name) + zonegroup_meta_checkpoint(zonegroup) + get_sync_policy(c1, bucketA.name) + + """ + Method (c): configure policy at only bucketA level with pipe + set from bucketB in source zone + verify zoneA bucketA syncs from zoneB BucketB but not bucketA + """ + + # configure sync policy for only bucketA and enable it + create_sync_policy_group(c1, "sync-bucket", 
"allowed", bucketA.name) + create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name) + args = ['--source-bucket=' + bucketB.name, '--dest-bucket=' + '*'] + create_sync_group_pipe(c1, "sync-bucket", "sync-pipe", zones, zones, bucketA.name, args) + set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name) + + get_sync_policy(c1, bucketA.name) + + # create objects in bucketA & B in ZoneB + objnamesD = [ 'obj7', 'obj8' ] + objnamesE = [ 'obj9', 'obj10' ] + + create_objects(zcB, bucketA, objnamesD, content) + create_objects(zcB, bucketB, objnamesE, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneA, zoneB) + """ + # verify that objects from only bucketB are synced to + # bucketA in zoneA + """ + bucket = get_bucket(zcA, bucketA.name) + check_objects_not_exist(bucket, objnamesD) + check_objects_exist(bucket, objnamesE, content) + + remove_sync_policy_group(c1, "sync-bucket", bucketA.name) + remove_sync_policy_group(c1, "sync-group") + return + +@attr('sync_policy') +def test_sync_multiple_buckets_to_single(): + """ + test_sync_multiple_buckets_to_single: + directional flow + (a) pipe: sync zoneA bucketA,bucketB to zoneB bucketB + + (b) configure another policy at bucketA level with pipe configured + to sync from multiple buckets (bucketA & bucketB) + + verify zoneA bucketA & bucketB syncs to zoneB BucketB + """ + + zonegroup = realm.master_zonegroup() + zonegroup_meta_checkpoint(zonegroup) + + zonegroup_conns = ZonegroupConns(zonegroup) + + (zoneA, zoneB) = zonegroup.zones[0:2] + (zcA, zcB) = zonegroup_conns.zones[0:2] + zones = zoneA.name + ',' + zoneB.name + + c1 = zoneA.cluster + + c1.admin(['sync', 'policy', 'get']) + + objnamesA = [ 'obj1', 'obj2' ] + objnamesB = [ 'obj3', 'obj4' ] + content = 'asdasd' + buckets = [] + + # create bucketA & bucketB in zoneA + bucketA = create_zone_bucket(zcA) + buckets.append(bucketA) + bucketB = create_zone_bucket(zcA) + buckets.append(bucketB) + + zonegroup_meta_checkpoint(zonegroup) + + # configure pipe from zoneA bucketA,bucketB to zoneB bucketB + create_sync_policy_group(c1, "sync-group") + create_sync_group_flow_directional(c1, "sync-group", "sync-flow", zoneA.name, zoneB.name) + source_buckets = [ bucketA.name, bucketB.name ] + for source_bucket in source_buckets: + args = ['--source-bucket=' + source_bucket, '--dest-bucket=' + bucketB.name] + create_sync_group_pipe(c1, "sync-group", "sync-pipe-%s" % source_bucket, zoneA.name, zoneB.name, None, args) + + set_sync_policy_group_status(c1, "sync-group", "enabled") + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + # create objects in bucketA & bucketB + create_objects(zcA, bucketA, objnamesA, content) + create_objects(zcA, bucketB, objnamesB, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + # verify that both zoneA bucketA & bucketB objects are synced to + # bucketB in zoneB but not to bucketA + bucket = get_bucket(zcB, bucketA.name) + check_objects_not_exist(bucket, objnamesA) + check_objects_not_exist(bucket, objnamesB) + + bucket = get_bucket(zcB, bucketB.name) + check_objects_exist(bucket, objnamesA, content) + check_objects_exist(bucket, objnamesB, content) + + """ + Method (b): configure at bucket level + """ + # reconfigure pipe & flow + for source_bucket in source_buckets: + remove_sync_group_pipe(c1, "sync-group", "sync-pipe-%s" % source_bucket) + remove_sync_group_flow_directional(c1, "sync-group", "sync-flow", zoneA.name, zoneB.name) + 
create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones) + create_sync_group_pipe(c1, "sync-group", "sync-pipe", zones, zones) + + # change state to allowed + set_sync_policy_group_status(c1, "sync-group", "allowed") + + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + objnamesC = [ 'obj5', 'obj6' ] + objnamesD = [ 'obj7', 'obj8' ] + + # configure sync policy for only bucketA and enable it + create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name) + create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name) + source_buckets = [ bucketA.name, bucketB.name ] + for source_bucket in source_buckets: + args = ['--source-bucket=' + source_bucket, '--dest-bucket=' + '*'] + create_sync_group_pipe(c1, "sync-bucket", "sync-pipe-%s" % source_bucket, zoneA.name, zoneB.name, bucketA.name, args) + + set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name) + + get_sync_policy(c1) + + zonegroup_meta_checkpoint(zonegroup) + # create objects in bucketA + create_objects(zcA, bucketA, objnamesC, content) + create_objects(zcA, bucketB, objnamesD, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + # verify that both zoneA bucketA & bucketB objects are synced to + # bucketA in zoneB but not to bucketB + bucket = get_bucket(zcB, bucketB.name) + check_objects_not_exist(bucket, objnamesC) + check_objects_not_exist(bucket, objnamesD) + + bucket = get_bucket(zcB, bucketA.name) + check_objects_exist(bucket, objnamesD, content) + check_objects_exist(bucket, objnamesD, content) + + remove_sync_policy_group(c1, "sync-bucket", bucketA.name) + remove_sync_policy_group(c1, "sync-group") + return + +@attr('sync_policy') +def test_sync_single_bucket_to_multiple(): + """ + test_sync_single_bucket_to_multiple: + directional flow + (a) pipe: sync zoneA bucketA to zoneB bucketA & bucketB + + (b) configure another policy at bucketA level with pipe configured + to sync to multiple buckets (bucketA & bucketB) + + verify zoneA bucketA syncs to zoneB bucketA & bucketB + """ + + zonegroup = realm.master_zonegroup() + zonegroup_meta_checkpoint(zonegroup) + + zonegroup_conns = ZonegroupConns(zonegroup) + + (zoneA, zoneB) = zonegroup.zones[0:2] + (zcA, zcB) = zonegroup_conns.zones[0:2] + zones = zoneA.name + ',' + zoneB.name + + c1 = zoneA.cluster + + c1.admin(['sync', 'policy', 'get']) + + objnamesA = [ 'obj1', 'obj2' ] + content = 'asdasd' + buckets = [] + + # create bucketA & bucketB in zoneA + bucketA = create_zone_bucket(zcA) + buckets.append(bucketA) + bucketB = create_zone_bucket(zcA) + buckets.append(bucketB) + + zonegroup_meta_checkpoint(zonegroup) + + # configure pipe from zoneA bucketA to zoneB bucketA, bucketB + create_sync_policy_group(c1, "sync-group") + create_sync_group_flow_symmetrical(c1, "sync-group", "sync-flow1", zones) + + dest_buckets = [ bucketA.name, bucketB.name ] + for dest_bucket in dest_buckets: + args = ['--source-bucket=' + bucketA.name, '--dest-bucket=' + dest_bucket] + create_sync_group_pipe(c1, "sync-group", "sync-pipe-%s" % dest_bucket, zoneA.name, zoneB.name, None, args) + + create_sync_group_pipe(c1, "sync-group", "sync-pipe", zoneA.name, zoneB.name, None, args) + set_sync_policy_group_status(c1, "sync-group", "enabled") + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + # create objects in bucketA + create_objects(zcA, bucketA, objnamesA, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + # verify that 
objects from zoneA bucketA are synced to both + # bucketA & bucketB in zoneB + bucket = get_bucket(zcB, bucketA.name) + check_objects_exist(bucket, objnamesA, content) + + bucket = get_bucket(zcB, bucketB.name) + check_objects_exist(bucket, objnamesA, content) + + """ + Method (b): configure at bucket level + """ + remove_sync_group_pipe(c1, "sync-group", "sync-pipe") + create_sync_group_pipe(c1, "sync-group", "sync-pipe", '*', '*') + + # change state to allowed + set_sync_policy_group_status(c1, "sync-group", "allowed") + + zonegroup.period.update(zoneA, commit=True) + get_sync_policy(c1) + + objnamesB = [ 'obj3', 'obj4' ] + + # configure sync policy for only bucketA and enable it + create_sync_policy_group(c1, "sync-bucket", "allowed", bucketA.name) + create_sync_group_flow_symmetrical(c1, "sync-bucket", "sync-flowA", zones, bucketA.name) + dest_buckets = [ bucketA.name, bucketB.name ] + for dest_bucket in dest_buckets: + args = ['--source-bucket=' + '*', '--dest-bucket=' + dest_bucket] + create_sync_group_pipe(c1, "sync-bucket", "sync-pipe-%s" % dest_bucket, zoneA.name, zoneB.name, bucketA.name, args) + + set_sync_policy_group_status(c1, "sync-bucket", "enabled", bucketA.name) + + get_sync_policy(c1) + + zonegroup_meta_checkpoint(zonegroup) + # create objects in bucketA + create_objects(zcA, bucketA, objnamesB, content) + + zonegroup_meta_checkpoint(zonegroup) + zone_data_checkpoint(zoneB, zoneA) + + # verify that objects from zoneA bucketA are synced to both + # bucketA & bucketB in zoneB + bucket = get_bucket(zcB, bucketA.name) + check_objects_exist(bucket, objnamesB, content) + + bucket = get_bucket(zcB, bucketB.name) + check_objects_exist(bucket, objnamesB, content) + + remove_sync_policy_group(c1, "sync-bucket", bucketA.name) + remove_sync_policy_group(c1, "sync-group") + return diff --git a/src/test/rgw/rgw_multi/tests_az.py b/src/test/rgw/rgw_multi/tests_az.py new file mode 100644 index 000000000..13ec832a2 --- /dev/null +++ b/src/test/rgw/rgw_multi/tests_az.py @@ -0,0 +1,597 @@ +import logging + +from nose import SkipTest +from nose.tools import assert_not_equal, assert_equal + +from boto.s3.deletemarker import DeleteMarker + +from .tests import get_realm, \ + ZonegroupConns, \ + zonegroup_meta_checkpoint, \ + zone_meta_checkpoint, \ + zone_bucket_checkpoint, \ + zone_data_checkpoint, \ + zonegroup_bucket_checkpoint, \ + check_bucket_eq, \ + gen_bucket_name, \ + get_user, \ + get_tenant + +from .zone_az import print_connection_info + + +# configure logging for the tests module +log = logging.getLogger(__name__) + + +########################################## +# utility functions for archive zone tests +########################################## + +def check_az_configured(): + """check if at least one archive zone exist""" + realm = get_realm() + zonegroup = realm.master_zonegroup() + + az_zones = zonegroup.zones_by_type.get("archive") + if az_zones is None or len(az_zones) != 1: + raise SkipTest("Requires one archive zone") + + +def is_az_zone(zone_conn): + """check if a specific zone is archive zone""" + if not zone_conn: + return False + return zone_conn.zone.tier_type() == "archive" + + +def init_env(): + """initialize the environment""" + check_az_configured() + + realm = get_realm() + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + + zonegroup_meta_checkpoint(zonegroup) + + az_zones = [] + zones = [] + for conn in zonegroup_conns.zones: + if is_az_zone(conn): + zone_meta_checkpoint(conn.zone) + az_zones.append(conn) + elif not 
conn.zone.is_read_only(): + zones.append(conn) + + assert_not_equal(len(zones), 0) + assert_not_equal(len(az_zones), 0) + return zones, az_zones + + +def zone_full_checkpoint(target_zone, source_zone): + zone_meta_checkpoint(target_zone) + zone_data_checkpoint(target_zone, source_zone) + + +def check_bucket_exists_on_zone(zone, bucket_name): + try: + zone.conn.get_bucket(bucket_name) + except: + return False + return True + + +def check_key_exists(key): + try: + key.get_contents_as_string() + except: + return False + return True + + +def get_versioning_status(bucket): + res = bucket.get_versioning_status() + key = 'Versioning' + if not key in res: + return None + else: + return res[key] + + +def get_versioned_objs(bucket): + b = [] + for b_entry in bucket.list_versions(): + if isinstance(b_entry, DeleteMarker): + continue + d = {} + d['version_id'] = b_entry.version_id + d['size'] = b_entry.size + d['etag'] = b_entry.etag + d['is_latest'] = b_entry.is_latest + b.append({b_entry.key:d}) + return b + + +def get_versioned_entries(bucket): + dm = [] + ver = [] + for b_entry in bucket.list_versions(): + if isinstance(b_entry, DeleteMarker): + d = {} + d['version_id'] = b_entry.version_id + d['is_latest'] = b_entry.is_latest + dm.append({b_entry.name:d}) + else: + d = {} + d['version_id'] = b_entry.version_id + d['size'] = b_entry.size + d['etag'] = b_entry.etag + d['is_latest'] = b_entry.is_latest + ver.append({b_entry.key:d}) + return (dm, ver) + + +def get_number_buckets_by_zone(zone): + return len(zone.conn.get_all_buckets()) + + +def get_bucket_names_by_zone(zone): + return [b.name for b in zone.conn.get_all_buckets()] + + +def get_full_bucket_name(partial_bucket_name, bucket_names_az): + full_bucket_name = None + for bucket_name in bucket_names_az: + if bucket_name.startswith(partial_bucket_name): + full_bucket_name = bucket_name + break + return full_bucket_name + + +#################### +# archive zone tests +#################### + + +def test_az_info(): + """ log information for manual testing """ + return SkipTest("only used in manual testing") + zones, az_zones = init_env() + realm = get_realm() + zonegroup = realm.master_zonegroup() + bucket_name = gen_bucket_name() + # create bucket on the first of the rados zones + bucket = zones[0].create_bucket(bucket_name) + # create objects in the bucket + number_of_objects = 3 + for i in range(number_of_objects): + key = bucket.new_key(str(i)) + key.set_contents_from_string('bar') + print('Zonegroup: ' + zonegroup.name) + print('user: ' + get_user()) + print('tenant: ' + get_tenant()) + print('Master Zone') + print_connection_info(zones[0].conn) + print('Archive Zone') + print_connection_info(az_zones[0].conn) + print('Bucket: ' + bucket_name) + + +def test_az_create_empty_bucket(): + """ test empty bucket replication """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # create bucket on the non archive zone + zones[0].create_bucket(bucket_name) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # bucket exist on the archive zone + p = check_bucket_exists_on_zone(az_zones[0], bucket_name) + assert_equal(p, True) + + +def test_az_check_empty_bucket_versioning(): + """ test bucket vesioning with empty bucket """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # create bucket on the non archive zone + bucket = zones[0].create_bucket(bucket_name) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # get bucket on archive zone + bucket_az = az_zones[0].conn.get_bucket(bucket_name) 
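+    # Illustrative sketch (an assumption about boto's behaviour, not part of
+    # the upstream test): Bucket.get_versioning_status() returns a dict that
+    # is empty while a bucket has never been versioned and gains a
+    # 'Versioning' entry (e.g. {'Versioning': 'Enabled'}) once versioning is
+    # turned on, which is why the get_versioning_status() helper above maps a
+    # missing 'Versioning' key to None:
+    #
+    #   res = bucket_az.get_versioning_status()   # {} or {'Versioning': 'Enabled'}
+    #   status = res.get('Versioning')            # None until the archive zone enables it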
+ # check for non bucket versioning + p1 = get_versioning_status(bucket) is None + assert_equal(p1, True) + p2 = get_versioning_status(bucket_az) is None + assert_equal(p2, True) + + +def test_az_object_replication(): + """ test object replication """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # create bucket on the non archive zone + bucket = zones[0].create_bucket(bucket_name) + key = bucket.new_key("foo") + key.set_contents_from_string("bar") + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check object on archive zone + bucket_az = az_zones[0].conn.get_bucket(bucket_name) + key_az = bucket_az.get_key("foo") + p1 = key_az.get_contents_as_string(encoding='ascii') == "bar" + assert_equal(p1, True) + + +def test_az_object_replication_versioning(): + """ test object replication versioning """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # create object on the non archive zone + bucket = zones[0].create_bucket(bucket_name) + key = bucket.new_key("foo") + key.set_contents_from_string("bar") + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check object content on archive zone + bucket_az = az_zones[0].conn.get_bucket(bucket_name) + key_az = bucket_az.get_key("foo") + p1 = key_az.get_contents_as_string(encoding='ascii') == "bar" + assert_equal(p1, True) + # grab object versioning and etag + for b_version in bucket.list_versions(): + b_version_id = b_version.version_id + b_version_etag = b_version.etag + for b_az_version in bucket_az.list_versions(): + b_az_version_id = b_az_version.version_id + b_az_version_etag = b_az_version.etag + # check + p2 = b_version_id == 'null' + assert_equal(p2, True) + p3 = b_az_version_id != 'null' + assert_equal(p3, True) + p4 = b_version_etag == b_az_version_etag + assert_equal(p4, True) + + +def test_az_lazy_activation_of_versioned_bucket(): + """ test lazy activation of versioned bucket """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # create object on the non archive zone + bucket = zones[0].create_bucket(bucket_name) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # get bucket on archive zone + bucket_az = az_zones[0].conn.get_bucket(bucket_name) + # check for non bucket versioning + p1 = get_versioning_status(bucket) is None + assert_equal(p1, True) + p2 = get_versioning_status(bucket_az) is None + assert_equal(p2, True) + # create object on non archive zone + key = bucket.new_key("foo") + key.set_contents_from_string("bar") + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check lazy versioned buckets + p3 = get_versioning_status(bucket) is None + assert_equal(p3, True) + p4 = get_versioning_status(bucket_az) == 'Enabled' + assert_equal(p4, True) + + +def test_az_archive_zone_double_object_replication_versioning(): + """ test archive zone double object replication versioning """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # create object on the non archive zone + bucket = zones[0].create_bucket(bucket_name) + key = bucket.new_key("foo") + key.set_contents_from_string("bar") + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # get bucket on archive zone + bucket_az = az_zones[0].conn.get_bucket(bucket_name) + # check for non bucket versioning + p1 = get_versioning_status(bucket) is None + assert_equal(p1, True) + p2 = get_versioning_status(bucket_az) == 'Enabled' + assert_equal(p2, True) + # overwrite object on non archive zone + key = bucket.new_key("foo") + 
key.set_contents_from_string("ouch") + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check lazy versioned buckets + p3 = get_versioning_status(bucket) is None + assert_equal(p3, True) + p4 = get_versioning_status(bucket_az) == 'Enabled' + assert_equal(p4, True) + # get versioned objects + objs = get_versioned_objs(bucket) + objs_az = get_versioned_objs(bucket_az) + # check version_id, size, and is_latest on non archive zone + p5 = objs[0]['foo']['version_id'] == 'null' + assert_equal(p5, True) + p6 = objs[0]['foo']['size'] == 4 + assert_equal(p6, True) + p7 = objs[0]['foo']['is_latest'] == True + assert_equal(p7, True) + # check version_id, size, is_latest on archive zone + latest_obj_az_etag = None + for obj_az in objs_az: + current_obj_az = obj_az['foo'] + if current_obj_az['is_latest'] == True: + p8 = current_obj_az['size'] == 4 + assert_equal(p8, True) + latest_obj_az_etag = current_obj_az['etag'] + else: + p9 = current_obj_az['size'] == 3 + assert_equal(p9, True) + assert_not_equal(current_obj_az['version_id'], 'null') + # check last versions' etags + p10 = objs[0]['foo']['etag'] == latest_obj_az_etag + assert_equal(p10, True) + + +def test_az_deleted_object_replication(): + """ test zone deleted object replication """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # create object on the non archive zone + bucket = zones[0].create_bucket(bucket_name) + key = bucket.new_key("foo") + key.set_contents_from_string("bar") + p1 = key.get_contents_as_string(encoding='ascii') == "bar" + assert_equal(p1, True) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # update object on non archive zone + key.set_contents_from_string("soup") + p2 = key.get_contents_as_string(encoding='ascii') == "soup" + assert_equal(p2, True) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # delete object on non archive zone + key.delete() + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check object on non archive zone + p3 = check_key_exists(key) == False + assert_equal(p3, True) + # check objects on archive zone + bucket_az = az_zones[0].conn.get_bucket(bucket_name) + key_az = bucket_az.get_key("foo") + p4 = check_key_exists(key_az) == True + assert_equal(p4, True) + p5 = key_az.get_contents_as_string(encoding='ascii') == "soup" + assert_equal(p5, True) + b_ver_az = get_versioned_objs(bucket_az) + p6 = len(b_ver_az) == 2 + assert_equal(p6, True) + + +def test_az_bucket_renaming_on_empty_bucket_deletion(): + """ test bucket renaming on empty bucket deletion """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # grab number of buckets on non archive zone + num_buckets = get_number_buckets_by_zone(zones[0]) + # grab number of buckets on archive zone + num_buckets_az = get_number_buckets_by_zone(az_zones[0]) + # create bucket on non archive zone + bucket = zones[0].create_bucket(bucket_name) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # delete bucket in non archive zone + zones[0].delete_bucket(bucket_name) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check no new buckets on non archive zone + p1 = get_number_buckets_by_zone(zones[0]) == num_buckets + assert_equal(p1, True) + # check non deletion on bucket on archive zone + p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1) + assert_equal(p2, True) + # check bucket renaming + bucket_names_az = get_bucket_names_by_zone(az_zones[0]) + new_bucket_name = bucket_name + '-deleted-' + p3 = 
any(bucket_name.startswith(new_bucket_name) for bucket_name in bucket_names_az) + assert_equal(p3, True) + + +def test_az_old_object_version_in_archive_zone(): + """ test old object version in archive zone """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # grab number of buckets on non archive zone + num_buckets = get_number_buckets_by_zone(zones[0]) + # grab number of buckets on archive zone + num_buckets_az = get_number_buckets_by_zone(az_zones[0]) + # create bucket on non archive zone + bucket = zones[0].create_bucket(bucket_name) + # create object on non archive zone + key = bucket.new_key("foo") + key.set_contents_from_string("zero") + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # save object version on archive zone + bucket_az = az_zones[0].conn.get_bucket(bucket_name) + b_ver_az = get_versioned_objs(bucket_az) + obj_az_version_id = b_ver_az[0]['foo']['version_id'] + # update object on non archive zone + key.set_contents_from_string("one") + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # delete object on non archive zone + key.delete() + # delete bucket on non archive zone + zones[0].delete_bucket(bucket_name) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check same buckets on non archive zone + p1 = get_number_buckets_by_zone(zones[0]) == num_buckets + assert_equal(p1, True) + # check for new bucket on archive zone + p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1) + assert_equal(p2, True) + # get new bucket name on archive zone + bucket_names_az = get_bucket_names_by_zone(az_zones[0]) + new_bucket_name_az = get_full_bucket_name(bucket_name + '-deleted-', bucket_names_az) + p3 = new_bucket_name_az is not None + assert_equal(p3, True) + # check number of objects on archive zone + new_bucket_az = az_zones[0].conn.get_bucket(new_bucket_name_az) + new_b_ver_az = get_versioned_objs(new_bucket_az) + p4 = len(new_b_ver_az) == 2 + assert_equal(p4, True) + # check versioned objects on archive zone + new_key_az = new_bucket_az.get_key("foo", version_id=obj_az_version_id) + p5 = new_key_az.get_contents_as_string(encoding='ascii') == "zero" + assert_equal(p5, True) + new_key_latest_az = new_bucket_az.get_key("foo") + p6 = new_key_latest_az.get_contents_as_string(encoding='ascii') == "one" + assert_equal(p6, True) + + +def test_az_force_bucket_renaming_if_same_bucket_name(): + """ test force bucket renaming if same bucket name """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # grab number of buckets on non archive zone + num_buckets = get_number_buckets_by_zone(zones[0]) + # grab number of buckets on archive zone + num_buckets_az = get_number_buckets_by_zone(az_zones[0]) + # create bucket on non archive zone + bucket = zones[0].create_bucket(bucket_name) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check same buckets on non archive zone + p1 = get_number_buckets_by_zone(zones[0]) == (num_buckets + 1) + assert_equal(p1, True) + # check for new bucket on archive zone + p2 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1) + assert_equal(p2, True) + # delete bucket on non archive zone + zones[0].delete_bucket(bucket_name) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check number of buckets on non archive zone + p3 = get_number_buckets_by_zone(zones[0]) == num_buckets + assert_equal(p3, True) + # check number of buckets on archive zone + p4 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 1) + 
assert_equal(p4, True) + # get new bucket name on archive zone + bucket_names_az = get_bucket_names_by_zone(az_zones[0]) + new_bucket_name_az = get_full_bucket_name(bucket_name + '-deleted-', bucket_names_az) + p5 = new_bucket_name_az is not None + assert_equal(p5, True) + # create bucket on non archive zone + _ = zones[0].create_bucket(new_bucket_name_az) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check number of buckets on non archive zone + p6 = get_number_buckets_by_zone(zones[0]) == (num_buckets + 1) + assert_equal(p6, True) + # check number of buckets on archive zone + p7 = get_number_buckets_by_zone(az_zones[0]) == (num_buckets_az + 2) + assert_equal(p7, True) + + +def test_az_versioning_support_in_zones(): + """ test versioning support on zones """ + zones, az_zones = init_env() + bucket_name = gen_bucket_name() + # create bucket on non archive zone + bucket = zones[0].create_bucket(bucket_name) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # get bucket on archive zone + bucket_az = az_zones[0].conn.get_bucket(bucket_name) + # check non versioned buckets + p1 = get_versioning_status(bucket) is None + assert_equal(p1, True) + p2 = get_versioning_status(bucket_az) is None + assert_equal(p2, True) + # create object on non archive zone + key = bucket.new_key("foo") + key.set_contents_from_string("zero") + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check bucket versioning + p3 = get_versioning_status(bucket) is None + assert_equal(p3, True) + p4 = get_versioning_status(bucket_az) == 'Enabled' + assert_equal(p4, True) + # enable bucket versioning on non archive zone + bucket.configure_versioning(True) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check bucket versioning + p5 = get_versioning_status(bucket) == 'Enabled' + assert_equal(p5, True) + p6 = get_versioning_status(bucket_az) == 'Enabled' + assert_equal(p6, True) + # delete object on non archive zone + key.delete() + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check delete-markers and versions on non archive zone + (b_dm, b_ver) = get_versioned_entries(bucket) + p7 = len(b_dm) == 1 + assert_equal(p7, True) + p8 = len(b_ver) == 1 + assert_equal(p8, True) + # check delete-markers and versions on archive zone + (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az) + p9 = len(b_dm_az) == 1 + assert_equal(p9, True) + p10 = len(b_ver_az) == 1 + assert_equal(p10, True) + # delete delete-marker on non archive zone + dm_version_id = b_dm[0]['foo']['version_id'] + bucket.delete_key("foo", version_id=dm_version_id) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check delete-markers and versions on non archive zone + (b_dm, b_ver) = get_versioned_entries(bucket) + p11 = len(b_dm) == 0 + assert_equal(p11, True) + p12 = len(b_ver) == 1 + assert_equal(p12, True) + # check delete-markers and versions on archive zone + (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az) + p13 = len(b_dm_az) == 1 + assert_equal(p13, True) + p14 = len(b_ver_az) == 1 + assert_equal(p14, True) + # delete delete-marker on archive zone + dm_az_version_id = b_dm_az[0]['foo']['version_id'] + bucket_az.delete_key("foo", version_id=dm_az_version_id) + # sync + zone_full_checkpoint(az_zones[0].zone, zones[0].zone) + # check delete-markers and versions on non archive zone + (b_dm, b_ver) = get_versioned_entries(bucket) + p15 = len(b_dm) == 0 + assert_equal(p15, True) + p16 = len(b_ver) == 1 + assert_equal(p16, True) + # check 
delete-markers and versions on archive zone + (b_dm_az, b_ver_az) = get_versioned_entries(bucket_az) + p17 = len(b_dm_az) == 0 + assert_equal(p17, True) + p17 = len(b_ver_az) == 1 + assert_equal(p17, True) + # check body in zones + obj_version_id = b_ver[0]['foo']['version_id'] + key = bucket.get_key("foo", version_id=obj_version_id) + p18 = key.get_contents_as_string(encoding='ascii') == "zero" + assert_equal(p18, True) + obj_az_version_id = b_ver_az[0]['foo']['version_id'] + key_az = bucket_az.get_key("foo", version_id=obj_az_version_id) + p19 = key_az.get_contents_as_string(encoding='ascii') == "zero" + assert_equal(p19, True) diff --git a/src/test/rgw/rgw_multi/tests_es.py b/src/test/rgw/rgw_multi/tests_es.py new file mode 100644 index 000000000..08c11718b --- /dev/null +++ b/src/test/rgw/rgw_multi/tests_es.py @@ -0,0 +1,276 @@ +import json +import logging + +import boto +import boto.s3.connection + +import datetime +import dateutil + +from itertools import zip_longest # type: ignore + +from nose.tools import eq_ as eq + +from .multisite import * +from .tests import * +from .zone_es import * + +log = logging.getLogger(__name__) + + +def check_es_configured(): + realm = get_realm() + zonegroup = realm.master_zonegroup() + + es_zones = zonegroup.zones_by_type.get("elasticsearch") + if not es_zones: + raise SkipTest("Requires at least one ES zone") + +def is_es_zone(zone_conn): + if not zone_conn: + return False + + return zone_conn.zone.tier_type() == "elasticsearch" + +def verify_search(bucket_name, src_keys, result_keys, f): + check_keys = [] + for k in src_keys: + if bucket_name: + if bucket_name != k.bucket.name: + continue + if f(k): + check_keys.append(k) + check_keys.sort(key = lambda l: (l.bucket.name, l.name, l.version_id)) + + log.debug('check keys:' + dump_json(check_keys)) + log.debug('result keys:' + dump_json(result_keys)) + + for k1, k2 in zip_longest(check_keys, result_keys): + assert k1 + assert k2 + check_object_eq(k1, k2) + +def do_check_mdsearch(conn, bucket, src_keys, req_str, src_filter): + if bucket: + bucket_name = bucket.name + else: + bucket_name = '' + req = MDSearch(conn, bucket_name, req_str) + result_keys = req.search(sort_key = lambda k: (k.bucket.name, k.name, k.version_id)) + verify_search(bucket_name, src_keys, result_keys, src_filter) + +def init_env(create_obj, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = None): + check_es_configured() + + realm = get_realm() + zonegroup = realm.master_zonegroup() + zonegroup_conns = ZonegroupConns(zonegroup) + buckets, zone_bucket = create_bucket_per_zone(zonegroup_conns, buckets_per_zone = buckets_per_zone) + + if bucket_init_cb: + for zone_conn, bucket in zone_bucket: + bucket_init_cb(zone_conn, bucket) + + src_keys = [] + + owner = None + + obj_prefix=''.join(random.choice(string.ascii_lowercase) for _ in range(6)) + + # don't wait for meta sync just yet + for zone, bucket in zone_bucket: + for count in range(num_keys): + objname = obj_prefix + str(count) + k = new_key(zone, bucket.name, objname) + # k.set_contents_from_string(content + 'x' * count) + if not create_obj: + continue + + create_obj(k, count) + + if not owner: + for list_key in bucket.list_versions(): + owner = list_key.owner + break + + k = bucket.get_key(k.name, version_id = k.version_id) + k.owner = owner # owner is not set when doing get_key() + + src_keys.append(k) + + zonegroup_meta_checkpoint(zonegroup) + + sources = [] + targets = [] + for target_conn in zonegroup_conns.zones: + if not is_es_zone(target_conn): + 
sources.append(target_conn) + continue + + targets.append(target_conn) + + buckets = [] + # make sure all targets are synced + for source_conn, bucket in zone_bucket: + buckets.append(bucket) + for target_conn in targets: + zone_bucket_checkpoint(target_conn.zone, source_conn.zone, bucket.name) + + return targets, sources, buckets, src_keys + +def test_es_object_search(): + min_size = 10 + content = 'a' * min_size + + def create_obj(k, i): + k.set_contents_from_string(content + 'x' * i) + + targets, _, buckets, src_keys = init_env(create_obj, num_keys = 5, buckets_per_zone = 2) + + for target_conn in targets: + + # bucket checks + for bucket in buckets: + # check name + do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'bucket == ' + bucket.name, lambda k: k.bucket.name == bucket.name) + + # check on all buckets + for key in src_keys: + # limiting to checking specific key name, otherwise could get results from + # other runs / tests + do_check_mdsearch(target_conn.conn, None, src_keys , 'name == ' + key.name, lambda k: k.name == key.name) + + # check on specific bucket + for bucket in buckets: + for key in src_keys: + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name < ' + key.name, lambda k: k.name < key.name) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name <= ' + key.name, lambda k: k.name <= key.name) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name == ' + key.name, lambda k: k.name == key.name) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name >= ' + key.name, lambda k: k.name >= key.name) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name > ' + key.name, lambda k: k.name > key.name) + + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'name == ' + src_keys[0].name + ' or name >= ' + src_keys[2].name, + lambda k: k.name == src_keys[0].name or k.name >= src_keys[2].name) + + # check etag + for key in src_keys: + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag < ' + key.etag[1:-1], lambda k: k.etag < key.etag) + for key in src_keys: + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag == ' + key.etag[1:-1], lambda k: k.etag == key.etag) + for key in src_keys: + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'etag > ' + key.etag[1:-1], lambda k: k.etag > key.etag) + + # check size + for key in src_keys: + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size < ' + str(key.size), lambda k: k.size < key.size) + for key in src_keys: + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size <= ' + str(key.size), lambda k: k.size <= key.size) + for key in src_keys: + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size == ' + str(key.size), lambda k: k.size == key.size) + for key in src_keys: + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size >= ' + str(key.size), lambda k: k.size >= key.size) + for key in src_keys: + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'size > ' + str(key.size), lambda k: k.size > key.size) + +def date_from_str(s): + return dateutil.parser.parse(s) + +def test_es_object_search_custom(): + min_size = 10 + content = 'a' * min_size + + def bucket_init(zone_conn, bucket): + req = MDSearchConfig(zone_conn.conn, bucket.name) + req.set_config('x-amz-meta-foo-str; string, x-amz-meta-foo-int; int, x-amz-meta-foo-date; date') + + def create_obj(k, i): + date = datetime.datetime.now() + 
datetime.timedelta(seconds=1) * i + date_str = date.strftime('%Y-%m-%dT%H:%M:%S.%f')[:-3] + 'Z' + k.set_contents_from_string(content + 'x' * i, headers = { 'X-Amz-Meta-Foo-Str': str(i * 5), + 'X-Amz-Meta-Foo-Int': str(i * 5), + 'X-Amz-Meta-Foo-Date': date_str}) + + targets, _, buckets, src_keys = init_env(create_obj, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = bucket_init) + + + for target_conn in targets: + + # bucket checks + for bucket in buckets: + str_vals = [] + for key in src_keys: + # check string values + val = key.get_metadata('foo-str') + str_vals.append(val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str < ' + val, lambda k: k.get_metadata('foo-str') < val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str <= ' + val, lambda k: k.get_metadata('foo-str') <= val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str == ' + val, lambda k: k.get_metadata('foo-str') == val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str >= ' + val, lambda k: k.get_metadata('foo-str') >= val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str > ' + val, lambda k: k.get_metadata('foo-str') > val) + + # check int values + sval = key.get_metadata('foo-int') + val = int(sval) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int < ' + sval, lambda k: int(k.get_metadata('foo-int')) < val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int <= ' + sval, lambda k: int(k.get_metadata('foo-int')) <= val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int == ' + sval, lambda k: int(k.get_metadata('foo-int')) == val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int >= ' + sval, lambda k: int(k.get_metadata('foo-int')) >= val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-int > ' + sval, lambda k: int(k.get_metadata('foo-int')) > val) + + # check int values + sval = key.get_metadata('foo-date') + val = date_from_str(sval) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date < ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) < val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date <= ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) <= val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date == ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) == val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date >= ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) >= val) + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-date > ' + sval, lambda k: date_from_str(k.get_metadata('foo-date')) > val) + + # 'or' query + for i in range(len(src_keys) // 2): + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str <= ' + str_vals[i] + ' or x-amz-meta-foo-str >= ' + str_vals[-i], + lambda k: k.get_metadata('foo-str') <= str_vals[i] or k.get_metadata('foo-str') >= str_vals[-i] ) + + # 'and' query + for i in range(len(src_keys) // 2): + do_check_mdsearch(target_conn.conn, bucket, src_keys , 'x-amz-meta-foo-str >= ' + str_vals[i] + ' and x-amz-meta-foo-str <= ' + str_vals[i + 1], + lambda k: k.get_metadata('foo-str') >= str_vals[i] and k.get_metadata('foo-str') <= str_vals[i + 1] ) + # more complicated query + for i in range(len(src_keys) // 2): + 
do_check_mdsearch(target_conn.conn, None, src_keys , 'bucket == ' + bucket.name + ' and x-amz-meta-foo-str >= ' + str_vals[i] + + ' and (x-amz-meta-foo-str <= ' + str_vals[i + 1] + ')', + lambda k: k.bucket.name == bucket.name and (k.get_metadata('foo-str') >= str_vals[i] and + k.get_metadata('foo-str') <= str_vals[i + 1]) ) + +def test_es_bucket_conf(): + min_size = 0 + + def bucket_init(zone_conn, bucket): + req = MDSearchConfig(zone_conn.conn, bucket.name) + req.set_config('x-amz-meta-foo-str; string, x-amz-meta-foo-int; int, x-amz-meta-foo-date; date') + + targets, sources, buckets, _ = init_env(None, num_keys = 5, buckets_per_zone = 1, bucket_init_cb = bucket_init) + + for source_conn in sources: + for bucket in buckets: + req = MDSearchConfig(source_conn.conn, bucket.name) + conf = req.get_config() + + d = {} + + for entry in conf: + d[entry['Key']] = entry['Type'] + + eq(len(d), 3) + eq(d['x-amz-meta-foo-str'], 'str') + eq(d['x-amz-meta-foo-int'], 'int') + eq(d['x-amz-meta-foo-date'], 'date') + + req.del_config() + + conf = req.get_config() + + eq(len(conf), 0) + + break # no need to iterate over all zones diff --git a/src/test/rgw/rgw_multi/tools.py b/src/test/rgw/rgw_multi/tools.py new file mode 100644 index 000000000..dd7f91ade --- /dev/null +++ b/src/test/rgw/rgw_multi/tools.py @@ -0,0 +1,97 @@ +import json +import boto + +def append_attr_value(d, attr, attrv): + if attrv and len(str(attrv)) > 0: + d[attr] = attrv + +def append_attr(d, k, attr): + try: + attrv = getattr(k, attr) + except: + return + append_attr_value(d, attr, attrv) + +def get_attrs(k, attrs): + d = {} + for a in attrs: + append_attr(d, k, a) + + return d + +def append_query_arg(s, n, v): + if not v: + return s + nv = '{n}={v}'.format(n=n, v=v) + if not s: + return nv + return '{s}&{nv}'.format(s=s, nv=nv) + +class KeyJSONEncoder(boto.s3.key.Key): + @staticmethod + def default(k, versioned=False): + attrs = ['bucket', 'name', 'size', 'last_modified', 'metadata', 'cache_control', + 'content_type', 'content_disposition', 'content_language', + 'owner', 'storage_class', 'md5', 'version_id', 'encrypted', + 'delete_marker', 'expiry_date', 'VersionedEpoch', 'RgwxTag'] + d = get_attrs(k, attrs) + d['etag'] = k.etag[1:-1] + if versioned: + d['is_latest'] = k.is_latest + return d + +class DeleteMarkerJSONEncoder(boto.s3.key.Key): + @staticmethod + def default(k): + attrs = ['name', 'version_id', 'last_modified', 'owner'] + d = get_attrs(k, attrs) + d['delete_marker'] = True + d['is_latest'] = k.is_latest + return d + +class UserJSONEncoder(boto.s3.user.User): + @staticmethod + def default(k): + attrs = ['id', 'display_name'] + return get_attrs(k, attrs) + +class BucketJSONEncoder(boto.s3.bucket.Bucket): + @staticmethod + def default(k): + attrs = ['name', 'creation_date'] + return get_attrs(k, attrs) + +class BotoJSONEncoder(json.JSONEncoder): + def default(self, obj): + if isinstance(obj, boto.s3.key.Key): + return KeyJSONEncoder.default(obj) + if isinstance(obj, boto.s3.deletemarker.DeleteMarker): + return DeleteMarkerJSONEncoder.default(obj) + if isinstance(obj, boto.s3.user.User): + return UserJSONEncoder.default(obj) + if isinstance(obj, boto.s3.prefix.Prefix): + return (lambda x: {'prefix': x.name})(obj) + if isinstance(obj, boto.s3.bucket.Bucket): + return BucketJSONEncoder.default(obj) + return json.JSONEncoder.default(self, obj) + + +def dump_json(o, cls=BotoJSONEncoder): + return json.dumps(o, cls=cls, indent=4) + +def assert_raises(excClass, callableObj, *args, **kwargs): + """ + Like 
unittest.TestCase.assertRaises, but returns the exception. + """ + try: + callableObj(*args, **kwargs) + except excClass as e: + return e + else: + if hasattr(excClass, '__name__'): + excName = excClass.__name__ + else: + excName = str(excClass) + raise AssertionError("%s not raised" % excName) + + diff --git a/src/test/rgw/rgw_multi/zone_az.py b/src/test/rgw/rgw_multi/zone_az.py new file mode 100644 index 000000000..f9cd43574 --- /dev/null +++ b/src/test/rgw/rgw_multi/zone_az.py @@ -0,0 +1,42 @@ +import logging + +from .multisite import Zone + + +log = logging.getLogger('rgw_multi.tests') + + +class AZone(Zone): # pylint: disable=too-many-ancestors + """ archive zone class """ + def __init__(self, name, zonegroup=None, cluster=None, data=None, zone_id=None, gateways=None): + super(AZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways) + + def is_read_only(self): + return False + + def tier_type(self): + return "archive" + + def create(self, cluster, args=None, **kwargs): + if args is None: + args = '' + args += ['--tier-type', self.tier_type()] + return self.json_command(cluster, 'create', args) + + def has_buckets(self): + return False + + def has_roles(self): + return True + +class AZoneConfig: + """ archive zone configuration """ + def __init__(self, cfg, section): + pass + + +def print_connection_info(conn): + """print info of connection""" + print("Host: " + conn.host+':'+str(conn.port)) + print("AWS Secret Key: " + conn.aws_secret_access_key) + print("AWS Access Key: " + conn.aws_access_key_id) diff --git a/src/test/rgw/rgw_multi/zone_cloud.py b/src/test/rgw/rgw_multi/zone_cloud.py new file mode 100644 index 000000000..dd5640cf2 --- /dev/null +++ b/src/test/rgw/rgw_multi/zone_cloud.py @@ -0,0 +1,326 @@ +import json +import requests.compat +import logging + +import boto +import boto.s3.connection + +import dateutil.parser +import datetime + +import re + +from nose.tools import eq_ as eq +from itertools import zip_longest # type: ignore +from urllib.parse import urlparse + +from .multisite import * +from .tools import * + +log = logging.getLogger(__name__) + +def get_key_ver(k): + if not k.version_id: + return 'null' + return k.version_id + +def unquote(s): + if s[0] == '"' and s[-1] == '"': + return s[1:-1] + return s + +def check_object_eq(k1, k2, check_extra = True): + assert k1 + assert k2 + log.debug('comparing key name=%s', k1.name) + eq(k1.name, k2.name) + eq(k1.metadata, k2.metadata) + # eq(k1.cache_control, k2.cache_control) + eq(k1.content_type, k2.content_type) + eq(k1.content_encoding, k2.content_encoding) + eq(k1.content_disposition, k2.content_disposition) + eq(k1.content_language, k2.content_language) + + eq(unquote(k1.etag), unquote(k2.etag)) + + mtime1 = dateutil.parser.parse(k1.last_modified) + mtime2 = dateutil.parser.parse(k2.last_modified) + log.debug('k1.last_modified=%s k2.last_modified=%s', k1.last_modified, k2.last_modified) + assert abs((mtime1 - mtime2).total_seconds()) < 1 # handle different time resolution + # if check_extra: + # eq(k1.owner.id, k2.owner.id) + # eq(k1.owner.display_name, k2.owner.display_name) + # eq(k1.storage_class, k2.storage_class) + eq(k1.size, k2.size) + eq(get_key_ver(k1), get_key_ver(k2)) + # eq(k1.encrypted, k2.encrypted) + +def make_request(conn, method, bucket, key, query_args, headers): + result = conn.make_request(method, bucket=bucket, key=key, query_args=query_args, headers=headers) + if result.status // 100 != 2: + raise boto.exception.S3ResponseError(result.status, result.reason, result.read()) + return 
result + +class CloudKey: + def __init__(self, zone_bucket, k): + self.zone_bucket = zone_bucket + + # we need two keys: when listing buckets, we get keys that only contain partial data + # but we need to have the full data so that we could use all the meta-rgwx- headers + # that are needed in order to create a correct representation of the object + self.key = k + self.rgwx_key = k # assuming k has all the meta info on, if not then we'll update it in update() + self.update() + + def update(self): + k = self.key + rk = self.rgwx_key + + self.size = rk.size + orig_name = rk.metadata.get('rgwx-source-key') + if not orig_name: + self.rgwx_key = self.zone_bucket.bucket.get_key(k.name, version_id = k.version_id) + rk = self.rgwx_key + orig_name = rk.metadata.get('rgwx-source-key') + + self.name = orig_name + self.version_id = rk.metadata.get('rgwx-source-version-id') + + ve = rk.metadata.get('rgwx-versioned-epoch') + if ve: + self.versioned_epoch = int(ve) + else: + self.versioned_epoch = 0 + + mt = rk.metadata.get('rgwx-source-mtime') + if mt: + self.last_modified = datetime.datetime.utcfromtimestamp(float(mt)).strftime('%a, %d %b %Y %H:%M:%S GMT') + else: + self.last_modified = k.last_modified + + et = rk.metadata.get('rgwx-source-etag') + if rk.etag.find('-') >= 0 or et.find('-') >= 0: + # in this case we will use the source etag as it was uploaded via multipart upload + # in one of the zones, so there's no way to make sure etags are calculated the same + # way. In the other case we'd just want to keep the etag that was generated in the + # regular upload mechanism, which should be consistent in both ends + self.etag = et + else: + self.etag = rk.etag + + if k.etag[0] == '"' and self.etag[0] != '"': # inconsistent etag quoting when listing bucket vs object get + self.etag = '"' + self.etag + '"' + + new_meta = {} + for meta_key, meta_val in k.metadata.items(): + if not meta_key.startswith('rgwx-'): + new_meta[meta_key] = meta_val + + self.metadata = new_meta + + self.cache_control = k.cache_control + self.content_type = k.content_type + self.content_encoding = k.content_encoding + self.content_disposition = k.content_disposition + self.content_language = k.content_language + + + def get_contents_as_string(self, encoding=None): + r = self.key.get_contents_as_string(encoding=encoding) + + # the previous call changed the status of the source object, as it loaded + # its metadata + + self.rgwx_key = self.key + self.update() + + return r + + +class CloudZoneBucket: + def __init__(self, zone_conn, target_path, name): + self.zone_conn = zone_conn + self.name = name + self.cloud_conn = zone_conn.zone.cloud_conn + + target_path = target_path[:] + if target_path[-1] != '/': + target_path += '/' + target_path = target_path.replace('${bucket}', name) + + tp = target_path.split('/', 1) + + if len(tp) == 1: + self.target_bucket = target_path + self.target_prefix = '' + else: + self.target_bucket = tp[0] + self.target_prefix = tp[1] + + log.debug('target_path=%s target_bucket=%s target_prefix=%s', target_path, self.target_bucket, self.target_prefix) + self.bucket = self.cloud_conn.get_bucket(self.target_bucket) + + def get_all_versions(self): + l = [] + + for k in self.bucket.get_all_keys(prefix=self.target_prefix): + new_key = CloudKey(self, k) + + log.debug('appending o=[\'%s\', \'%s\', \'%d\']', new_key.name, new_key.version_id, new_key.versioned_epoch) + l.append(new_key) + + + sort_key = lambda k: (k.name, -k.versioned_epoch) + l.sort(key = sort_key) + + for new_key in l: + yield new_key + + def 
get_key(self, name, version_id=None): + return CloudKey(self, self.bucket.get_key(name, version_id=version_id)) + + +def parse_endpoint(endpoint): + o = urlparse(endpoint) + + netloc = o.netloc.split(':') + + host = netloc[0] + + if len(netloc) > 1: + port = int(netloc[1]) + else: + port = o.port + + is_secure = False + + if o.scheme == 'https': + is_secure = True + + if not port: + if is_secure: + port = 443 + else: + port = 80 + + return host, port, is_secure + + +class CloudZone(Zone): + def __init__(self, name, cloud_endpoint, credentials, source_bucket, target_path, + zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None): + self.cloud_endpoint = cloud_endpoint + self.credentials = credentials + self.source_bucket = source_bucket + self.target_path = target_path + + self.target_path = self.target_path.replace('${zone}', name) + # self.target_path = self.target_path.replace('${zone_id}', zone_id) + self.target_path = self.target_path.replace('${zonegroup}', zonegroup.name) + self.target_path = self.target_path.replace('${zonegroup_id}', zonegroup.id) + + log.debug('target_path=%s', self.target_path) + + host, port, is_secure = parse_endpoint(cloud_endpoint) + + self.cloud_conn = boto.connect_s3( + aws_access_key_id = credentials.access_key, + aws_secret_access_key = credentials.secret, + host = host, + port = port, + is_secure = is_secure, + calling_format = boto.s3.connection.OrdinaryCallingFormat()) + super(CloudZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways) + + + def is_read_only(self): + return True + + def tier_type(self): + return "cloud" + + def create(self, cluster, args = None, check_retcode = True): + """ create the object with the given arguments """ + + if args is None: + args = '' + + tier_config = ','.join([ 'connection.endpoint=' + self.cloud_endpoint, + 'connection.access_key=' + self.credentials.access_key, + 'connection.secret=' + self.credentials.secret, + 'target_path=' + re.escape(self.target_path)]) + + args += [ '--tier-type', self.tier_type(), '--tier-config', tier_config ] + + return self.json_command(cluster, 'create', args, check_retcode=check_retcode) + + def has_buckets(self): + return False + + def has_roles(self): + return False + + class Conn(ZoneConn): + def __init__(self, zone, credentials): + super(CloudZone.Conn, self).__init__(zone, credentials) + + def get_bucket(self, bucket_name): + return CloudZoneBucket(self, self.zone.target_path, bucket_name) + + def create_bucket(self, name): + # should not be here, a bug in the test suite + log.critical('Conn.create_bucket() should not be called in cloud zone') + assert False + + def check_bucket_eq(self, zone_conn, bucket_name): + assert(zone_conn.zone.tier_type() == "rados") + + log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, self.name) + b1 = self.get_bucket(bucket_name) + b2 = zone_conn.get_bucket(bucket_name) + + log.debug('bucket1 objects:') + for o in b1.get_all_versions(): + log.debug('o=%s', o.name) + log.debug('bucket2 objects:') + for o in b2.get_all_versions(): + log.debug('o=%s', o.name) + + for k1, k2 in zip_longest(b1.get_all_versions(), b2.get_all_versions()): + if k1 is None: + log.critical('key=%s is missing from zone=%s', k2.name, self.name) + assert False + if k2 is None: + log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name) + assert False + + check_object_eq(k1, k2) + + + log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name) + + return True + + 
def create_role(self, path, rolename, policy_document, tag_list): + assert False + + def get_conn(self, credentials): + return self.Conn(self, credentials) + + +class CloudZoneConfig: + def __init__(self, cfg, section): + self.endpoint = cfg.get(section, 'endpoint') + access_key = cfg.get(section, 'access_key') + secret = cfg.get(section, 'secret') + self.credentials = Credentials(access_key, secret) + try: + self.target_path = cfg.get(section, 'target_path') + except: + self.target_path = 'rgw-${zonegroup_id}/${bucket}' + + try: + self.source_bucket = cfg.get(section, 'source_bucket') + except: + self.source_bucket = '*' + diff --git a/src/test/rgw/rgw_multi/zone_es.py b/src/test/rgw/rgw_multi/zone_es.py new file mode 100644 index 000000000..e98b3fdd8 --- /dev/null +++ b/src/test/rgw/rgw_multi/zone_es.py @@ -0,0 +1,256 @@ +import json +import requests.compat +import logging + +import boto +import boto.s3.connection + +import dateutil.parser + +from nose.tools import eq_ as eq +from itertools import zip_longest # type: ignore + +from .multisite import * +from .tools import * + +log = logging.getLogger(__name__) + +def get_key_ver(k): + if not k.version_id: + return 'null' + return k.version_id + +def check_object_eq(k1, k2, check_extra = True): + assert k1 + assert k2 + log.debug('comparing key name=%s', k1.name) + eq(k1.name, k2.name) + eq(k1.metadata, k2.metadata) + # eq(k1.cache_control, k2.cache_control) + eq(k1.content_type, k2.content_type) + # eq(k1.content_encoding, k2.content_encoding) + # eq(k1.content_disposition, k2.content_disposition) + # eq(k1.content_language, k2.content_language) + eq(k1.etag, k2.etag) + mtime1 = dateutil.parser.parse(k1.last_modified) + mtime2 = dateutil.parser.parse(k2.last_modified) + assert abs((mtime1 - mtime2).total_seconds()) < 1 # handle different time resolution + if check_extra: + eq(k1.owner.id, k2.owner.id) + eq(k1.owner.display_name, k2.owner.display_name) + # eq(k1.storage_class, k2.storage_class) + eq(k1.size, k2.size) + eq(get_key_ver(k1), get_key_ver(k2)) + # eq(k1.encrypted, k2.encrypted) + +def make_request(conn, method, bucket, key, query_args, headers): + result = conn.make_request(method, bucket=bucket, key=key, query_args=query_args, headers=headers) + if result.status // 100 != 2: + raise boto.exception.S3ResponseError(result.status, result.reason, result.read()) + return result + + +class MDSearch: + def __init__(self, conn, bucket_name, query, query_args = None, marker = None): + self.conn = conn + self.bucket_name = bucket_name or '' + if bucket_name: + self.bucket = boto.s3.bucket.Bucket(name=bucket_name) + else: + self.bucket = None + self.query = query + self.query_args = query_args + self.max_keys = None + self.marker = marker + + def raw_search(self): + q = self.query or '' + query_args = append_query_arg(self.query_args, 'query', requests.compat.quote_plus(q)) + if self.max_keys is not None: + query_args = append_query_arg(query_args, 'max-keys', self.max_keys) + if self.marker: + query_args = append_query_arg(query_args, 'marker', self.marker) + + query_args = append_query_arg(query_args, 'format', 'json') + + headers = {} + + result = make_request(self.conn, "GET", bucket=self.bucket_name, key='', query_args=query_args, headers=headers) + + l = [] + + result_dict = json.loads(result.read()) + + for entry in result_dict['Objects']: + bucket = self.conn.get_bucket(entry['Bucket'], validate = False) + k = boto.s3.key.Key(bucket, entry['Key']) + + k.version_id = entry['Instance'] + k.etag = entry['ETag'] + k.owner = 
boto.s3.user.User(id=entry['Owner']['ID'], display_name=entry['Owner']['DisplayName']) + k.last_modified = entry['LastModified'] + k.size = entry['Size'] + k.content_type = entry['ContentType'] + k.versioned_epoch = entry['VersionedEpoch'] + + k.metadata = {} + for e in entry['CustomMetadata']: + k.metadata[e['Name']] = str(e['Value']) # int values will return as int, cast to string for compatibility with object meta response + + l.append(k) + + return result_dict, l + + def search(self, drain = True, sort = True, sort_key = None): + l = [] + + is_done = False + + while not is_done: + result, result_keys = self.raw_search() + + l = l + result_keys + + is_done = not (drain and (result['IsTruncated'] == "true")) + marker = result['Marker'] + + if sort: + if not sort_key: + sort_key = lambda k: (k.name, -k.versioned_epoch) + l.sort(key = sort_key) + + return l + + +class MDSearchConfig: + def __init__(self, conn, bucket_name): + self.conn = conn + self.bucket_name = bucket_name or '' + if bucket_name: + self.bucket = boto.s3.bucket.Bucket(name=bucket_name) + else: + self.bucket = None + + def send_request(self, conf, method): + query_args = 'mdsearch' + headers = None + if conf: + headers = { 'X-Amz-Meta-Search': conf } + + query_args = append_query_arg(query_args, 'format', 'json') + + return make_request(self.conn, method, bucket=self.bucket_name, key='', query_args=query_args, headers=headers) + + def get_config(self): + result = self.send_request(None, 'GET') + return json.loads(result.read()) + + def set_config(self, conf): + self.send_request(conf, 'POST') + + def del_config(self): + self.send_request(None, 'DELETE') + + +class ESZoneBucket: + def __init__(self, zone_conn, name, conn): + self.zone_conn = zone_conn + self.name = name + self.conn = conn + + self.bucket = boto.s3.bucket.Bucket(name=name) + + def get_all_versions(self): + + marker = None + is_done = False + + req = MDSearch(self.conn, self.name, 'bucket == ' + self.name, marker=marker) + + for k in req.search(): + yield k + + + + +class ESZone(Zone): + def __init__(self, name, es_endpoint, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None): + self.es_endpoint = es_endpoint + super(ESZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways) + + def is_read_only(self): + return True + + def tier_type(self): + return "elasticsearch" + + def create(self, cluster, args = None, check_retcode = True): + """ create the object with the given arguments """ + + if args is None: + args = '' + + tier_config = ','.join([ 'endpoint=' + self.es_endpoint, 'explicit_custom_meta=false' ]) + + args += [ '--tier-type', self.tier_type(), '--tier-config', tier_config ] + + return self.json_command(cluster, 'create', args, check_retcode=check_retcode) + + def has_buckets(self): + return False + + def has_roles(self): + return False + + class Conn(ZoneConn): + def __init__(self, zone, credentials): + super(ESZone.Conn, self).__init__(zone, credentials) + + def get_bucket(self, bucket_name): + return ESZoneBucket(self, bucket_name, self.conn) + + def create_bucket(self, name): + # should not be here, a bug in the test suite + log.critical('Conn.create_bucket() should not be called in ES zone') + assert False + + def check_bucket_eq(self, zone_conn, bucket_name): + assert(zone_conn.zone.tier_type() == "rados") + + log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, self.name) + b1 = self.get_bucket(bucket_name) + b2 = zone_conn.get_bucket(bucket_name) + + log.debug('bucket1 objects:') + 
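+ # b1 lists objects through the elasticsearch metadata search, b2 through the rados zone; both listings are logged and then compared entry by entry below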
for o in b1.get_all_versions(): + log.debug('o=%s', o.name) + log.debug('bucket2 objects:') + for o in b2.get_all_versions(): + log.debug('o=%s', o.name) + + for k1, k2 in zip_longest(b1.get_all_versions(), b2.get_all_versions()): + if k1 is None: + log.critical('key=%s is missing from zone=%s', k2.name, self.name) + assert False + if k2 is None: + log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name) + assert False + + check_object_eq(k1, k2) + + + log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name) + + return True + + def create_role(self, path, rolename, policy_document, tag_list): + assert False + + def get_conn(self, credentials): + return self.Conn(self, credentials) + + +class ESZoneConfig: + def __init__(self, cfg, section): + self.endpoint = cfg.get(section, 'endpoint') + diff --git a/src/test/rgw/rgw_multi/zone_rados.py b/src/test/rgw/rgw_multi/zone_rados.py new file mode 100644 index 000000000..ac4edd004 --- /dev/null +++ b/src/test/rgw/rgw_multi/zone_rados.py @@ -0,0 +1,134 @@ +import logging +from boto.s3.deletemarker import DeleteMarker + +from itertools import zip_longest # type: ignore + +from nose.tools import eq_ as eq + +from .multisite import * + +log = logging.getLogger(__name__) + +def check_object_eq(k1, k2, check_extra = True): + assert k1 + assert k2 + log.debug('comparing key name=%s', k1.name) + eq(k1.name, k2.name) + eq(k1.version_id, k2.version_id) + eq(k1.is_latest, k2.is_latest) + eq(k1.last_modified, k2.last_modified) + if isinstance(k1, DeleteMarker): + assert isinstance(k2, DeleteMarker) + return + + eq(k1.get_contents_as_string(), k2.get_contents_as_string()) + eq(k1.metadata, k2.metadata) + eq(k1.cache_control, k2.cache_control) + eq(k1.content_type, k2.content_type) + eq(k1.content_encoding, k2.content_encoding) + eq(k1.content_disposition, k2.content_disposition) + eq(k1.content_language, k2.content_language) + eq(k1.etag, k2.etag) + if check_extra: + eq(k1.owner.id, k2.owner.id) + eq(k1.owner.display_name, k2.owner.display_name) + eq(k1.storage_class, k2.storage_class) + eq(k1.size, k2.size) + eq(k1.encrypted, k2.encrypted) + +class RadosZone(Zone): + def __init__(self, name, zonegroup = None, cluster = None, data = None, zone_id = None, gateways = None): + super(RadosZone, self).__init__(name, zonegroup, cluster, data, zone_id, gateways) + + def tier_type(self): + return "rados" + + + class Conn(ZoneConn): + def __init__(self, zone, credentials): + super(RadosZone.Conn, self).__init__(zone, credentials) + + def get_bucket(self, name): + return self.conn.get_bucket(name) + + def create_bucket(self, name): + return self.conn.create_bucket(name) + + def delete_bucket(self, name): + return self.conn.delete_bucket(name) + + def check_bucket_eq(self, zone_conn, bucket_name): + log.info('comparing bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name) + b1 = self.get_bucket(bucket_name) + b2 = zone_conn.get_bucket(bucket_name) + + b1_versions = b1.list_versions() + log.debug('bucket1 objects:') + for o in b1_versions: + log.debug('o=%s', o.name) + + b2_versions = b2.list_versions() + log.debug('bucket2 objects:') + for o in b2_versions: + log.debug('o=%s', o.name) + + for k1, k2 in zip_longest(b1_versions, b2_versions): + if k1 is None: + log.critical('key=%s is missing from zone=%s', k2.name, self.name) + assert False + if k2 is None: + log.critical('key=%s is missing from zone=%s', k1.name, zone_conn.name) + assert False + + check_object_eq(k1, k2) + + if isinstance(k1, 
DeleteMarker): + # verify that HEAD sees a delete marker + assert b1.get_key(k1.name) is None + assert b2.get_key(k2.name) is None + else: + # now get the keys through a HEAD operation, verify that the available data is the same + k1_head = b1.get_key(k1.name, version_id=k1.version_id) + k2_head = b2.get_key(k2.name, version_id=k2.version_id) + check_object_eq(k1_head, k2_head, False) + + if k1.version_id: + # compare the olh to make sure they agree about the current version + k1_olh = b1.get_key(k1.name) + k2_olh = b2.get_key(k2.name) + # if there's a delete marker, HEAD will return None + if k1_olh or k2_olh: + check_object_eq(k1_olh, k2_olh, False) + + log.info('success, bucket identical: bucket=%s zones={%s, %s}', bucket_name, self.name, zone_conn.name) + + return True + + def get_role(self, role_name): + return self.iam_conn.get_role(role_name) + + def check_role_eq(self, zone_conn, role_name): + log.info('comparing role=%s zones={%s, %s}', role_name, self.name, zone_conn.name) + r1 = self.get_role(role_name) + r2 = zone_conn.get_role(role_name) + + assert r1 + assert r2 + log.debug('comparing role name=%s', r1['get_role_response']['get_role_result']['role']['role_name']) + eq(r1['get_role_response']['get_role_result']['role']['role_name'], r2['get_role_response']['get_role_result']['role']['role_name']) + eq(r1['get_role_response']['get_role_result']['role']['role_id'], r2['get_role_response']['get_role_result']['role']['role_id']) + eq(r1['get_role_response']['get_role_result']['role']['path'], r2['get_role_response']['get_role_result']['role']['path']) + eq(r1['get_role_response']['get_role_result']['role']['arn'], r2['get_role_response']['get_role_result']['role']['arn']) + eq(r1['get_role_response']['get_role_result']['role']['max_session_duration'], r2['get_role_response']['get_role_result']['role']['max_session_duration']) + eq(r1['get_role_response']['get_role_result']['role']['assume_role_policy_document'], r2['get_role_response']['get_role_result']['role']['assume_role_policy_document']) + + log.info('success, role identical: role=%s zones={%s, %s}', role_name, self.name, zone_conn.name) + + return True + + def create_role(self, path, rolename, policy_document, tag_list): + return self.iam_conn.create_role(rolename, policy_document, path) + + def get_conn(self, credentials): + return self.Conn(self, credentials) + diff --git a/src/test/rgw/test-ceph-diff-sorted.sh b/src/test/rgw/test-ceph-diff-sorted.sh new file mode 100755 index 000000000..dddf4ae1b --- /dev/null +++ b/src/test/rgw/test-ceph-diff-sorted.sh @@ -0,0 +1,108 @@ +#!/usr/bin/env bash + +# set -e -x + +. "`dirname $0`/test-rgw-common.sh" + +temp_prefix="/tmp/`basename $0`-$$" + +short=${temp_prefix}-short +short_w_blank=${temp_prefix}-short-w-blank +long=${temp_prefix}-long +unsorted=${temp_prefix}-unsorted +empty=${temp_prefix}-empty +fake=${temp_prefix}-fake + +out1=${temp_prefix}-out1 +out2=${temp_prefix}-out2 + +cat >"${short}" <<EOF +bear +fox +hippo +zebra +EOF + +cat >"${short_w_blank}" <<EOF +bear +fox +hippo + +zebra +EOF + +cat >"${long}" <<EOF +badger +cuttlefish +fox +llama +octopus +penguine +seal +squid +whale +yak +zebra +EOF + +cat >"${unsorted}" <<EOF +bear +hippo +fox +zebra +EOF + +touch $empty + +#### testing #### + +# test perfect match +ceph-diff-sorted $long $long >"${out1}" +$assert $? -eq 0 +$assert $(cat $out1 | wc -l) -eq 0 + +# test non-match; use /bin/diff to verify +/bin/diff $short $long >"${out2}" +ceph-diff-sorted $short $long >"${out1}" +$assert $? 
-eq 1 +$assert $(cat $out1 | grep '^<' | wc -l) -eq $(cat $out2 | grep '^<' | wc -l) +$assert $(cat $out1 | grep '^>' | wc -l) -eq $(cat $out2 | grep '^>' | wc -l) + +/bin/diff $long $short >"${out2}" +ceph-diff-sorted $long $short >"${out1}" +$assert $? -eq 1 +$assert $(cat $out1 | grep '^<' | wc -l) -eq $(cat $out2 | grep '^<' | wc -l) +$assert $(cat $out1 | grep '^>' | wc -l) -eq $(cat $out2 | grep '^>' | wc -l) + +# test w blank line +ceph-diff-sorted $short $short_w_blank 2>/dev/null +$assert $? -eq 4 + +ceph-diff-sorted $short_w_blank $short 2>/dev/null +$assert $? -eq 4 + +# test unsorted input +ceph-diff-sorted $short $unsorted >"${out2}" 2>/dev/null +$assert $? -eq 4 + +ceph-diff-sorted $unsorted $short >"${out2}" 2>/dev/null +$assert $? -eq 4 + +# test bad # of args +ceph-diff-sorted 2>/dev/null +$assert $? -eq 2 + +ceph-diff-sorted $short 2>/dev/null +$assert $? -eq 2 + +# test bad file path + +ceph-diff-sorted $short $fake 2>/dev/null +$assert $? -eq 3 + +ceph-diff-sorted $fake $short 2>/dev/null +$assert $? -eq 3 + +#### clean-up #### + +/bin/rm -f $short $short_w_blank $long $unsorted $empty $out1 $out2 diff --git a/src/test/rgw/test-rgw-call.sh b/src/test/rgw/test-rgw-call.sh new file mode 100755 index 000000000..49399ebc3 --- /dev/null +++ b/src/test/rgw/test-rgw-call.sh @@ -0,0 +1,9 @@ +#!/usr/bin/env bash + +. "`dirname $0`/test-rgw-common.sh" +. "`dirname $0`/test-rgw-meta-sync.sh" + +# Do not use eval here. We have eval in test-rgw-common.sh:x(), so adding +# one here creates a double-eval situation. Passing arguments with spaces +# becomes impossible when double-eval strips escaping and quotes. +$@ diff --git a/src/test/rgw/test-rgw-common.sh b/src/test/rgw/test-rgw-common.sh new file mode 100644 index 000000000..da263c6d9 --- /dev/null +++ b/src/test/rgw/test-rgw-common.sh @@ -0,0 +1,195 @@ +#!/usr/bin/env bash + +rgw_flags="--debug-rgw=20 --debug-ms=1" + +function _assert { + src=$1; shift + lineno=$1; shift + [ "$@" ] || echo "$src: $lineno: assert failed: $@" || exit 1 +} + +assert="eval _assert \$BASH_SOURCE \$LINENO" + +function var_to_python_json_index { + echo "['$1']" | sed "s/\./'\]\['/g" +} + +function json_extract { +var="" +[ "$1" != "" ] && var=$(var_to_python_json_index $1) +shift +python3 - <<END +import json +s='$@' +data = json.loads(s) +print(data$var) +END +} + +function python_array_len { +python3 - <<END +arr=$@ +print(len(arr)) +END +} + +function project_python_array_field { +var=$(var_to_python_json_index $1) +shift +python3 - <<END +arr=$@ +s='( ' +for x in arr: + s += '"' + str(x$var) + '" ' +s += ')' +print(s) +END +} + + +x() { + # echo "x " "$@" >&2 + eval "$@" +} + + +script_dir=`dirname $0` +root_path=`(cd $script_dir/../..; pwd)` + +mstart=$root_path/mstart.sh +mstop=$root_path/mstop.sh +mrun=$root_path/mrun +mrgw=$root_path/mrgw.sh + +url=http://localhost + +function start_ceph_cluster { + [ $# -ne 1 ] && echo "start_ceph_cluster() needs 1 param" && exit 1 + + echo "$mstart $1" +} + +function rgw_admin { + [ $# -lt 1 ] && echo "rgw_admin() needs 1 param" && exit 1 + + echo "$mrun $1 radosgw-admin" +} + +function rgw { + [ $# -lt 2 ] && echo "rgw() needs at least 2 params" && exit 1 + + name=$1 + port=$2 + ssl_port=0 #ssl port not used + shift 2 + + echo "$mrgw $name $port $ssl_port $rgw_flags $@" +} + +function init_first_zone { + [ $# -ne 7 ] && echo "init_first_zone() needs 7 params" && exit 1 + + cid=$1 + realm=$2 + zg=$3 + zone=$4 + endpoints=$5 + + access_key=$6 + secret=$7 + +# initialize realm + x $(rgw_admin $cid) realm create 
--rgw-realm=$realm + +# create zonegroup, zone + x $(rgw_admin $cid) zonegroup create --rgw-zonegroup=$zg --master --default + x $(rgw_admin $cid) zone create --rgw-zonegroup=$zg --rgw-zone=$zone --access-key=${access_key} --secret=${secret} --endpoints=$endpoints --default + x $(rgw_admin $cid) user create --uid=zone.user --display-name=ZoneUser --access-key=${access_key} --secret=${secret} --system + + x $(rgw_admin $cid) period update --commit +} + +function init_zone_in_existing_zg { + [ $# -ne 8 ] && echo "init_zone_in_existing_zg() needs 8 params" && exit 1 + + cid=$1 + realm=$2 + zg=$3 + zone=$4 + master_zg_zone1_port=$5 + endpoints=$6 + + access_key=$7 + secret=$8 + + x $(rgw_admin $cid) realm pull --url=$url:$master_zg_zone1_port --access-key=${access_key} --secret=${secret} --default + x $(rgw_admin $cid) zonegroup default --rgw-zonegroup=$zg + x $(rgw_admin $cid) zone create --rgw-zonegroup=$zg --rgw-zone=$zone --access-key=${access_key} --secret=${secret} --endpoints=$endpoints + x $(rgw_admin $cid) period update --commit +} + +function init_first_zone_in_slave_zg { + [ $# -ne 8 ] && echo "init_first_zone_in_slave_zg() needs 8 params" && exit 1 + + cid=$1 + realm=$2 + zg=$3 + zone=$4 + master_zg_zone1_port=$5 + endpoints=$6 + + access_key=$7 + secret=$8 + +# create zonegroup, zone + x $(rgw_admin $cid) realm pull --url=$url:$master_zg_zone1_port --access-key=${access_key} --secret=${secret} + x $(rgw_admin $cid) realm default --rgw-realm=$realm + x $(rgw_admin $cid) zonegroup create --rgw-realm=$realm --rgw-zonegroup=$zg --endpoints=$endpoints --default + x $(rgw_admin $cid) zonegroup default --rgw-zonegroup=$zg + + x $(rgw_admin $cid) zone create --rgw-zonegroup=$zg --rgw-zone=$zone --access-key=${access_key} --secret=${secret} --endpoints=$endpoints + x $(rgw_admin $cid) zone default --rgw-zone=$zone + x $(rgw_admin $cid) zonegroup add --rgw-zonegroup=$zg --rgw-zone=$zone + + x $(rgw_admin $cid) period update --commit + +} + +function call_rgw_admin { + cid=$1 + shift 1 + x $(rgw_admin $cid) "$@" +} + +function get_mstart_parameters { + [ $# -ne 1 ] && echo "get_mstart_parameters() needs 1 param" && exit 1 + # bash arrays start from zero + index="$1" + index=$((index-1)) + if [ -n "$DEV_LIST" ]; then + IFS=', ' read -r -a dev_list <<< "$DEV_LIST" + if [ ${#dev_list[@]} -gt "$index" ]; then + local dev_name=${dev_list["$index"]} + parameters="--bluestore-devs $dev_name" + fi + fi + + if [ -n "$DB_DEV_LIST" ]; then + IFS=', ' read -r -a db_dev_list <<< "$DB_DEV_LIST" + if [ ${#db_dev_list[@]} -gt "$index" ]; then + local dev_name=${db_dev_list["$index"]} + parameters="$parameters"" -o bluestore_block_db_path=$dev_name" + fi + fi + + if [ -n "$WAL_DEV_LIST" ]; then + IFS=', ' read -r -a wal_dev_list <<< "$WAL_DEV_LIST" + if [ ${#wal_dev_list[@]} -gt "$index" ]; then + local dev_name=${wal_dev_list["$index"]} + parameters="$parameters"" -o bluestore_block_wal_path=$dev_name" + fi + fi + + echo "$parameters" +} + diff --git a/src/test/rgw/test-rgw-meta-sync.sh b/src/test/rgw/test-rgw-meta-sync.sh new file mode 100755 index 000000000..18f425298 --- /dev/null +++ b/src/test/rgw/test-rgw-meta-sync.sh @@ -0,0 +1,65 @@ +#!/usr/bin/env bash + +. 
"`dirname $0`/test-rgw-common.sh" + +set -e + +function get_metadata_sync_status { + cid=$1 + realm=$2 + + meta_sync_status_json=`$(rgw_admin $cid) --rgw-realm=$realm metadata sync status` + + global_sync_status=$(json_extract sync_status.info.status $meta_sync_status_json) + num_shards=$(json_extract sync_status.info.num_shards $meta_sync_status_json) + + echo "sync_status: $global_sync_status" + + sync_markers=$(json_extract sync_status.markers $meta_sync_status_json) + + num_shards2=$(python_array_len $sync_markers) + + [ "$global_sync_status" == "sync" ] && $assert $num_shards2 -eq $num_shards + + sync_states=$(project_python_array_field val.state $sync_markers) + eval secondary_status=$(project_python_array_field val.marker $sync_markers) +} + +function get_metadata_log_status { + cid=$1 + realm=$2 + + master_mdlog_status_json=`$(rgw_admin $cid) --rgw-realm=$realm mdlog status` + master_meta_status=$(json_extract "" $master_mdlog_status_json) + + eval master_status=$(project_python_array_field marker $master_meta_status) +} + +function wait_for_meta_sync { + master_id=$1 + cid=$2 + realm=$3 + + get_metadata_log_status $master_id $realm + echo "master_status=${master_status[*]}" + + while true; do + get_metadata_sync_status $cid $realm + + echo "secondary_status=${secondary_status[*]}" + + fail=0 + for i in `seq 0 $((num_shards-1))`; do + if [ "${master_status[$i]}" \> "${secondary_status[$i]}" ]; then + echo "shard $i not done syncing (${master_status[$i]} > ${secondary_status[$i]})" + fail=1 + break + fi + done + + [ $fail -eq 0 ] && echo "Success" && return || echo "Sync not complete" + + sleep 5 + done +} + diff --git a/src/test/rgw/test-rgw-multisite.sh b/src/test/rgw/test-rgw-multisite.sh new file mode 100755 index 000000000..a005b19e3 --- /dev/null +++ b/src/test/rgw/test-rgw-multisite.sh @@ -0,0 +1,83 @@ +#!/usr/bin/env bash + +[ $# -lt 1 ] && echo "usage: $0 <num-clusters> [rgw parameters...]" && exit 1 + +num_clusters=$1 +shift + +[ $num_clusters -lt 1 ] && echo "clusters num must be at least 1" && exit 1 + +. "`dirname $0`/test-rgw-common.sh" +. 
"`dirname $0`/test-rgw-meta-sync.sh" + +set -e + +realm_name=earth +zg=zg1 + +system_access_key="1234567890" +system_secret="pencil" + +# bring up first cluster +x $(start_ceph_cluster c1) -n $(get_mstart_parameters 1) + +if [ -n "$RGW_PER_ZONE" ]; then + rgws="$RGW_PER_ZONE" +else + rgws=1 +fi + +url=http://localhost + +i=1 +while [ $i -le $rgws ]; do + port=$((8100+i)) + endpoints="$endpoints""$url:$port," + i=$((i+1)) +done + +# create realm, zonegroup, zone, start rgws +init_first_zone c1 $realm_name $zg ${zg}-1 $endpoints $system_access_key $system_secret +i=1 +while [ $i -le $rgws ]; do + port=$((8100+i)) + x $(rgw c1 "$port" "$@") + i="$((i+1))" +done + +output=`$(rgw_admin c1) realm get` + +echo realm_status=$output + +# bring up next clusters + +endpoints="" +i=2 +while [ $i -le $num_clusters ]; do + x $(start_ceph_cluster c$i) -n $(get_mstart_parameters $i) + j=1 + endpoints="" + while [ $j -le $rgws ]; do + port=$((8000+i*100+j)) + endpoints="$endpoints""$url:$port," + j=$((j+1)) + done + + # create new zone, start rgw + init_zone_in_existing_zg c$i $realm_name $zg ${zg}-${i} 8101 $endpoints $zone_port $system_access_key $system_secret + j=1 + while [ $j -le $rgws ]; do + port=$((8000+i*100+j)) + x $(rgw c$i "$port" "$@") + j="$((j+1))" + done + i=$((i+1)) +done + +i=2 +while [ $i -le $num_clusters ]; do + wait_for_meta_sync c1 c$i $realm_name + + i=$((i+1)) +done + diff --git a/src/test/rgw/test_cls_fifo_legacy.cc b/src/test/rgw/test_cls_fifo_legacy.cc new file mode 100644 index 000000000..1fa5f8681 --- /dev/null +++ b/src/test/rgw/test_cls_fifo_legacy.cc @@ -0,0 +1,1184 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2019 Red Hat, Inc. + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#include <cerrno> +#include <iostream> +#include <string_view> + +#include "include/scope_guard.h" +#include "include/types.h" +#include "include/rados/librados.hpp" +#include "common/ceph_context.h" + +#include "cls/fifo/cls_fifo_ops.h" +#include "test/librados/test_cxx.h" +#include "global/global_context.h" + +#include "rgw_tools.h" +#include "cls_fifo_legacy.h" + +#include "gtest/gtest.h" + +using namespace std::literals; +using namespace std::string_literals; + +namespace R = librados; +namespace cb = ceph::buffer; +namespace fifo = rados::cls::fifo; +namespace RCf = rgw::cls::fifo; + +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); +const DoutPrefix dp(cct, 1, "test legacy cls fifo: "); + +namespace { +int fifo_create(const DoutPrefixProvider *dpp, R::IoCtx& ioctx, + const std::string& oid, + std::string_view id, + optional_yield y, + std::optional<fifo::objv> objv = std::nullopt, + std::optional<std::string_view> oid_prefix = std::nullopt, + bool exclusive = false, + std::uint64_t max_part_size = RCf::default_max_part_size, + std::uint64_t max_entry_size = RCf::default_max_entry_size) +{ + R::ObjectWriteOperation op; + RCf::create_meta(&op, id, objv, oid_prefix, exclusive, max_part_size, + max_entry_size); + return rgw_rados_operate(dpp, ioctx, oid, &op, y); +} +} + +class LegacyFIFO : public testing::Test { +protected: + const std::string pool_name = get_temp_pool_name(); + const std::string fifo_id = "fifo"; + R::Rados rados; + librados::IoCtx ioctx; + + void SetUp() override { + ASSERT_EQ("", create_one_pool_pp(pool_name, rados)); + ASSERT_EQ(0, rados.ioctx_create(pool_name.c_str(), ioctx)); + } + void TearDown() override { + destroy_one_pool_pp(pool_name, rados); + } +}; + +using LegacyClsFIFO = LegacyFIFO; +using AioLegacyFIFO = LegacyFIFO; + + +TEST_F(LegacyClsFIFO, TestCreate) +{ + auto r = fifo_create(&dp, ioctx, fifo_id, ""s, null_yield); + EXPECT_EQ(-EINVAL, r); + r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield, std::nullopt, + std::nullopt, false, 0); + EXPECT_EQ(-EINVAL, r); + r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, + std::nullopt, std::nullopt, + false, RCf::default_max_part_size, 0); + EXPECT_EQ(-EINVAL, r); + r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield); + EXPECT_EQ(0, r); + std::uint64_t size; + ioctx.stat(fifo_id, &size, nullptr); + EXPECT_GT(size, 0); + /* test idempotency */ + r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield); + EXPECT_EQ(0, r); + r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt, + std::nullopt, false); + EXPECT_EQ(-EINVAL, r); + r = fifo_create(&dp, ioctx, fifo_id, {}, null_yield, std::nullopt, + "myprefix"sv, false); + EXPECT_EQ(-EINVAL, r); + r = fifo_create(&dp, ioctx, fifo_id, "foo"sv, null_yield, + std::nullopt, std::nullopt, false); + EXPECT_EQ(-EEXIST, r); +} + +TEST_F(LegacyClsFIFO, TestGetInfo) +{ + auto r = fifo_create(&dp, ioctx, fifo_id, fifo_id, null_yield); + fifo::info info; + std::uint32_t part_header_size; + std::uint32_t part_entry_overhead; + r = RCf::get_meta(&dp, ioctx, fifo_id, std::nullopt, &info, &part_header_size, + &part_entry_overhead, 0, null_yield); + EXPECT_EQ(0, r); + EXPECT_GT(part_header_size, 0); + EXPECT_GT(part_entry_overhead, 0); + EXPECT_FALSE(info.version.instance.empty()); + + r = RCf::get_meta(&dp, ioctx, fifo_id, info.version, &info, &part_header_size, + &part_entry_overhead, 0, null_yield); + EXPECT_EQ(0, r); + fifo::objv objv; + objv.instance = "foo"; + objv.ver = 12; + r = RCf::get_meta(&dp, ioctx, fifo_id, objv, &info, 
&part_header_size, + &part_entry_overhead, 0, null_yield); + EXPECT_EQ(-ECANCELED, r); +} + +TEST_F(LegacyFIFO, TestOpenDefault) +{ + std::unique_ptr<RCf::FIFO> fifo; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &fifo, null_yield); + ASSERT_EQ(0, r); + // force reading from backend + r = fifo->read_meta(&dp, null_yield); + EXPECT_EQ(0, r); + auto info = fifo->meta(); + EXPECT_EQ(info.id, fifo_id); +} + +TEST_F(LegacyFIFO, TestOpenParams) +{ + const std::uint64_t max_part_size = 10 * 1024; + const std::uint64_t max_entry_size = 128; + auto oid_prefix = "foo.123."sv; + fifo::objv objv; + objv.instance = "fooz"s; + objv.ver = 10; + + /* first successful create */ + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, objv, oid_prefix, + false, max_part_size, max_entry_size); + ASSERT_EQ(0, r); + + /* force reading from backend */ + r = f->read_meta(&dp, null_yield); + auto info = f->meta(); + EXPECT_EQ(info.id, fifo_id); + EXPECT_EQ(info.params.max_part_size, max_part_size); + EXPECT_EQ(info.params.max_entry_size, max_entry_size); + EXPECT_EQ(info.version, objv); +} + +namespace { +template<class T> +std::pair<T, std::string> decode_entry(const RCf::list_entry& entry) +{ + T val; + auto iter = entry.data.cbegin(); + decode(val, iter); + return std::make_pair(std::move(val), entry.marker); +} +} + + +TEST_F(LegacyFIFO, TestPushListTrim) +{ + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); + ASSERT_EQ(0, r); + static constexpr auto max_entries = 10u; + for (uint32_t i = 0; i < max_entries; ++i) { + cb::list bl; + encode(i, bl); + r = f->push(&dp, bl, null_yield); + ASSERT_EQ(0, r); + } + + std::optional<std::string> marker; + /* get entries one by one */ + std::vector<RCf::list_entry> result; + bool more = false; + for (auto i = 0u; i < max_entries; ++i) { + + r = f->list(&dp, 1, marker, &result, &more, null_yield); + ASSERT_EQ(0, r); + + bool expected_more = (i != (max_entries - 1)); + ASSERT_EQ(expected_more, more); + ASSERT_EQ(1, result.size()); + + std::uint32_t val; + std::tie(val, marker) = decode_entry<std::uint32_t>(result.front()); + + ASSERT_EQ(i, val); + result.clear(); + } + + /* get all entries at once */ + std::string markers[max_entries]; + std::uint32_t min_entry = 0; + r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + + ASSERT_FALSE(more); + ASSERT_EQ(max_entries, result.size()); + for (auto i = 0u; i < max_entries; ++i) { + std::uint32_t val; + std::tie(val, markers[i]) = decode_entry<std::uint32_t>(result[i]); + ASSERT_EQ(i, val); + } + + /* trim one entry */ + r = f->trim(&dp, markers[min_entry], false, null_yield); + ASSERT_EQ(0, r); + ++min_entry; + + r = f->list(&dp, max_entries * 10, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_FALSE(more); + ASSERT_EQ(max_entries - min_entry, result.size()); + + for (auto i = min_entry; i < max_entries; ++i) { + std::uint32_t val; + std::tie(val, markers[i - min_entry]) = + decode_entry<std::uint32_t>(result[i - min_entry]); + EXPECT_EQ(i, val); + } +} + + +TEST_F(LegacyFIFO, TestPushTooBig) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, + std::nullopt, false, max_part_size, max_entry_size); + ASSERT_EQ(0, r); + + char buf[max_entry_size + 1]; + memset(buf, 0, sizeof(buf)); + + cb::list bl; + 
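+ // buf is one byte larger than max_entry_size, so the push below is expected to fail with -E2BIG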
bl.append(buf, sizeof(buf)); + + r = f->push(&dp, bl, null_yield); + EXPECT_EQ(-E2BIG, r); +} + + +TEST_F(LegacyFIFO, TestMultipleParts) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, + std::nullopt, false, max_part_size, + max_entry_size); + ASSERT_EQ(0, r); + + char buf[max_entry_size]; + memset(buf, 0, sizeof(buf)); + const auto [part_header_size, part_entry_overhead] = + f->get_part_layout_info(); + const auto entries_per_part = ((max_part_size - part_header_size) / + (max_entry_size + part_entry_overhead)); + const auto max_entries = entries_per_part * 4 + 1; + /* push enough entries */ + for (auto i = 0u; i < max_entries; ++i) { + cb::list bl; + *(int *)buf = i; + bl.append(buf, sizeof(buf)); + r = f->push(&dp, bl, null_yield); + ASSERT_EQ(0, r); + } + + auto info = f->meta(); + ASSERT_EQ(info.id, fifo_id); + /* head should have advanced */ + ASSERT_GT(info.head_part_num, 0); + + /* list all at once */ + std::vector<RCf::list_entry> result; + bool more = false; + r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + EXPECT_EQ(false, more); + ASSERT_EQ(max_entries, result.size()); + + for (auto i = 0u; i < max_entries; ++i) { + auto& bl = result[i].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } + + std::optional<std::string> marker; + /* get entries one by one */ + + for (auto i = 0u; i < max_entries; ++i) { + r = f->list(&dp, 1, marker, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_EQ(result.size(), 1); + const bool expected_more = (i != (max_entries - 1)); + ASSERT_EQ(expected_more, more); + + std::uint32_t val; + std::tie(val, marker) = decode_entry<std::uint32_t>(result.front()); + + auto& entry = result.front(); + auto& bl = entry.data; + ASSERT_EQ(i, *(int *)bl.c_str()); + marker = entry.marker; + } + + /* trim one at a time */ + marker.reset(); + for (auto i = 0u; i < max_entries; ++i) { + /* read single entry */ + r = f->list(&dp, 1, marker, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_EQ(result.size(), 1); + const bool expected_more = (i != (max_entries - 1)); + ASSERT_EQ(expected_more, more); + + marker = result.front().marker; + r = f->trim(&dp, *marker, false, null_yield); + ASSERT_EQ(0, r); + + /* check tail */ + info = f->meta(); + ASSERT_EQ(info.tail_part_num, i / entries_per_part); + + /* try to read all again, see how many entries left */ + r = f->list(&dp, max_entries, marker, &result, &more, null_yield); + ASSERT_EQ(max_entries - i - 1, result.size()); + ASSERT_EQ(false, more); + } + + /* tail now should point at head */ + info = f->meta(); + ASSERT_EQ(info.head_part_num, info.tail_part_num); + + RCf::part_info partinfo; + /* check old tails are removed */ + for (auto i = 0; i < info.tail_part_num; ++i) { + r = f->get_part_info(&dp, i, &partinfo, null_yield); + ASSERT_EQ(-ENOENT, r); + } + /* check current tail exists */ + r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield); + ASSERT_EQ(0, r); +} + +TEST_F(LegacyFIFO, TestTwoPushers) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, + std::nullopt, false, max_part_size, + max_entry_size); + ASSERT_EQ(0, r); + char buf[max_entry_size]; + memset(buf, 0, sizeof(buf)); + + auto [part_header_size, part_entry_overhead] 
= f->get_part_layout_info(); + const auto entries_per_part = ((max_part_size - part_header_size) / + (max_entry_size + part_entry_overhead)); + const auto max_entries = entries_per_part * 4 + 1; + std::unique_ptr<RCf::FIFO> f2; + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); + std::vector fifos{&f, &f2}; + + for (auto i = 0u; i < max_entries; ++i) { + cb::list bl; + *(int *)buf = i; + bl.append(buf, sizeof(buf)); + auto& f = *fifos[i % fifos.size()]; + r = f->push(&dp, bl, null_yield); + ASSERT_EQ(0, r); + } + + /* list all by both */ + std::vector<RCf::list_entry> result; + bool more = false; + r = f2->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_EQ(false, more); + ASSERT_EQ(max_entries, result.size()); + + r = f2->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_EQ(false, more); + ASSERT_EQ(max_entries, result.size()); + + for (auto i = 0u; i < max_entries; ++i) { + auto& bl = result[i].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } +} + +TEST_F(LegacyFIFO, TestTwoPushersTrim) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + std::unique_ptr<RCf::FIFO> f1; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt, + std::nullopt, false, max_part_size, + max_entry_size); + ASSERT_EQ(0, r); + + char buf[max_entry_size]; + memset(buf, 0, sizeof(buf)); + + auto [part_header_size, part_entry_overhead] = f1->get_part_layout_info(); + const auto entries_per_part = ((max_part_size - part_header_size) / + (max_entry_size + part_entry_overhead)); + const auto max_entries = entries_per_part * 4 + 1; + + std::unique_ptr<RCf::FIFO> f2; + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); + ASSERT_EQ(0, r); + + /* push one entry to f2 and the rest to f1 */ + for (auto i = 0u; i < max_entries; ++i) { + cb::list bl; + *(int *)buf = i; + bl.append(buf, sizeof(buf)); + auto& f = (i < 1 ? 
f2 : f1); + r = f->push(&dp, bl, null_yield); + ASSERT_EQ(0, r); + } + + /* trim half by fifo1 */ + auto num = max_entries / 2; + std::string marker; + std::vector<RCf::list_entry> result; + bool more = false; + r = f1->list(&dp, num, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_EQ(true, more); + ASSERT_EQ(num, result.size()); + + for (auto i = 0u; i < num; ++i) { + auto& bl = result[i].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } + + auto& entry = result[num - 1]; + marker = entry.marker; + r = f1->trim(&dp, marker, false, null_yield); + /* list what's left by fifo2 */ + + const auto left = max_entries - num; + f2->list(&dp, left, marker, &result, &more, null_yield); + ASSERT_EQ(left, result.size()); + ASSERT_EQ(false, more); + + for (auto i = num; i < max_entries; ++i) { + auto& bl = result[i - num].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } +} + +TEST_F(LegacyFIFO, TestPushBatch) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, + std::nullopt, false, max_part_size, + max_entry_size); + ASSERT_EQ(0, r); + + char buf[max_entry_size]; + memset(buf, 0, sizeof(buf)); + auto [part_header_size, part_entry_overhead] = f->get_part_layout_info(); + auto entries_per_part = ((max_part_size - part_header_size) / + (max_entry_size + part_entry_overhead)); + auto max_entries = entries_per_part * 4 + 1; /* enough entries to span multiple parts */ + std::vector<cb::list> bufs; + for (auto i = 0u; i < max_entries; ++i) { + cb::list bl; + *(int *)buf = i; + bl.append(buf, sizeof(buf)); + bufs.push_back(bl); + } + ASSERT_EQ(max_entries, bufs.size()); + + r = f->push(&dp, bufs, null_yield); + ASSERT_EQ(0, r); + + /* list all */ + + std::vector<RCf::list_entry> result; + bool more = false; + r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_EQ(false, more); + ASSERT_EQ(max_entries, result.size()); + for (auto i = 0u; i < max_entries; ++i) { + auto& bl = result[i].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } + auto& info = f->meta(); + ASSERT_EQ(info.head_part_num, 4); +} + +TEST_F(LegacyFIFO, TestAioTrim) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, + std::nullopt, false, max_part_size, + max_entry_size); + ASSERT_EQ(0, r); + + char buf[max_entry_size]; + memset(buf, 0, sizeof(buf)); + const auto [part_header_size, part_entry_overhead] = + f->get_part_layout_info(); + const auto entries_per_part = ((max_part_size - part_header_size) / + (max_entry_size + part_entry_overhead)); + const auto max_entries = entries_per_part * 4 + 1; + /* push enough entries */ + std::vector<cb::list> bufs; + for (auto i = 0u; i < max_entries; ++i) { + cb::list bl; + *(int *)buf = i; + bl.append(buf, sizeof(buf)); + bufs.push_back(std::move(bl)); + } + ASSERT_EQ(max_entries, bufs.size()); + + r = f->push(&dp, bufs, null_yield); + ASSERT_EQ(0, r); + + auto info = f->meta(); + ASSERT_EQ(info.id, fifo_id); + /* head should have advanced */ + ASSERT_GT(info.head_part_num, 0); + + /* list all at once */ + std::vector<RCf::list_entry> result; + bool more = false; + r = f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_EQ(false, more); + ASSERT_EQ(max_entries, result.size()); + + 
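+ // entries are now trimmed one at a time through an AioCompletion, checking after each trim that the tail part number advances and the remaining listing shrinks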
std::optional<std::string> marker; + /* trim one at a time */ + result.clear(); + more = false; + marker.reset(); + for (auto i = 0u; i < max_entries; ++i) { + /* read single entry */ + r = f->list(&dp, 1, marker, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_EQ(result.size(), 1); + const bool expected_more = (i != (max_entries - 1)); + ASSERT_EQ(expected_more, more); + + marker = result.front().marker; + std::unique_ptr<R::AioCompletion> c(rados.aio_create_completion(nullptr, + nullptr)); + f->trim(&dp, *marker, false, c.get()); + c->wait_for_complete(); + r = c->get_return_value(); + ASSERT_EQ(0, r); + + /* check tail */ + info = f->meta(); + ASSERT_EQ(info.tail_part_num, i / entries_per_part); + + /* try to read all again, see how many entries left */ + r = f->list(&dp, max_entries, marker, &result, &more, null_yield); + ASSERT_EQ(max_entries - i - 1, result.size()); + ASSERT_EQ(false, more); + } + + /* tail now should point at head */ + info = f->meta(); + ASSERT_EQ(info.head_part_num, info.tail_part_num); + + RCf::part_info partinfo; + /* check old tails are removed */ + for (auto i = 0; i < info.tail_part_num; ++i) { + r = f->get_part_info(&dp, i, &partinfo, null_yield); + ASSERT_EQ(-ENOENT, r); + } + /* check current tail exists */ + r = f->get_part_info(&dp, info.tail_part_num, &partinfo, null_yield); + ASSERT_EQ(0, r); +} + +TEST_F(LegacyFIFO, TestTrimExclusive) { + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); + ASSERT_EQ(0, r); + std::vector<RCf::list_entry> result; + bool more = false; + + static constexpr auto max_entries = 10u; + for (uint32_t i = 0; i < max_entries; ++i) { + cb::list bl; + encode(i, bl); + f->push(&dp, bl, null_yield); + } + + f->list(&dp, 1, std::nullopt, &result, &more, null_yield); + auto [val, marker] = decode_entry<std::uint32_t>(result.front()); + ASSERT_EQ(0, val); + f->trim(&dp, marker, true, null_yield); + + result.clear(); + f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); + std::tie(val, marker) = decode_entry<std::uint32_t>(result.front()); + ASSERT_EQ(0, val); + f->trim(&dp, result[4].marker, true, null_yield); + + result.clear(); + f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); + std::tie(val, marker) = decode_entry<std::uint32_t>(result.front()); + ASSERT_EQ(4, val); + f->trim(&dp, result.back().marker, true, null_yield); + + result.clear(); + f->list(&dp, max_entries, std::nullopt, &result, &more, null_yield); + std::tie(val, marker) = decode_entry<std::uint32_t>(result.front()); + ASSERT_EQ(result.size(), 1); + ASSERT_EQ(max_entries - 1, val); +} + +TEST_F(AioLegacyFIFO, TestPushListTrim) +{ + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); + ASSERT_EQ(0, r); + static constexpr auto max_entries = 10u; + for (uint32_t i = 0; i < max_entries; ++i) { + cb::list bl; + encode(i, bl); + auto c = R::Rados::aio_create_completion(); + f->push(&dp, bl, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + } + + std::optional<std::string> marker; + /* get entries one by one */ + std::vector<RCf::list_entry> result; + bool more = false; + for (auto i = 0u; i < max_entries; ++i) { + auto c = R::Rados::aio_create_completion(); + f->list(&dp, 1, marker, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + + bool expected_more = (i != (max_entries - 1)); + ASSERT_EQ(expected_more, more); + ASSERT_EQ(1, 
result.size()); + + std::uint32_t val; + std::tie(val, marker) = decode_entry<std::uint32_t>(result.front()); + + ASSERT_EQ(i, val); + result.clear(); + } + + /* get all entries at once */ + std::string markers[max_entries]; + std::uint32_t min_entry = 0; + auto c = R::Rados::aio_create_completion(); + f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + + ASSERT_FALSE(more); + ASSERT_EQ(max_entries, result.size()); + for (auto i = 0u; i < max_entries; ++i) { + std::uint32_t val; + std::tie(val, markers[i]) = decode_entry<std::uint32_t>(result[i]); + ASSERT_EQ(i, val); + } + + /* trim one entry */ + c = R::Rados::aio_create_completion(); + f->trim(&dp, markers[min_entry], false, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + ++min_entry; + + c = R::Rados::aio_create_completion(); + f->list(&dp, max_entries * 10, std::nullopt, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + ASSERT_FALSE(more); + ASSERT_EQ(max_entries - min_entry, result.size()); + + for (auto i = min_entry; i < max_entries; ++i) { + std::uint32_t val; + std::tie(val, markers[i - min_entry]) = + decode_entry<std::uint32_t>(result[i - min_entry]); + EXPECT_EQ(i, val); + } +} + + +TEST_F(AioLegacyFIFO, TestPushTooBig) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, + std::nullopt, false, max_part_size, max_entry_size); + ASSERT_EQ(0, r); + + char buf[max_entry_size + 1]; + memset(buf, 0, sizeof(buf)); + + cb::list bl; + bl.append(buf, sizeof(buf)); + + auto c = R::Rados::aio_create_completion(); + f->push(&dp, bl, c); + c->wait_for_complete(); + r = c->get_return_value(); + ASSERT_EQ(-E2BIG, r); + c->release(); + + c = R::Rados::aio_create_completion(); + f->push(&dp, std::vector<cb::list>{}, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + EXPECT_EQ(0, r); +} + + +TEST_F(AioLegacyFIFO, TestMultipleParts) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, + std::nullopt, false, max_part_size, + max_entry_size); + ASSERT_EQ(0, r); + + { + auto c = R::Rados::aio_create_completion(); + f->get_head_info(&dp, [&](int r, RCf::part_info&& p) { + ASSERT_EQ(0, p.magic); + ASSERT_EQ(0, p.min_ofs); + ASSERT_EQ(0, p.last_ofs); + ASSERT_EQ(0, p.next_ofs); + ASSERT_EQ(0, p.min_index); + ASSERT_EQ(0, p.max_index); + ASSERT_EQ(ceph::real_time{}, p.max_time); + }, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + } + + char buf[max_entry_size]; + memset(buf, 0, sizeof(buf)); + const auto [part_header_size, part_entry_overhead] = + f->get_part_layout_info(); + const auto entries_per_part = ((max_part_size - part_header_size) / + (max_entry_size + part_entry_overhead)); + const auto max_entries = entries_per_part * 4 + 1; + /* push enough entries */ + for (auto i = 0u; i < max_entries; ++i) { + cb::list bl; + *(int *)buf = i; + bl.append(buf, sizeof(buf)); + auto c = R::Rados::aio_create_completion(); + f->push(&dp, bl, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + EXPECT_EQ(0, r); + } + + auto info = f->meta(); + 
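+ // entries_per_part * 4 + 1 entries were pushed, so the data spans several parts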
ASSERT_EQ(info.id, fifo_id); + /* head should have advanced */ + ASSERT_GT(info.head_part_num, 0); + + /* list all at once */ + std::vector<RCf::list_entry> result; + bool more = false; + auto c = R::Rados::aio_create_completion(); + f->list(&dp, max_entries, std::nullopt, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + EXPECT_EQ(0, r); + EXPECT_EQ(false, more); + ASSERT_EQ(max_entries, result.size()); + + for (auto i = 0u; i < max_entries; ++i) { + auto& bl = result[i].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } + + std::optional<std::string> marker; + /* get entries one by one */ + + for (auto i = 0u; i < max_entries; ++i) { + c = R::Rados::aio_create_completion(); + f->list(&dp, 1, marker, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + EXPECT_EQ(0, r); + ASSERT_EQ(result.size(), 1); + const bool expected_more = (i != (max_entries - 1)); + ASSERT_EQ(expected_more, more); + + std::uint32_t val; + std::tie(val, marker) = decode_entry<std::uint32_t>(result.front()); + + auto& entry = result.front(); + auto& bl = entry.data; + ASSERT_EQ(i, *(int *)bl.c_str()); + marker = entry.marker; + } + + /* trim one at a time */ + marker.reset(); + for (auto i = 0u; i < max_entries; ++i) { + /* read single entry */ + c = R::Rados::aio_create_completion(); + f->list(&dp, 1, marker, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + EXPECT_EQ(0, r); + ASSERT_EQ(result.size(), 1); + const bool expected_more = (i != (max_entries - 1)); + ASSERT_EQ(expected_more, more); + + marker = result.front().marker; + c = R::Rados::aio_create_completion(); + f->trim(&dp, *marker, false, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + EXPECT_EQ(0, r); + ASSERT_EQ(result.size(), 1); + + /* check tail */ + info = f->meta(); + ASSERT_EQ(info.tail_part_num, i / entries_per_part); + + /* try to read all again, see how many entries left */ + c = R::Rados::aio_create_completion(); + f->list(&dp, max_entries, marker, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + EXPECT_EQ(0, r); + ASSERT_EQ(max_entries - i - 1, result.size()); + ASSERT_EQ(false, more); + } + + /* tail now should point at head */ + info = f->meta(); + ASSERT_EQ(info.head_part_num, info.tail_part_num); + + /* check old tails are removed */ + for (auto i = 0; i < info.tail_part_num; ++i) { + c = R::Rados::aio_create_completion(); + RCf::part_info partinfo; + f->get_part_info(i, &partinfo, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(-ENOENT, r); + } + /* check current tail exists */ + std::uint64_t next_ofs; + { + c = R::Rados::aio_create_completion(); + RCf::part_info partinfo; + f->get_part_info(info.tail_part_num, &partinfo, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + next_ofs = partinfo.next_ofs; + } + ASSERT_EQ(0, r); + + c = R::Rados::aio_create_completion(); + f->get_head_info(&dp, [&](int r, RCf::part_info&& p) { + ASSERT_EQ(next_ofs, p.next_ofs); + }, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); +} + +TEST_F(AioLegacyFIFO, TestTwoPushers) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, + std::nullopt, false, max_part_size, + max_entry_size); + ASSERT_EQ(0, r); + 
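+  /* a second handle is opened on the same FIFO below and pushes alternate
+   * between the two writers; a single reader must still see every entry,
+   * in order */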
char buf[max_entry_size]; + memset(buf, 0, sizeof(buf)); + + auto [part_header_size, part_entry_overhead] = f->get_part_layout_info(); + const auto entries_per_part = ((max_part_size - part_header_size) / + (max_entry_size + part_entry_overhead)); + const auto max_entries = entries_per_part * 4 + 1; + std::unique_ptr<RCf::FIFO> f2; + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); + std::vector fifos{&f, &f2}; + + for (auto i = 0u; i < max_entries; ++i) { + cb::list bl; + *(int *)buf = i; + bl.append(buf, sizeof(buf)); + auto& f = *fifos[i % fifos.size()]; + auto c = R::Rados::aio_create_completion(); + f->push(&dp, bl, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + } + + /* list all by both */ + std::vector<RCf::list_entry> result; + bool more = false; + auto c = R::Rados::aio_create_completion(); + f2->list(&dp, max_entries, std::nullopt, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + ASSERT_EQ(false, more); + ASSERT_EQ(max_entries, result.size()); + + c = R::Rados::aio_create_completion(); + f2->list(&dp, max_entries, std::nullopt, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + ASSERT_EQ(false, more); + ASSERT_EQ(max_entries, result.size()); + + for (auto i = 0u; i < max_entries; ++i) { + auto& bl = result[i].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } +} + +TEST_F(AioLegacyFIFO, TestTwoPushersTrim) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + std::unique_ptr<RCf::FIFO> f1; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f1, null_yield, std::nullopt, + std::nullopt, false, max_part_size, + max_entry_size); + ASSERT_EQ(0, r); + + char buf[max_entry_size]; + memset(buf, 0, sizeof(buf)); + + auto [part_header_size, part_entry_overhead] = f1->get_part_layout_info(); + const auto entries_per_part = ((max_part_size - part_header_size) / + (max_entry_size + part_entry_overhead)); + const auto max_entries = entries_per_part * 4 + 1; + + std::unique_ptr<RCf::FIFO> f2; + r = RCf::FIFO::open(&dp, ioctx, fifo_id, &f2, null_yield); + ASSERT_EQ(0, r); + + /* push one entry to f2 and the rest to f1 */ + for (auto i = 0u; i < max_entries; ++i) { + cb::list bl; + *(int *)buf = i; + bl.append(buf, sizeof(buf)); + auto& f = (i < 1 ? 
f2 : f1); + auto c = R::Rados::aio_create_completion(); + f->push(&dp, bl, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + } + + /* trim half by fifo1 */ + auto num = max_entries / 2; + std::string marker; + std::vector<RCf::list_entry> result; + bool more = false; + auto c = R::Rados::aio_create_completion(); + f1->list(&dp, num, std::nullopt, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + ASSERT_EQ(true, more); + ASSERT_EQ(num, result.size()); + + for (auto i = 0u; i < num; ++i) { + auto& bl = result[i].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } + + auto& entry = result[num - 1]; + marker = entry.marker; + c = R::Rados::aio_create_completion(); + f1->trim(&dp, marker, false, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + /* list what's left by fifo2 */ + + const auto left = max_entries - num; + c = R::Rados::aio_create_completion(); + f2->list(&dp, left, marker, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + ASSERT_EQ(left, result.size()); + ASSERT_EQ(false, more); + + for (auto i = num; i < max_entries; ++i) { + auto& bl = result[i - num].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } +} + +TEST_F(AioLegacyFIFO, TestPushBatch) +{ + static constexpr auto max_part_size = 2048ull; + static constexpr auto max_entry_size = 128ull; + + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield, std::nullopt, + std::nullopt, false, max_part_size, + max_entry_size); + ASSERT_EQ(0, r); + + char buf[max_entry_size]; + memset(buf, 0, sizeof(buf)); + auto [part_header_size, part_entry_overhead] = f->get_part_layout_info(); + auto entries_per_part = ((max_part_size - part_header_size) / + (max_entry_size + part_entry_overhead)); + auto max_entries = entries_per_part * 4 + 1; /* enough entries to span multiple parts */ + std::vector<cb::list> bufs; + for (auto i = 0u; i < max_entries; ++i) { + cb::list bl; + *(int *)buf = i; + bl.append(buf, sizeof(buf)); + bufs.push_back(bl); + } + ASSERT_EQ(max_entries, bufs.size()); + + auto c = R::Rados::aio_create_completion(); + f->push(&dp, bufs, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + + /* list all */ + + std::vector<RCf::list_entry> result; + bool more = false; + c = R::Rados::aio_create_completion(); + f->list(&dp, max_entries, std::nullopt, &result, &more, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(0, r); + ASSERT_EQ(false, more); + ASSERT_EQ(max_entries, result.size()); + for (auto i = 0u; i < max_entries; ++i) { + auto& bl = result[i].data; + ASSERT_EQ(i, *(int *)bl.c_str()); + } + auto& info = f->meta(); + ASSERT_EQ(info.head_part_num, 4); +} + +TEST_F(LegacyFIFO, TrimAll) +{ + std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); + ASSERT_EQ(0, r); + static constexpr auto max_entries = 10u; + for (uint32_t i = 0; i < max_entries; ++i) { + cb::list bl; + encode(i, bl); + r = f->push(&dp, bl, null_yield); + ASSERT_EQ(0, r); + } + + /* trim one entry */ + r = f->trim(&dp, RCf::marker::max().to_string(), false, null_yield); + ASSERT_EQ(-ENODATA, r); + + std::vector<RCf::list_entry> result; + bool more; + r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_TRUE(result.empty()); +} + +TEST_F(LegacyFIFO, AioTrimAll) +{ + 
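+  /* same scenario as TrimAll above, but the trim is issued through an AIO
+   * completion; trimming to the maximum marker drains the FIFO and reports
+   * -ENODATA, after which listing returns nothing */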
std::unique_ptr<RCf::FIFO> f; + auto r = RCf::FIFO::create(&dp, ioctx, fifo_id, &f, null_yield); + ASSERT_EQ(0, r); + static constexpr auto max_entries = 10u; + for (uint32_t i = 0; i < max_entries; ++i) { + cb::list bl; + encode(i, bl); + r = f->push(&dp, bl, null_yield); + ASSERT_EQ(0, r); + } + + auto c = R::Rados::aio_create_completion(); + f->trim(&dp, RCf::marker::max().to_string(), false, c); + c->wait_for_complete(); + r = c->get_return_value(); + c->release(); + ASSERT_EQ(-ENODATA, r); + + std::vector<RCf::list_entry> result; + bool more; + r = f->list(&dp, 1, std::nullopt, &result, &more, null_yield); + ASSERT_EQ(0, r); + ASSERT_TRUE(result.empty()); +} diff --git a/src/test/rgw/test_http_manager.cc b/src/test/rgw/test_http_manager.cc new file mode 100644 index 000000000..f2daeddca --- /dev/null +++ b/src/test/rgw/test_http_manager.cc @@ -0,0 +1,148 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ +#include "rgw_rados.h" +#include "rgw_http_client.h" +#include "global/global_init.h" +#include "common/ceph_argparse.h" +#include <unistd.h> +#include <curl/curl.h> +#include <boost/asio/ip/tcp.hpp> +#include <boost/asio/write.hpp> +#include <thread> +#include <gtest/gtest.h> + +using namespace std; + +namespace { + using tcp = boost::asio::ip::tcp; + + // if we have a racing where another thread manages to bind and listen the + // port picked by this acceptor, try again. + static constexpr int MAX_BIND_RETRIES = 60; + + tcp::acceptor try_bind(boost::asio::io_context& ioctx) { + using tcp = boost::asio::ip::tcp; + tcp::endpoint endpoint(tcp::v4(), 0); + tcp::acceptor acceptor(ioctx); + acceptor.open(endpoint.protocol()); + for (int retries = 0;; retries++) { + try { + acceptor.bind(endpoint); + // yay! 
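+        // bound to the ephemeral port the kernel picked (the endpoint asked for port 0)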
+ break; + } catch (const boost::system::system_error& e) { + if (retries == MAX_BIND_RETRIES) { + throw; + } + if (e.code() != boost::system::errc::address_in_use) { + throw; + } + } + // backoff a little bit + sleep(1); + } + return acceptor; + } +} + +TEST(HTTPManager, ReadTruncated) +{ + using tcp = boost::asio::ip::tcp; + boost::asio::io_context ioctx; + auto acceptor = try_bind(ioctx); + acceptor.listen(); + + std::thread server{[&] { + tcp::socket socket{ioctx}; + acceptor.accept(socket); + std::string_view response = + "HTTP/1.1 200 OK\r\n" + "Content-Length: 1024\r\n" + "\r\n" + "short body"; + boost::asio::write(socket, boost::asio::buffer(response)); + }}; + const auto url = std::string{"http://127.0.0.1:"} + std::to_string(acceptor.local_endpoint().port()); + + RGWHTTPClient client{g_ceph_context, "GET", url}; + EXPECT_EQ(-EAGAIN, RGWHTTP::process(&client, null_yield)); + + server.join(); +} + +TEST(HTTPManager, Head) +{ + using tcp = boost::asio::ip::tcp; + boost::asio::io_context ioctx; + auto acceptor = try_bind(ioctx); + acceptor.listen(); + + std::thread server{[&] { + tcp::socket socket{ioctx}; + acceptor.accept(socket); + std::string_view response = + "HTTP/1.1 200 OK\r\n" + "Content-Length: 1024\r\n" + "\r\n"; + boost::asio::write(socket, boost::asio::buffer(response)); + }}; + const auto url = std::string{"http://127.0.0.1:"} + std::to_string(acceptor.local_endpoint().port()); + + RGWHTTPClient client{g_ceph_context, "HEAD", url}; + EXPECT_EQ(0, RGWHTTP::process(&client, null_yield)); + + server.join(); +} + +TEST(HTTPManager, SignalThread) +{ + auto cct = g_ceph_context; + RGWHTTPManager http(cct); + + ASSERT_EQ(0, http.start()); + + // default pipe buffer size according to man pipe + constexpr size_t max_pipe_buffer_size = 65536; + // each signal writes 4 bytes to the pipe + constexpr size_t max_pipe_signals = max_pipe_buffer_size / sizeof(uint32_t); + // add_request and unregister_request + constexpr size_t pipe_signals_per_request = 2; + // number of http requests to fill the pipe buffer + constexpr size_t max_requests = max_pipe_signals / pipe_signals_per_request; + + // send one extra request to test that we don't deadlock + constexpr size_t num_requests = max_requests + 1; + + for (size_t i = 0; i < num_requests; i++) { + RGWHTTPClient client{cct, "PUT", "http://127.0.0.1:80"}; + http.add_request(&client); + } +} + +int main(int argc, char** argv) +{ + auto args = argv_to_vec(argc, argv); + auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, + CODE_ENVIRONMENT_UTILITY, + CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); + common_init_finish(g_ceph_context); + + rgw_http_client_init(cct->get()); + rgw_setup_saved_curl_handles(); + ::testing::InitGoogleTest(&argc, argv); + int r = RUN_ALL_TESTS(); + rgw_release_all_curl_handles(); + rgw_http_client_cleanup(); + return r; +} diff --git a/src/test/rgw/test_log_backing.cc b/src/test/rgw/test_log_backing.cc new file mode 100644 index 000000000..e4109d535 --- /dev/null +++ b/src/test/rgw/test_log_backing.cc @@ -0,0 +1,365 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2019 Red Hat, Inc. + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#include "rgw_log_backing.h" + +#include <cerrno> +#include <iostream> +#include <string_view> + +#include <fmt/format.h> + +#include "include/types.h" +#include "include/rados/librados.hpp" + +#include "test/librados/test_cxx.h" +#include "global/global_context.h" + +#include "cls/log/cls_log_client.h" + +#include "rgw_tools.h" +#include "cls_fifo_legacy.h" + +#include "gtest/gtest.h" + +namespace lr = librados; +namespace cb = ceph::buffer; +namespace fifo = rados::cls::fifo; +namespace RCf = rgw::cls::fifo; + +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); +const DoutPrefix dp(cct, 1, "test log backing: "); + +class LogBacking : public testing::Test { +protected: + static constexpr int SHARDS = 3; + const std::string pool_name = get_temp_pool_name(); + lr::Rados rados; + lr::IoCtx ioctx; + lr::Rados rados2; + lr::IoCtx ioctx2; + + void SetUp() override { + ASSERT_EQ("", create_one_pool_pp(pool_name, rados)); + ASSERT_EQ(0, rados.ioctx_create(pool_name.c_str(), ioctx)); + connect_cluster_pp(rados2); + ASSERT_EQ(0, rados2.ioctx_create(pool_name.c_str(), ioctx2)); + } + void TearDown() override { + destroy_one_pool_pp(pool_name, rados); + } + + std::string get_oid(uint64_t gen_id, int i) const { + return (gen_id > 0 ? + fmt::format("shard@G{}.{}", gen_id, i) : + fmt::format("shard.{}", i)); + } + + void make_omap() { + for (int i = 0; i < SHARDS; ++i) { + using ceph::encode; + lr::ObjectWriteOperation op; + cb::list bl; + encode(i, bl); + cls_log_add(op, ceph_clock_now(), {}, "meow", bl); + auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield); + ASSERT_GE(r, 0); + } + } + + void add_omap(int i) { + using ceph::encode; + lr::ObjectWriteOperation op; + cb::list bl; + encode(i, bl); + cls_log_add(op, ceph_clock_now(), {}, "meow", bl); + auto r = rgw_rados_operate(&dp, ioctx, get_oid(0, i), &op, null_yield); + ASSERT_GE(r, 0); + } + + void empty_omap() { + for (int i = 0; i < SHARDS; ++i) { + auto oid = get_oid(0, i); + std::string to_marker; + { + lr::ObjectReadOperation op; + std::list<cls_log_entry> entries; + bool truncated = false; + cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated); + auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield); + ASSERT_GE(r, 0); + ASSERT_FALSE(entries.empty()); + } + { + lr::ObjectWriteOperation op; + cls_log_trim(op, {}, {}, {}, to_marker); + auto r = rgw_rados_operate(&dp, ioctx, oid, &op, null_yield); + ASSERT_GE(r, 0); + } + { + lr::ObjectReadOperation op; + std::list<cls_log_entry> entries; + bool truncated = false; + cls_log_list(op, {}, {}, {}, 1, entries, &to_marker, &truncated); + auto r = rgw_rados_operate(&dp, ioctx, oid, &op, nullptr, null_yield); + ASSERT_GE(r, 0); + ASSERT_TRUE(entries.empty()); + } + } + } + + void make_fifo() + { + for (int i = 0; i < SHARDS; ++i) { + std::unique_ptr<RCf::FIFO> fifo; + auto r = RCf::FIFO::create(&dp, ioctx, get_oid(0, i), &fifo, null_yield); + ASSERT_EQ(0, r); + ASSERT_TRUE(fifo); + } + } + + void add_fifo(int i) + { + using ceph::encode; + std::unique_ptr<RCf::FIFO> fifo; + auto r = RCf::FIFO::open(&dp, ioctx, get_oid(0, i), &fifo, null_yield); + ASSERT_GE(0, r); + ASSERT_TRUE(fifo); + cb::list bl; + encode(i, bl); + r = fifo->push(&dp, bl, null_yield); + ASSERT_GE(0, r); + } + + void assert_empty() { + std::vector<lr::ObjectItem> result; + lr::ObjectCursor next; + auto r = ioctx.object_list(ioctx.object_list_begin(), ioctx.object_list_end(), + 100, {}, &result, &next); + ASSERT_GE(r, 0); + ASSERT_TRUE(result.empty()); + } +}; + 
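+// As the tests below exercise it, log_backing_type() looks at the shard
+// objects named by the oid callback and reports whether an existing log is
+// backed by omap or by FIFO, falling back to the requested type when the
+// shards are empty.  A minimal call sketch, using only the argument shapes
+// the tests themselves pass in:
+//
+//   auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS,
+//                                [this](int shard){ return get_oid(0, shard); },
+//                                null_yield);
+//   // on success, *stat is either log_type::omap or log_type::fifo
+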
+TEST_F(LogBacking, TestOmap) +{ + make_omap(); + auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS, + [this](int shard){ return get_oid(0, shard); }, + null_yield); + ASSERT_EQ(log_type::omap, *stat); +} + +TEST_F(LogBacking, TestOmapEmpty) +{ + auto stat = log_backing_type(&dp, ioctx, log_type::omap, SHARDS, + [this](int shard){ return get_oid(0, shard); }, + null_yield); + ASSERT_EQ(log_type::omap, *stat); +} + +TEST_F(LogBacking, TestFIFO) +{ + make_fifo(); + auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS, + [this](int shard){ return get_oid(0, shard); }, + null_yield); + ASSERT_EQ(log_type::fifo, *stat); +} + +TEST_F(LogBacking, TestFIFOEmpty) +{ + auto stat = log_backing_type(&dp, ioctx, log_type::fifo, SHARDS, + [this](int shard){ return get_oid(0, shard); }, + null_yield); + ASSERT_EQ(log_type::fifo, *stat); +} + +TEST(CursorGen, RoundTrip) { + const std::string_view pcurs = "fded"; + { + auto gc = gencursor(0, pcurs); + ASSERT_EQ(pcurs, gc); + auto [gen, cursor] = cursorgen(gc); + ASSERT_EQ(0, gen); + ASSERT_EQ(pcurs, cursor); + } + { + auto gc = gencursor(53, pcurs); + ASSERT_NE(pcurs, gc); + auto [gen, cursor] = cursorgen(gc); + ASSERT_EQ(53, gen); + ASSERT_EQ(pcurs, cursor); + } +} + +class generations final : public logback_generations { +public: + + entries_t got_entries; + std::optional<uint64_t> tail; + + using logback_generations::logback_generations; + + bs::error_code handle_init(entries_t e) noexcept { + got_entries = e; + return {}; + } + + bs::error_code handle_new_gens(entries_t e) noexcept { + got_entries = e; + return {}; + } + + bs::error_code handle_empty_to(uint64_t new_tail) noexcept { + tail = new_tail; + return {}; + } +}; + +TEST_F(LogBacking, GenerationSingle) +{ + auto lgr = logback_generations::init<generations>( + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { + return get_oid(gen_id, shard); + }, SHARDS, log_type::fifo, null_yield); + ASSERT_TRUE(lgr); + + auto lg = std::move(*lgr); + + ASSERT_EQ(0, lg->got_entries.begin()->first); + + ASSERT_EQ(0, lg->got_entries[0].gen_id); + ASSERT_EQ(log_type::fifo, lg->got_entries[0].type); + ASSERT_FALSE(lg->got_entries[0].pruned); + + auto ec = lg->empty_to(&dp, 0, null_yield); + ASSERT_TRUE(ec); + + lg.reset(); + + lg = *logback_generations::init<generations>( + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { + return get_oid(gen_id, shard); + }, SHARDS, log_type::fifo, null_yield); + + ASSERT_EQ(0, lg->got_entries.begin()->first); + + ASSERT_EQ(0, lg->got_entries[0].gen_id); + ASSERT_EQ(log_type::fifo, lg->got_entries[0].type); + ASSERT_FALSE(lg->got_entries[0].pruned); + + lg->got_entries.clear(); + + ec = lg->new_backing(&dp, log_type::omap, null_yield); + ASSERT_FALSE(ec); + + ASSERT_EQ(1, lg->got_entries.size()); + ASSERT_EQ(1, lg->got_entries[1].gen_id); + ASSERT_EQ(log_type::omap, lg->got_entries[1].type); + ASSERT_FALSE(lg->got_entries[1].pruned); + + lg.reset(); + + lg = *logback_generations::init<generations>( + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { + return get_oid(gen_id, shard); + }, SHARDS, log_type::fifo, null_yield); + + ASSERT_EQ(2, lg->got_entries.size()); + ASSERT_EQ(0, lg->got_entries[0].gen_id); + ASSERT_EQ(log_type::fifo, lg->got_entries[0].type); + ASSERT_FALSE(lg->got_entries[0].pruned); + + ASSERT_EQ(1, lg->got_entries[1].gen_id); + ASSERT_EQ(log_type::omap, lg->got_entries[1].type); + ASSERT_FALSE(lg->got_entries[1].pruned); + + ec = lg->empty_to(&dp, 0, null_yield); + ASSERT_FALSE(ec); + + ASSERT_EQ(0, 
*lg->tail); + + lg.reset(); + + lg = *logback_generations::init<generations>( + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { + return get_oid(gen_id, shard); + }, SHARDS, log_type::fifo, null_yield); + + ASSERT_EQ(1, lg->got_entries.size()); + ASSERT_EQ(1, lg->got_entries[1].gen_id); + ASSERT_EQ(log_type::omap, lg->got_entries[1].type); + ASSERT_FALSE(lg->got_entries[1].pruned); +} + +TEST_F(LogBacking, GenerationWN) +{ + auto lg1 = *logback_generations::init<generations>( + &dp, ioctx, "foobar", [this](uint64_t gen_id, int shard) { + return get_oid(gen_id, shard); + }, SHARDS, log_type::fifo, null_yield); + + auto ec = lg1->new_backing(&dp, log_type::omap, null_yield); + ASSERT_FALSE(ec); + + ASSERT_EQ(1, lg1->got_entries.size()); + ASSERT_EQ(1, lg1->got_entries[1].gen_id); + ASSERT_EQ(log_type::omap, lg1->got_entries[1].type); + ASSERT_FALSE(lg1->got_entries[1].pruned); + + lg1->got_entries.clear(); + + auto lg2 = *logback_generations::init<generations>( + &dp, ioctx2, "foobar", [this](uint64_t gen_id, int shard) { + return get_oid(gen_id, shard); + }, SHARDS, log_type::fifo, null_yield); + + ASSERT_EQ(2, lg2->got_entries.size()); + + ASSERT_EQ(0, lg2->got_entries[0].gen_id); + ASSERT_EQ(log_type::fifo, lg2->got_entries[0].type); + ASSERT_FALSE(lg2->got_entries[0].pruned); + + ASSERT_EQ(1, lg2->got_entries[1].gen_id); + ASSERT_EQ(log_type::omap, lg2->got_entries[1].type); + ASSERT_FALSE(lg2->got_entries[1].pruned); + + lg2->got_entries.clear(); + + ec = lg1->new_backing(&dp, log_type::fifo, null_yield); + ASSERT_FALSE(ec); + + ASSERT_EQ(1, lg1->got_entries.size()); + ASSERT_EQ(2, lg1->got_entries[2].gen_id); + ASSERT_EQ(log_type::fifo, lg1->got_entries[2].type); + ASSERT_FALSE(lg1->got_entries[2].pruned); + + ASSERT_EQ(1, lg2->got_entries.size()); + ASSERT_EQ(2, lg2->got_entries[2].gen_id); + ASSERT_EQ(log_type::fifo, lg2->got_entries[2].type); + ASSERT_FALSE(lg2->got_entries[2].pruned); + + lg1->got_entries.clear(); + lg2->got_entries.clear(); + + ec = lg2->empty_to(&dp, 1, null_yield); + ASSERT_FALSE(ec); + + ASSERT_EQ(1, *lg1->tail); + ASSERT_EQ(1, *lg2->tail); + + lg1->tail.reset(); + lg2->tail.reset(); +} diff --git a/src/test/rgw/test_multen.py b/src/test/rgw/test_multen.py new file mode 100644 index 000000000..91464d333 --- /dev/null +++ b/src/test/rgw/test_multen.py @@ -0,0 +1,400 @@ +# Test of mult-tenancy + +import json +import sys + +from boto.s3.connection import S3Connection, OrdinaryCallingFormat + +# XXX once we're done, break out the common code into a library module +# See https://github.com/ceph/ceph/pull/8646 +import test_multi as t + +class TestException(Exception): + pass + +# +# Create a traditional user, S3-only, global (empty) tenant +# +def test2(cluster): + uid = "tester2" + display_name = "'Test User 2'" + access_key = "tester2KEY" + s3_secret = "test3pass" + cmd = t.build_cmd('--uid', uid, + '--display-name', display_name, + '--access-key', access_key, + '--secret', s3_secret, + "user create") + out, ret = cluster.rgw_admin(cmd, check_retcode=False) + if ret != 0: + raise TestException("failed command: user create --uid %s" % uid) + + try: + outj = json.loads(out.decode('utf-8')) + except ValueError: + raise TestException("invalid json after: user create --uid %s" % uid) + if not isinstance(outj, dict): + raise TestException("bad json after: user create --uid %s" % uid) + if outj['user_id'] != uid: + raise TestException( + "command: user create --uid %s, returned user_id %s" % + (uid, outj['user_id'])) + +# +# Create a tenantized user with 
--tenant foo +# +def test3(cluster): + tid = "testx3" + uid = "tester3" + display_name = "Test_User_3" + access_key = "tester3KEY" + s3_secret = "test3pass" + cmd = t.build_cmd( + '--tenant', tid, + '--uid', uid, + '--display-name', display_name, + '--access-key', access_key, + '--secret', s3_secret, + "user create") + out, ret = cluster.rgw_admin(cmd, check_retcode=False) + if ret != 0: + raise TestException("failed command: user create --uid %s" % uid) + + try: + outj = json.loads(out.decode('utf-8')) + except ValueError: + raise TestException("invalid json after: user create --uid %s" % uid) + if not isinstance(outj, dict): + raise TestException("bad json after: user create --uid %s" % uid) + tid_uid = "%s$%s" % (tid, uid) + if outj['user_id'] != tid_uid: + raise TestException( + "command: user create --uid %s, returned user_id %s" % + (tid_uid, outj['user_id'])) + +# +# Create a tenantized user with a subuser +# +# N.B. The aim of this test is not just to create a subuser, but to create +# the key with a separate command, which does not use --tenant, but extracts +# the tenant from the subuser. No idea why we allow this. There was some kind +# of old script that did this. +# +def test4(cluster): + tid = "testx4" + uid = "tester4" + subid = "test4" + + display_name = "Test_User_4" + cmd = t.build_cmd( + '--tenant', tid, + '--uid', uid, + '--display-name', display_name, + '--subuser', '%s:%s' % (uid, subid), + '--key-type', 'swift', + '--access', 'full', + "user create") + out, ret = cluster.rgw_admin(cmd, check_retcode=False) + if ret != 0: + raise TestException("failed command: user create --uid %s" % uid) + + try: + outj = json.loads(out.decode('utf-8')) + except ValueError: + raise TestException("invalid json after: user create --uid %s" % uid) + if not isinstance(outj, dict): + raise TestException("bad json after: user create --uid %s" % uid) + tid_uid = "%s$%s" % (tid, uid) + if outj['user_id'] != tid_uid: + raise TestException( + "command: user create --uid %s, returned user_id %s" % + (tid_uid, outj['user_id'])) + + # Note that this tests a way to identify a fully-qualified subuser + # without --tenant and --uid. This is a historic use that we support. + swift_secret = "test3pass" + cmd = t.build_cmd( + '--subuser', "'%s$%s:%s'" % (tid, uid, subid), + '--key-type', 'swift', + '--secret', swift_secret, + "key create") + out, ret = cluster.rgw_admin(cmd, check_retcode=False) + if ret != 0: + raise TestException("failed command: key create --uid %s" % uid) + + try: + outj = json.loads(out.decode('utf-8')) + except ValueError: + raise TestException("invalid json after: key create --uid %s" % uid) + if not isinstance(outj, dict): + raise TestException("bad json after: key create --uid %s" % uid) + tid_uid = "%s$%s" % (tid, uid) + if outj['user_id'] != tid_uid: + raise TestException( + "command: key create --uid %s, returned user_id %s" % + (tid_uid, outj['user_id'])) + # These tests easily can throw KeyError, needs a try: XXX + skj = outj['swift_keys'][0] + if skj['secret_key'] != swift_secret: + raise TestException( + "command: key create --uid %s, returned swift key %s" % + (tid_uid, skj['secret_key'])) + +# +# Access the cluster, create containers in two tenants, verify it all works. 
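+# A bucket created under a tenant is addressed from outside that tenant as
+# "<tenant>:<bucket>"; test5_poke_s3 below relies on this when it reads
+# test5b's bucket using tester5a's credentials.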
+# + +def test5_add_s3_key(cluster, tid, uid): + secret = "%spass" % uid + if tid: + tid_uid = "%s$%s" % (tid, uid) + else: + tid_uid = uid + + cmd = t.build_cmd( + '--uid', "'%s'" % (tid_uid,), + '--access-key', uid, + '--secret', secret, + "key create") + out, ret = cluster.rgw_admin(cmd, check_retcode=False) + if ret != 0: + raise TestException("failed command: key create --uid %s" % uid) + + try: + outj = json.loads(out.decode('utf-8')) + except ValueError: + raise TestException("invalid json after: key create --uid %s" % uid) + if not isinstance(outj, dict): + raise TestException("bad json after: key create --uid %s" % uid) + if outj['user_id'] != tid_uid: + raise TestException( + "command: key create --uid %s, returned user_id %s" % + (uid, outj['user_id'])) + skj = outj['keys'][0] + if skj['secret_key'] != secret: + raise TestException( + "command: key create --uid %s, returned s3 key %s" % + (uid, skj['secret_key'])) + +def test5_add_swift_key(cluster, tid, uid, subid): + secret = "%spass" % uid + if tid: + tid_uid = "%s$%s" % (tid, uid) + else: + tid_uid = uid + + cmd = t.build_cmd( + '--subuser', "'%s:%s'" % (tid_uid, subid), + '--key-type', 'swift', + '--secret', secret, + "key create") + out, ret = cluster.rgw_admin(cmd, check_retcode=False) + if ret != 0: + raise TestException("failed command: key create --uid %s" % uid) + + try: + outj = json.loads(out.decode('utf-8')) + except ValueError: + raise TestException("invalid json after: key create --uid %s" % uid) + if not isinstance(outj, dict): + raise TestException("bad json after: key create --uid %s" % uid) + if outj['user_id'] != tid_uid: + raise TestException( + "command: key create --uid %s, returned user_id %s" % + (uid, outj['user_id'])) + # XXX checking wrong thing here (S3 key) + skj = outj['keys'][0] + if skj['secret_key'] != secret: + raise TestException( + "command: key create --uid %s, returned s3 key %s" % + (uid, skj['secret_key'])) + +def test5_make_user(cluster, tid, uid, subid): + """ + :param tid: Tenant ID string or None for the legacy tenant + :param uid: User ID string + :param subid: Subuser ID, may be None for S3-only users + """ + display_name = "'Test User %s'" % uid + + cmd = "" + if tid: + cmd = t.build_cmd(cmd, + '--tenant', tid) + cmd = t.build_cmd(cmd, + '--uid', uid, + '--display-name', display_name) + if subid: + cmd = t.build_cmd(cmd, + '--subuser', '%s:%s' % (uid, subid), + '--key-type', 'swift') + cmd = t.build_cmd(cmd, + '--access', 'full', + "user create") + + out, ret = cluster.rgw_admin(cmd, check_retcode=False) + if ret != 0: + raise TestException("failed command: user create --uid %s" % uid) + try: + outj = json.loads(out.decode('utf-8')) + except ValueError: + raise TestException("invalid json after: user create --uid %s" % uid) + if not isinstance(outj, dict): + raise TestException("bad json after: user create --uid %s" % uid) + if tid: + tid_uid = "%s$%s" % (tid, uid) + else: + tid_uid = uid + if outj['user_id'] != tid_uid: + raise TestException( + "command: user create --uid %s, returned user_id %s" % + (tid_uid, outj['user_id'])) + + # + # For now, this uses hardcoded passwords based on uid. + # They are all different for ease of debugging in case something crosses. 
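+    # (e.g. uid "tester5a" ends up with the secret "tester5apass" for both
+    # its S3 and Swift keys)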
+ # + test5_add_s3_key(cluster, tid, uid) + if subid: + test5_add_swift_key(cluster, tid, uid, subid) + +def test5_poke_s3(cluster): + + bucketname = "test5cont1" + objname = "obj1" + + # Not sure if we like useless information printed, but the rest of the + # test framework is insanely talkative when it executes commands. + # So, to keep it in line and have a marker when things go wrong, this. + print("PUT bucket %s object %s for tenant A (empty)" % + (bucketname, objname)) + c = S3Connection( + aws_access_key_id="tester5a", + aws_secret_access_key="tester5apass", + is_secure=False, + host="localhost", + port = cluster.port, + calling_format = OrdinaryCallingFormat()) + + bucket = c.create_bucket(bucketname) + + key = bucket.new_key(objname) + headers = { "Content-Type": "text/plain" } + key.set_contents_from_string(b"Test5A\n", headers) + key.set_acl('public-read') + + # + # Now it's getting interesting. We're logging into a tenantized user. + # + print("PUT bucket %s object %s for tenant B" % (bucketname, objname)) + c = S3Connection( + aws_access_key_id="tester5b1", + aws_secret_access_key="tester5b1pass", + is_secure=False, + host="localhost", + port = cluster.port, + calling_format = OrdinaryCallingFormat()) + + bucket = c.create_bucket(bucketname) + bucket.set_canned_acl('public-read') + + key = bucket.new_key(objname) + headers = { "Content-Type": "text/plain" } + key.set_contents_from_string(b"Test5B\n", headers) + key.set_acl('public-read') + + # + # Finally, let's fetch a couple of objects and verify that they + # are what they should be and we didn't get them overwritten. + # Note that we access one of objects across tenants using the colon. + # + print("GET bucket %s object %s for tenants A and B" % + (bucketname, objname)) + c = S3Connection( + aws_access_key_id="tester5a", + aws_secret_access_key="tester5apass", + is_secure=False, + host="localhost", + port = cluster.port, + calling_format = OrdinaryCallingFormat()) + + bucket = c.get_bucket(bucketname) + + key = bucket.get_key(objname) + body = key.get_contents_as_string() + if body != b"Test5A\n": + raise TestException("failed body check, bucket %s object %s" % + (bucketname, objname)) + + bucket = c.get_bucket("test5b:"+bucketname) + key = bucket.get_key(objname) + body = key.get_contents_as_string() + if body != b"Test5B\n": + raise TestException( + "failed body check, tenant %s bucket %s object %s" % + ("test5b", bucketname, objname)) + + print("Poke OK") + + +def test5(cluster): + # Plan: + # 0. create users tester5a and test5b$tester5b1 test5b$tester5b2 + # 1. create buckets "test5cont" under test5a and test5b + # 2. create objects in the buckets + # 3. access objects (across users in container test5b) + + test5_make_user(cluster, None, "tester5a", "test5a") + test5_make_user(cluster, "test5b", "tester5b1", "test5b1") + test5_make_user(cluster, "test5b", "tester5b2", "test5b2") + + test5_poke_s3(cluster) + + +# XXX this parse_args boolean makes no sense. we should pass argv[] instead, +# possibly empty. 
(copied from test_multi, correct it there too) +def init(parse_args): + + #argv = [] + #if parse_args: + # argv = sys.argv[1:] + #args = parser.parse_args(argv) + + #rgw_multi = RGWMulti(int(args.num_zones)) + #rgw_multi.setup(not args.no_bootstrap) + + # __init__(): + port = 8001 + clnum = 1 # number of clusters + clid = 1 # 1-based + cluster = t.RGWCluster(clid, port) + + # setup(): + cluster.start() + cluster.start_rgw() + + # The cluster is always reset at this point, so we don't need to list + # users or delete pre-existing users. + + try: + test2(cluster) + test3(cluster) + test4(cluster) + test5(cluster) + except TestException as e: + cluster.stop_rgw() + cluster.stop() + sys.stderr.write("FAIL\n") + sys.stderr.write("%s\n" % str(e)) + return 1 + + # teardown(): + cluster.stop_rgw() + cluster.stop() + return 0 + +def setup_module(): + return init(False) + +if __name__ == "__main__": + sys.exit(init(True)) diff --git a/src/test/rgw/test_multi.md b/src/test/rgw/test_multi.md new file mode 100644 index 000000000..f2c128530 --- /dev/null +++ b/src/test/rgw/test_multi.md @@ -0,0 +1,56 @@ +# Multi Site Test Framework +This framework allows you to write and run tests against a **local** multi-cluster environment. The framework is using the `mstart.sh` script in order to setup the environment according to a configuration file, and then uses the [nose](https://nose.readthedocs.io/en/latest/) test framework to actually run the tests. +Tests are written in python2.7, but can invoke shell scripts, binaries etc. +## Running Tests +Entry point for all tests is `/path/to/ceph/src/test/rgw/test_multi.py`. And the actual tests are located inside the `/path/to/ceph/src/test/rgw/rgw_multi` subdirectory. +So, to run all tests use: +``` +$ cd /path/to/ceph/src/test/rgw/ +$ nosetests test_multi.py +``` +This will assume a configuration file called `/path/to/ceph/src/test/rgw/test_multi.conf` exists. +To use a different configuration file, set the `RGW_MULTI_TEST_CONF` environment variable to point to that file. +Since we use the same entry point file for all tests, running specific tests is possible using the following format: +``` +$ nosetests test_multi.py:<specific_test_name> +``` +To run miltiple tests based on wildcard string, use the following format: +``` +$ nosetests test_multi.py -m "<wildcard string>" +``` +Note that the test to run, does not have to be inside the `test_multi.py` file. +Note that different options for running specific and multiple tests exists in the [nose documentation](https://nose.readthedocs.io/en/latest/usage.html#options), as well as other options to control the execution of the tests. +## Configuration +### Environment Variables +Following RGW environment variables are taken into consideration when running the tests: + - `RGW_FRONTEND`: used to change frontend to 'civetweb' or 'beast' (default) + - `RGW_VALGRIND`: used to run the radosgw under valgrind. e.g. RGW_VALGRIND=yes +Other environment variables used to configure elements other than RGW can also be used as they are used in vstart.sh. E.g. 
MON, OSD, MGR, MSD +The configuration file for the run has 3 sections: +### Default +This section holds the following parameters: + - `num_zonegroups`: number of zone groups (integer, default 1) + - `num_zones`: number of regular zones in each group (integer, default 3) + - `num_az_zones`: number of archive zones (integer, default 0, max value 1) + - `gateways_per_zone`: number of RADOS gateways per zone (integer, default 2) + - `no_bootstrap`: whether to assume that the cluster is already up and does not need to be setup again. If set to "false", it will try to re-run the cluster, so, `mstop.sh` must be called beforehand. Should be set to false, anytime the configuration is changed. Otherwise, and assuming the cluster is already up, it should be set to "true" to save on execution time (boolean, default false) + - `log_level`: console log level of the logs in the tests, note that any program invoked from the test my emit logs regardless of that setting (integer, default 20) + - 20 and up -> DEBUG + - 10 and up -> INFO + - 5 and up -> WARNING + - 1 and up -> ERROR + - CRITICAL is always logged +- `log_file`: log file name. If not set, only console logs exists (string, default None) +- `file_log_level`: file log level of the logs in the tests. Similar to `log_level` +- `tenant`: name of tenant (string, default None) +- `checkpoint_retries`: *TODO* (integer, default 60) +- `checkpoint_delay`: *TODO* (integer, default 5) +- `reconfigure_delay`: *TODO* (integer, default 5) +### Elasticsearch +*TODO* +### Cloud +*TODO* +## Writing Tests +New tests should be added into the `/path/to/ceph/src/test/rgw/rgw_multi` subdirectory. +- Base classes are in: `/path/to/ceph/src/test/rgw/rgw_multi/multisite.py` +- `/path/to/ceph/src/test/rgw/rgw_multi/tests.py` holds the majority of the tests, but also many utility and infrastructure functions that could be used in other tests files diff --git a/src/test/rgw/test_multi.py b/src/test/rgw/test_multi.py new file mode 100644 index 000000000..57d27343e --- /dev/null +++ b/src/test/rgw/test_multi.py @@ -0,0 +1,410 @@ +import subprocess +import os +import random +import string +import argparse +import sys +import logging +try: + import configparser +except ImportError: + import ConfigParser as configparser + +import nose.core + +from rgw_multi import multisite +from rgw_multi.zone_rados import RadosZone as RadosZone +from rgw_multi.zone_es import ESZone as ESZone +from rgw_multi.zone_es import ESZoneConfig as ESZoneConfig +from rgw_multi.zone_cloud import CloudZone as CloudZone +from rgw_multi.zone_cloud import CloudZoneConfig as CloudZoneConfig +from rgw_multi.zone_az import AZone as AZone +from rgw_multi.zone_az import AZoneConfig as AZoneConfig + +# make tests from rgw_multi.tests available to nose +from rgw_multi.tests import * +from rgw_multi.tests_es import * +from rgw_multi.tests_az import * + +mstart_path = os.getenv('MSTART_PATH') +if mstart_path is None: + mstart_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + '/../..') + '/' + +test_path = os.path.normpath(os.path.dirname(os.path.realpath(__file__))) + '/' + +# configure logging for the tests module +log = logging.getLogger('rgw_multi.tests') + +def bash(cmd, **kwargs): + log.debug('running cmd: %s', ' '.join(cmd)) + check_retcode = kwargs.pop('check_retcode', True) + kwargs['stdout'] = subprocess.PIPE + process = subprocess.Popen(cmd, **kwargs) + s = process.communicate()[0].decode('utf-8') + log.debug('command returned status=%d stdout=%s', process.returncode, s) + if 
check_retcode: + assert(process.returncode == 0) + return (s, process.returncode) + +class Cluster(multisite.Cluster): + """ cluster implementation based on mstart/mrun scripts """ + def __init__(self, cluster_id): + super(Cluster, self).__init__() + self.cluster_id = cluster_id + self.needs_reset = True + + def admin(self, args = None, **kwargs): + """ radosgw-admin command """ + cmd = [test_path + 'test-rgw-call.sh', 'call_rgw_admin', self.cluster_id] + if args: + cmd += args + cmd += ['--debug-rgw=' + str(kwargs.pop('debug_rgw', 0))] + cmd += ['--debug-ms=' + str(kwargs.pop('debug_ms', 0))] + if kwargs.pop('read_only', False): + cmd += ['--rgw-cache-enabled=false'] + return bash(cmd, **kwargs) + + def start(self): + cmd = [mstart_path + 'mstart.sh', self.cluster_id] + env = None + if self.needs_reset: + env = os.environ.copy() + env['CEPH_NUM_MDS'] = '0' + cmd += ['-n'] + # cmd += ['-o'] + # cmd += ['rgw_cache_enabled=false'] + bash(cmd, env=env) + self.needs_reset = False + + def stop(self): + cmd = [mstart_path + 'mstop.sh', self.cluster_id] + bash(cmd) + +class Gateway(multisite.Gateway): + """ gateway implementation based on mrgw/mstop scripts """ + def __init__(self, client_id = None, *args, **kwargs): + super(Gateway, self).__init__(*args, **kwargs) + self.id = client_id + + def start(self, args = None): + """ start the gateway """ + assert(self.cluster) + env = os.environ.copy() + # to change frontend, set RGW_FRONTEND env variable + # e.g. RGW_FRONTEND=civetweb + # to run test under valgrind memcheck, set RGW_VALGRIND to 'yes' + # e.g. RGW_VALGRIND=yes + cmd = [mstart_path + 'mrgw.sh', self.cluster.cluster_id, str(self.port), str(self.ssl_port)] + if self.id: + cmd += ['-i', self.id] + cmd += ['--debug-rgw=20', '--debug-ms=1'] + if args: + cmd += args + bash(cmd, env=env) + + def stop(self): + """ stop the gateway """ + assert(self.cluster) + cmd = [mstart_path + 'mstop.sh', self.cluster.cluster_id, 'radosgw', str(self.port)] + bash(cmd) + +def gen_access_key(): + return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(16)) + +def gen_secret(): + return ''.join(random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits) for _ in range(32)) + +def gen_credentials(): + return multisite.Credentials(gen_access_key(), gen_secret()) + +def cluster_name(cluster_num): + return 'c' + str(cluster_num) + +def zonegroup_name(zonegroup_num): + return string.ascii_lowercase[zonegroup_num] + +def zone_name(zonegroup_num, zone_num): + return zonegroup_name(zonegroup_num) + str(zone_num + 1) + +def gateway_port(zonegroup_num, gateway_num): + return 8000 + 100 * zonegroup_num + gateway_num + +def gateway_name(zonegroup_num, zone_num, gateway_num): + return zone_name(zonegroup_num, zone_num) + '-' + str(gateway_num + 1) + +def zone_endpoints(zonegroup_num, zone_num, gateways_per_zone): + endpoints = [] + base = gateway_port(zonegroup_num, zone_num * gateways_per_zone) + for i in range(0, gateways_per_zone): + endpoints.append('http://localhost:' + str(base + i)) + return endpoints + +def get_log_level(log_level): + if log_level >= 20: + return logging.DEBUG + if log_level >= 10: + return logging.INFO + if log_level >= 5: + return logging.WARN + if log_level >= 1: + return logging.ERROR + return logging.CRITICAL + +def setup_logging(log_level_console, log_file, log_level_file): + if log_file: + formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s') + fh = logging.FileHandler(log_file) + fh.setFormatter(formatter) + 
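+        # the file handler uses file_log_level, which may differ from the console level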
fh.setLevel(get_log_level(log_level_file)) + log.addHandler(fh) + + formatter = logging.Formatter('%(levelname)s %(message)s') + ch = logging.StreamHandler() + ch.setFormatter(formatter) + ch.setLevel(get_log_level(log_level_console)) + log.addHandler(ch) + log.setLevel(get_log_level(log_level_console)) + +def init(parse_args): + cfg = configparser.RawConfigParser({ + 'num_zonegroups': 1, + 'num_zones': 3, + 'num_az_zones': 0, + 'gateways_per_zone': 2, + 'no_bootstrap': 'false', + 'log_level': 20, + 'log_file': None, + 'file_log_level': 20, + 'tenant': None, + 'checkpoint_retries': 60, + 'checkpoint_delay': 5, + 'reconfigure_delay': 5, + 'use_ssl': 'false', + }) + try: + path = os.environ['RGW_MULTI_TEST_CONF'] + except KeyError: + path = test_path + 'test_multi.conf' + + try: + with open(path) as f: + cfg.readfp(f) + except: + print('WARNING: error reading test config. Path can be set through the RGW_MULTI_TEST_CONF env variable') + pass + + parser = argparse.ArgumentParser( + description='Run rgw multi-site tests', + usage='test_multi [--num-zonegroups <num>] [--num-zones <num>] [--no-bootstrap]') + + section = 'DEFAULT' + parser.add_argument('--num-zonegroups', type=int, default=cfg.getint(section, 'num_zonegroups')) + parser.add_argument('--num-zones', type=int, default=cfg.getint(section, 'num_zones')) + parser.add_argument('--gateways-per-zone', type=int, default=cfg.getint(section, 'gateways_per_zone')) + parser.add_argument('--no-bootstrap', action='store_true', default=cfg.getboolean(section, 'no_bootstrap')) + parser.add_argument('--log-level', type=int, default=cfg.getint(section, 'log_level')) + parser.add_argument('--log-file', type=str, default=cfg.get(section, 'log_file')) + parser.add_argument('--file-log-level', type=int, default=cfg.getint(section, 'file_log_level')) + parser.add_argument('--tenant', type=str, default=cfg.get(section, 'tenant')) + parser.add_argument('--checkpoint-retries', type=int, default=cfg.getint(section, 'checkpoint_retries')) + parser.add_argument('--checkpoint-delay', type=int, default=cfg.getint(section, 'checkpoint_delay')) + parser.add_argument('--reconfigure-delay', type=int, default=cfg.getint(section, 'reconfigure_delay')) + parser.add_argument('--use-ssl', type=bool, default=cfg.getboolean(section, 'use_ssl')) + + + es_cfg = [] + cloud_cfg = [] + az_cfg = [] + + for s in cfg.sections(): + if s.startswith('elasticsearch'): + es_cfg.append(ESZoneConfig(cfg, s)) + elif s.startswith('cloud'): + cloud_cfg.append(CloudZoneConfig(cfg, s)) + elif s.startswith('archive'): + az_cfg.append(AZoneConfig(cfg, s)) + + + argv = [] + + if parse_args: + argv = sys.argv[1:] + + args = parser.parse_args(argv) + bootstrap = not args.no_bootstrap + + setup_logging(args.log_level, args.log_file, args.file_log_level) + + # start first cluster + c1 = Cluster(cluster_name(1)) + if bootstrap: + c1.start() + clusters = [] + clusters.append(c1) + + admin_creds = gen_credentials() + admin_user = multisite.User('zone.user') + + user_creds = gen_credentials() + user = multisite.User('tester', tenant=args.tenant) + + realm = multisite.Realm('r') + if bootstrap: + # create the realm on c1 + realm.create(c1) + else: + realm.get(c1) + period = multisite.Period(realm=realm) + realm.current_period = period + + num_es_zones = len(es_cfg) + num_cloud_zones = len(cloud_cfg) + num_az_zones = cfg.getint(section, 'num_az_zones') + + num_zones = args.num_zones + num_es_zones + num_cloud_zones + num_az_zones + + use_ssl = cfg.getboolean(section, 'use_ssl') + + if use_ssl and 
bootstrap: + cmd = ['openssl', 'req', + '-x509', + '-newkey', 'rsa:4096', + '-sha256', + '-nodes', + '-keyout', 'key.pem', + '-out', 'cert.pem', + '-subj', '/CN=localhost', + '-days', '3650'] + bash(cmd) + # append key to cert + fkey = open('./key.pem', 'r') + if fkey.mode == 'r': + fcert = open('./cert.pem', 'a') + fcert.write(fkey.read()) + fcert.close() + fkey.close() + + for zg in range(0, args.num_zonegroups): + zonegroup = multisite.ZoneGroup(zonegroup_name(zg), period) + period.zonegroups.append(zonegroup) + + is_master_zg = zg == 0 + if is_master_zg: + period.master_zonegroup = zonegroup + + for z in range(0, num_zones): + is_master = z == 0 + # start a cluster, or use c1 for first zone + cluster = None + if is_master_zg and is_master: + cluster = c1 + else: + cluster = Cluster(cluster_name(len(clusters) + 1)) + clusters.append(cluster) + if bootstrap: + cluster.start() + # pull realm configuration from the master's gateway + gateway = realm.meta_master_zone().gateways[0] + realm.pull(cluster, gateway, admin_creds) + + endpoints = zone_endpoints(zg, z, args.gateways_per_zone) + if is_master: + if bootstrap: + # create the zonegroup on its first zone's cluster + arg = [] + if is_master_zg: + arg += ['--master'] + if len(endpoints): # use master zone's endpoints + arg += ['--endpoints', ','.join(endpoints)] + zonegroup.create(cluster, arg) + else: + zonegroup.get(cluster) + + es_zone = (z >= args.num_zones and z < args.num_zones + num_es_zones) + cloud_zone = (z >= args.num_zones + num_es_zones and z < args.num_zones + num_es_zones + num_cloud_zones) + az_zone = (z >= args.num_zones + num_es_zones + num_cloud_zones) + + # create the zone in its zonegroup + zone = multisite.Zone(zone_name(zg, z), zonegroup, cluster) + if es_zone: + zone_index = z - args.num_zones + zone = ESZone(zone_name(zg, z), es_cfg[zone_index].endpoint, zonegroup, cluster) + elif cloud_zone: + zone_index = z - args.num_zones - num_es_zones + ccfg = cloud_cfg[zone_index] + zone = CloudZone(zone_name(zg, z), ccfg.endpoint, ccfg.credentials, ccfg.source_bucket, + ccfg.target_path, zonegroup, cluster) + elif az_zone: + zone_index = z - args.num_zones - num_es_zones - num_cloud_zones + zone = AZone(zone_name(zg, z), zonegroup, cluster) + else: + zone = RadosZone(zone_name(zg, z), zonegroup, cluster) + + if bootstrap: + arg = admin_creds.credential_args() + if is_master: + arg += ['--master'] + if len(endpoints): + arg += ['--endpoints', ','.join(endpoints)] + zone.create(cluster, arg) + else: + zone.get(cluster) + zonegroup.zones.append(zone) + if is_master: + zonegroup.master_zone = zone + + zonegroup.zones_by_type.setdefault(zone.tier_type(), []).append(zone) + + if zone.is_read_only(): + zonegroup.ro_zones.append(zone) + else: + zonegroup.rw_zones.append(zone) + + # update/commit the period + if bootstrap: + period.update(zone, commit=True) + + ssl_port_offset = 1000 + # start the gateways + for g in range(0, args.gateways_per_zone): + port = gateway_port(zg, g + z * args.gateways_per_zone) + client_id = gateway_name(zg, z, g) + gateway = Gateway(client_id, 'localhost', port, cluster, zone, + ssl_port = port+ssl_port_offset if use_ssl else 0) + if bootstrap: + gateway.start() + zone.gateways.append(gateway) + + if is_master_zg and is_master: + if bootstrap: + # create admin user + arg = ['--display-name', '"Zone User"', '--system'] + arg += admin_creds.credential_args() + admin_user.create(zone, arg) + # create test user + arg = ['--display-name', '"Test User"', '--caps', 'roles=*'] + arg += 
user_creds.credential_args() + user.create(zone, arg) + else: + # read users and update keys + admin_user.info(zone) + admin_creds = admin_user.credentials[0] + arg = [] + user.info(zone, arg) + user_creds = user.credentials[0] + + if not bootstrap: + period.get(c1) + + config = Config(checkpoint_retries=args.checkpoint_retries, + checkpoint_delay=args.checkpoint_delay, + reconfigure_delay=args.reconfigure_delay, + tenant=args.tenant) + init_multi(realm, user, config) + +def setup_module(): + init(False) + +if __name__ == "__main__": + init(True) + diff --git a/src/test/rgw/test_rgw_amqp.cc b/src/test/rgw/test_rgw_amqp.cc new file mode 100644 index 000000000..f49d309c7 --- /dev/null +++ b/src/test/rgw/test_rgw_amqp.cc @@ -0,0 +1,529 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "rgw_amqp.h" +#include "common/ceph_context.h" +#include "amqp_mock.h" +#include <gtest/gtest.h> +#include <chrono> +#include <thread> +#include <atomic> + +using namespace rgw; + +const std::chrono::milliseconds wait_time(10); +const std::chrono::milliseconds long_wait_time = wait_time*50; +const std::chrono::seconds idle_time(35); + + +class CctCleaner { + CephContext* cct; +public: + CctCleaner(CephContext* _cct) : cct(_cct) {} + ~CctCleaner() { +#ifdef WITH_SEASTAR + delete cct; +#else + cct->put(); +#endif + } +}; + +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); + +CctCleaner cleaner(cct); + +class TestAMQP : public ::testing::Test { +protected: + amqp::connection_id_t conn_id; + unsigned current_dequeued = 0U; + + void SetUp() override { + ASSERT_TRUE(amqp::init(cct)); + } + + void TearDown() override { + amqp::shutdown(); + } + + // wait for at least one new (since last drain) message to be dequeueud + // and then wait for all pending answers to be received + void wait_until_drained() { + while (amqp::get_dequeued() == current_dequeued) { + std::this_thread::sleep_for(wait_time); + } + while (amqp::get_inflight() > 0) { + std::this_thread::sleep_for(wait_time); + } + current_dequeued = amqp::get_dequeued(); + } +}; + +std::atomic<bool> callback_invoked = false; + +std::atomic<int> callbacks_invoked = 0; + +// note: because these callback are shared among different "publish" calls +// they should be used on different connections + +void my_callback_expect_ack(int rc) { + EXPECT_EQ(0, rc); + callback_invoked = true; +} + +void my_callback_expect_nack(int rc) { + EXPECT_LT(rc, 0); + callback_invoked = true; +} + +void my_callback_expect_multiple_acks(int rc) { + EXPECT_EQ(0, rc); + ++callbacks_invoked; +} + +class dynamic_callback_wrapper { + dynamic_callback_wrapper() = default; +public: + static dynamic_callback_wrapper* create() { + return new dynamic_callback_wrapper; + } + void callback(int rc) { + EXPECT_EQ(0, rc); + ++callbacks_invoked; + delete this; + } +}; + +void my_callback_expect_close_or_ack(int rc) { + // deleting the connection should trigger the callback with -4098 + // but due to race conditions, some my get an ack + EXPECT_TRUE(-4098 == rc || 0 == rc); +} + +TEST_F(TestAMQP, ConnectionOK) +{ + const auto connection_number = amqp::get_connection_count(); + auto rc = amqp::connect(conn_id, "amqp://localhost", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = amqp::publish(conn_id, "topic", "message"); + EXPECT_EQ(rc, 0); +} + +TEST_F(TestAMQP, SSLConnectionOK) +{ + const int port = 5671; + const auto connection_number = 
amqp::get_connection_count(); + amqp_mock::set_valid_port(port); + auto rc = amqp::connect(conn_id, "amqps://localhost", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = amqp::publish(conn_id, "topic", "message"); + EXPECT_EQ(rc, 0); + amqp_mock::set_valid_port(5672); +} + +TEST_F(TestAMQP, PlainAndSSLConnectionsOK) +{ + const int port = 5671; + const auto connection_number = amqp::get_connection_count(); + amqp_mock::set_valid_port(port); + amqp::connection_id_t conn_id1; + auto rc = amqp::connect(conn_id1, "amqps://localhost", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = amqp::publish(conn_id1, "topic", "message"); + EXPECT_EQ(rc, 0); + EXPECT_EQ(amqp::to_string(conn_id1), "amqps://localhost:5671/?exchange=ex1"); + amqp_mock::set_valid_port(5672); + amqp::connection_id_t conn_id2; + rc = amqp::connect(conn_id2, "amqp://localhost", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::to_string(conn_id2), "amqp://localhost:5672/?exchange=ex1"); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 2); + rc = amqp::publish(conn_id2, "topic", "message"); + EXPECT_EQ(rc, 0); +} + +TEST_F(TestAMQP, ConnectionReuse) +{ + amqp::connection_id_t conn_id1; + auto rc = amqp::connect(conn_id1, "amqp://localhost", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id2; + rc = amqp::connect(conn_id2, "amqp://localhost", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number); + rc = amqp::publish(conn_id1, "topic", "message"); + EXPECT_EQ(rc, 0); +} + +TEST_F(TestAMQP, NameResolutionFail) +{ + callback_invoked = false; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://kaboom", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); +} + +TEST_F(TestAMQP, InvalidPort) +{ + callback_invoked = false; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://localhost:1234", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); +} + +TEST_F(TestAMQP, InvalidHost) +{ + callback_invoked = false; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://0.0.0.1", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); +} + +TEST_F(TestAMQP, InvalidVhost) +{ + callback_invoked = false; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = 
amqp::connect(conn_id, "amqp://localhost/kaboom", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); +} + +TEST_F(TestAMQP, UserPassword) +{ + amqp_mock::set_valid_host("127.0.0.1"); + { + callback_invoked = false; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://foo:bar@127.0.0.1", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + } + // now try the same connection with default user/password + amqp_mock::set_valid_host("127.0.0.2"); + { + callback_invoked = false; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://guest:guest@127.0.0.2", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + } + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, URLParseError) +{ + callback_invoked = false; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "http://localhost", "ex1", false, false, boost::none); + EXPECT_FALSE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); +} + +TEST_F(TestAMQP, ExchangeMismatch) +{ + callback_invoked = false; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "http://localhost", "ex2", false, false, boost::none); + EXPECT_FALSE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); +} + +TEST_F(TestAMQP, MaxConnections) +{ + // fill up all connections + std::vector<amqp::connection_id_t> connections; + auto remaining_connections = amqp::get_max_connections() - amqp::get_connection_count(); + while (remaining_connections > 0) { + const auto host = "127.10.0." 
+ std::to_string(remaining_connections); + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack); + EXPECT_EQ(rc, 0); + --remaining_connections; + connections.push_back(conn_id); + } + EXPECT_EQ(amqp::get_connection_count(), amqp::get_max_connections()); + wait_until_drained(); + // try to add another connection + { + const std::string host = "toomany"; + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none); + EXPECT_FALSE(rc); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + } + EXPECT_EQ(amqp::get_connection_count(), amqp::get_max_connections()); + amqp_mock::set_valid_host("localhost"); +} + + +TEST_F(TestAMQP, ReceiveAck) +{ + callback_invoked = false; + const std::string host("localhost1"); + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, ImplicitConnectionClose) +{ + callback_invoked = false; + const std::string host("localhost1"); + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + const auto NUMBER_OF_CALLS = 2000; + for (auto i = 0; i < NUMBER_OF_CALLS; ++i) { + auto rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_close_or_ack); + EXPECT_EQ(rc, 0); + } + wait_until_drained(); + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, ReceiveMultipleAck) +{ + callbacks_invoked = 0; + const std::string host("localhost1"); + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + const auto NUMBER_OF_CALLS = 100; + for (auto i=0; i < NUMBER_OF_CALLS; ++i) { + auto rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_multiple_acks); + EXPECT_EQ(rc, 0); + } + wait_until_drained(); + EXPECT_EQ(callbacks_invoked, NUMBER_OF_CALLS); + callbacks_invoked = 0; + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, ReceiveAckForMultiple) +{ + callbacks_invoked = 0; + const std::string host("localhost1"); + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + amqp_mock::set_multiple(59); + const auto NUMBER_OF_CALLS = 100; + for (auto i=0; i < NUMBER_OF_CALLS; ++i) { + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_multiple_acks); + EXPECT_EQ(rc, 0); + } + wait_until_drained(); + EXPECT_EQ(callbacks_invoked, NUMBER_OF_CALLS); + callbacks_invoked = 0; + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, DynamicCallback) +{ + callbacks_invoked = 0; + const std::string host("localhost1"); + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, 
false, boost::none); + EXPECT_TRUE(rc); + amqp_mock::set_multiple(59); + const auto NUMBER_OF_CALLS = 100; + for (auto i=0; i < NUMBER_OF_CALLS; ++i) { + rc = publish_with_confirm(conn_id, "topic", "message", + std::bind(&dynamic_callback_wrapper::callback, dynamic_callback_wrapper::create(), std::placeholders::_1)); + EXPECT_EQ(rc, 0); + } + wait_until_drained(); + EXPECT_EQ(callbacks_invoked, NUMBER_OF_CALLS); + callbacks_invoked = 0; + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, ReceiveNack) +{ + callback_invoked = false; + amqp_mock::REPLY_ACK = false; + const std::string host("localhost2"); + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + amqp_mock::REPLY_ACK = true; + callback_invoked = false; + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, FailWrite) +{ + callback_invoked = false; + amqp_mock::FAIL_NEXT_WRITE = true; + const std::string host("localhost2"); + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + amqp_mock::FAIL_NEXT_WRITE = false; + callback_invoked = false; + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, RetryInvalidHost) +{ + callback_invoked = false; + const std::string host = "192.168.0.1"; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://"+host, "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + // now next retry should be ok + callback_invoked = false; + amqp_mock::set_valid_host(host); + std::this_thread::sleep_for(long_wait_time); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, RetryInvalidPort) +{ + callback_invoked = false; + const int port = 9999; + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://localhost:" + std::to_string(port), "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + // now next retry should be ok + callback_invoked = false; + amqp_mock::set_valid_port(port); + std::this_thread::sleep_for(long_wait_time); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + amqp_mock::set_valid_port(5672); +} + +TEST_F(TestAMQP, RetryFailWrite) +{ + callback_invoked = false; + amqp_mock::FAIL_NEXT_WRITE = true; + const std::string 
host("localhost2"); + amqp_mock::set_valid_host(host); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://" + host, "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + // now next retry should be ok + amqp_mock::FAIL_NEXT_WRITE = false; + callback_invoked = false; + std::this_thread::sleep_for(long_wait_time); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_ack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); + amqp_mock::set_valid_host("localhost"); +} + +TEST_F(TestAMQP, IdleConnection) +{ + // this test is skipped since it takes 30seconds + //GTEST_SKIP(); + const auto connection_number = amqp::get_connection_count(); + amqp::connection_id_t conn_id; + auto rc = amqp::connect(conn_id, "amqp://localhost", "ex1", false, false, boost::none); + EXPECT_TRUE(rc); + EXPECT_EQ(amqp::get_connection_count(), connection_number + 1); + std::this_thread::sleep_for(idle_time); + EXPECT_EQ(amqp::get_connection_count(), connection_number); + rc = publish_with_confirm(conn_id, "topic", "message", my_callback_expect_nack); + EXPECT_EQ(rc, 0); + wait_until_drained(); + EXPECT_TRUE(callback_invoked); +} + diff --git a/src/test/rgw/test_rgw_arn.cc b/src/test/rgw/test_rgw_arn.cc new file mode 100644 index 000000000..83445a275 --- /dev/null +++ b/src/test/rgw/test_rgw_arn.cc @@ -0,0 +1,107 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "rgw_arn.h" +#include <gtest/gtest.h> + +using namespace rgw; + +const int BASIC_ENTRIES = 6; + +const std::string basic_str[BASIC_ENTRIES] = {"arn:aws:s3:us-east-1:12345:resource", + "arn:aws:s3:us-east-1:12345:resourceType/resource", + "arn:aws:s3:us-east-1:12345:resourceType/resource/qualifier", + "arn:aws:s3:us-east-1:12345:resourceType/resource:qualifier", + "arn:aws:s3:us-east-1:12345:resourceType:resource", + "arn:aws:s3:us-east-1:12345:resourceType:resource/qualifier"}; + +const std::string expected_basic_resource[BASIC_ENTRIES] = {"resource", + "resourceType/resource", + "resourceType/resource/qualifier", + "resourceType/resource:qualifier", + "resourceType:resource", + "resourceType:resource/qualifier"}; +TEST(TestARN, Basic) +{ + for (auto i = 0; i < BASIC_ENTRIES; ++i) { + boost::optional<ARN> arn = ARN::parse(basic_str[i]); + ASSERT_TRUE(arn); + EXPECT_EQ(arn->partition, Partition::aws); + EXPECT_EQ(arn->service, Service::s3); + EXPECT_STREQ(arn->region.c_str(), "us-east-1"); + EXPECT_STREQ(arn->account.c_str(), "12345"); + EXPECT_STREQ(arn->resource.c_str(), expected_basic_resource[i].c_str()); + } +} + +TEST(TestARN, ToString) +{ + for (auto i = 0; i < BASIC_ENTRIES; ++i) { + boost::optional<ARN> arn = ARN::parse(basic_str[i]); + ASSERT_TRUE(arn); + EXPECT_STREQ(to_string(*arn).c_str(), basic_str[i].c_str()); + } +} + +const std::string expected_basic_resource_type[BASIC_ENTRIES] = + {"", "resourceType", "resourceType", "resourceType", "resourceType", "resourceType"}; +const std::string expected_basic_qualifier[BASIC_ENTRIES] = + {"", "", "qualifier", "qualifier", "", "qualifier"}; + +TEST(TestARNResource, Basic) +{ + for (auto i = 0; i < BASIC_ENTRIES; ++i) { + boost::optional<ARN> arn = ARN::parse(basic_str[i]); + ASSERT_TRUE(arn); + ASSERT_FALSE(arn->resource.empty()); + boost::optional<ARNResource> resource = ARNResource::parse(arn->resource); + 
ASSERT_TRUE(resource); + EXPECT_STREQ(resource->resource.c_str(), "resource"); + EXPECT_STREQ(resource->resource_type.c_str(), expected_basic_resource_type[i].c_str()); + EXPECT_STREQ(resource->qualifier.c_str(), expected_basic_qualifier[i].c_str()); + } +} + +const int EMPTY_ENTRIES = 4; + +const std::string empty_str[EMPTY_ENTRIES] = {"arn:aws:s3:::resource", + "arn:aws:s3::12345:resource", + "arn:aws:s3:us-east-1::resource", + "arn:aws:s3:us-east-1:12345:"}; + +TEST(TestARN, Empty) +{ + for (auto i = 0; i < EMPTY_ENTRIES; ++i) { + boost::optional<ARN> arn = ARN::parse(empty_str[i]); + ASSERT_TRUE(arn); + EXPECT_EQ(arn->partition, Partition::aws); + EXPECT_EQ(arn->service, Service::s3); + EXPECT_TRUE(arn->region.empty() || arn->region == "us-east-1"); + EXPECT_TRUE(arn->account.empty() || arn->account == "12345"); + EXPECT_TRUE(arn->resource.empty() || arn->resource == "resource"); + } +} + +const int WILDCARD_ENTRIES = 3; + +const std::string wildcard_str[WILDCARD_ENTRIES] = {"arn:aws:s3:*:*:resource", + "arn:aws:s3:*:12345:resource", + "arn:aws:s3:us-east-1:*:resource"}; + +// FIXME: currently the following: "arn:aws:s3:us-east-1:12345:*" +// does not fail, even if "wildcard" is not set to "true" + +TEST(TestARN, Wildcard) +{ + for (auto i = 0; i < WILDCARD_ENTRIES; ++i) { + EXPECT_FALSE(ARN::parse(wildcard_str[i])); + boost::optional<ARN> arn = ARN::parse(wildcard_str[i], true); + ASSERT_TRUE(arn); + EXPECT_EQ(arn->partition, Partition::aws); + EXPECT_EQ(arn->service, Service::s3); + EXPECT_TRUE(arn->region == "*" || arn->region == "us-east-1"); + EXPECT_TRUE(arn->account == "*" || arn->account == "12345"); + EXPECT_TRUE(arn->resource == "*" || arn->resource == "resource"); + } +} + diff --git a/src/test/rgw/test_rgw_bencode.cc b/src/test/rgw/test_rgw_bencode.cc new file mode 100644 index 000000000..c149d532c --- /dev/null +++ b/src/test/rgw/test_rgw_bencode.cc @@ -0,0 +1,65 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +#include "gtest/gtest.h" + +#include "rgw_torrent.h" + +using namespace std; + +TEST(Bencode, String) +{ + TorrentBencode decode; + bufferlist bl; + + decode.bencode("foo", bl); + decode.bencode("bar", bl); + decode.bencode("baz", bl); + + string s(bl.c_str(), bl.length()); + + ASSERT_STREQ("3:foo3:bar3:baz", s.c_str()); +} + +TEST(Bencode, Integers) +{ + TorrentBencode decode; + bufferlist bl; + + decode.bencode(0, bl); + decode.bencode(-3, bl); + decode.bencode(7, bl); + + string s(bl.c_str(), bl.length()); + + ASSERT_STREQ("i0ei-3ei7e", s.c_str()); +} + +TEST(Bencode, Dict) +{ + TorrentBencode decode; + bufferlist bl; + + decode.bencode_dict(bl); + decode.bencode("foo", 5, bl); + decode.bencode("bar", "baz", bl); + decode.bencode_end(bl); + + string s(bl.c_str(), bl.length()); + + ASSERT_STREQ("d3:fooi5e3:bar3:baze", s.c_str()); +} + +TEST(Bencode, List) +{ + TorrentBencode decode; + bufferlist bl; + + decode.bencode_list(bl); + decode.bencode("foo", 5, bl); + decode.bencode("bar", "baz", bl); + decode.bencode_end(bl); + + string s(bl.c_str(), bl.length()); + + ASSERT_STREQ("l3:fooi5e3:bar3:baze", s.c_str()); +} diff --git a/src/test/rgw/test_rgw_bucket_sync_cache.cc b/src/test/rgw/test_rgw_bucket_sync_cache.cc new file mode 100644 index 000000000..22ec1005e --- /dev/null +++ b/src/test/rgw/test_rgw_bucket_sync_cache.cc @@ -0,0 +1,189 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab ft=cpp + +/* + * Ceph - scalable distributed file system + * + * Copyright 
(C) 2020 Red Hat, Inc + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + */ + +#include "rgw_bucket_sync_cache.h" +#include <gtest/gtest.h> + +using namespace rgw::bucket_sync; + +// helper function to construct rgw_bucket_shard +static rgw_bucket_shard make_key(const std::string& tenant, + const std::string& bucket, int shard) +{ + auto key = rgw_bucket_key{tenant, bucket}; + return rgw_bucket_shard{std::move(key), shard}; +} + +TEST(BucketSyncCache, ReturnCachedPinned) +{ + auto cache = Cache::create(0); + const auto key = make_key("", "1", 0); + auto h1 = cache->get(key, std::nullopt); // pin + h1->counter = 1; + auto h2 = cache->get(key, std::nullopt); + EXPECT_EQ(1, h2->counter); +} + +TEST(BucketSyncCache, ReturnNewUnpinned) +{ + auto cache = Cache::create(0); + const auto key = make_key("", "1", 0); + cache->get(key, std::nullopt)->counter = 1; // pin+unpin + EXPECT_EQ(0, cache->get(key, std::nullopt)->counter); +} + +TEST(BucketSyncCache, DistinctTenant) +{ + auto cache = Cache::create(2); + const auto key1 = make_key("a", "bucket", 0); + const auto key2 = make_key("b", "bucket", 0); + cache->get(key1, std::nullopt)->counter = 1; + EXPECT_EQ(0, cache->get(key2, std::nullopt)->counter); +} + +TEST(BucketSyncCache, DistinctShards) +{ + auto cache = Cache::create(2); + const auto key1 = make_key("", "bucket", 0); + const auto key2 = make_key("", "bucket", 1); + cache->get(key1, std::nullopt)->counter = 1; + EXPECT_EQ(0, cache->get(key2, std::nullopt)->counter); +} + +TEST(BucketSyncCache, DistinctGen) +{ + auto cache = Cache::create(2); + const auto key = make_key("", "bucket", 0); + std::optional<uint64_t> gen1; // empty + std::optional<uint64_t> gen2 = 5; + cache->get(key, gen1)->counter = 1; + EXPECT_EQ(0, cache->get(key, gen2)->counter); +} + +TEST(BucketSyncCache, DontEvictPinned) +{ + auto cache = Cache::create(0); + + const auto key1 = make_key("", "1", 0); + const auto key2 = make_key("", "2", 0); + + auto h1 = cache->get(key1, std::nullopt); + EXPECT_EQ(key1, h1->key.first); + auto h2 = cache->get(key2, std::nullopt); + EXPECT_EQ(key2, h2->key.first); + EXPECT_EQ(key1, h1->key.first); // h1 unchanged +} + +TEST(BucketSyncCache, HandleLifetime) +{ + const auto key = make_key("", "1", 0); + + Handle h; // test that handles keep the cache referenced + { + auto cache = Cache::create(0); + h = cache->get(key, std::nullopt); + } + EXPECT_EQ(key, h->key.first); +} + +TEST(BucketSyncCache, TargetSize) +{ + auto cache = Cache::create(2); + + const auto key1 = make_key("", "1", 0); + const auto key2 = make_key("", "2", 0); + const auto key3 = make_key("", "3", 0); + + // fill cache up to target_size=2 + cache->get(key1, std::nullopt)->counter = 1; + cache->get(key2, std::nullopt)->counter = 2; + // test that each unpinned entry is still cached + EXPECT_EQ(1, cache->get(key1, std::nullopt)->counter); + EXPECT_EQ(2, cache->get(key2, std::nullopt)->counter); + // overflow the cache and recycle key1 + cache->get(key3, std::nullopt)->counter = 3; + // test that the oldest entry was recycled + EXPECT_EQ(0, cache->get(key1, std::nullopt)->counter); +} + +TEST(BucketSyncCache, HandleMoveAssignEmpty) +{ + auto cache = Cache::create(0); + + const auto key1 = make_key("", "1", 0); + const auto key2 = make_key("", "2", 0); + + Handle j1; + { + auto h1 = cache->get(key1, std::nullopt); + j1 = std::move(h1); // assign over empty 
handle + EXPECT_EQ(key1, j1->key.first); + } + auto h2 = cache->get(key2, std::nullopt); + EXPECT_EQ(key1, j1->key.first); // j1 stays pinned +} + +TEST(BucketSyncCache, HandleMoveAssignExisting) +{ + const auto key1 = make_key("", "1", 0); + const auto key2 = make_key("", "2", 0); + + Handle h1; + { + auto cache1 = Cache::create(0); + h1 = cache1->get(key1, std::nullopt); + } // h1 has the last ref to cache1 + { + auto cache2 = Cache::create(0); + auto h2 = cache2->get(key2, std::nullopt); + h1 = std::move(h2); // assign over existing handle + } + EXPECT_EQ(key2, h1->key.first); +} + +TEST(BucketSyncCache, HandleCopyAssignEmpty) +{ + auto cache = Cache::create(0); + + const auto key1 = make_key("", "1", 0); + const auto key2 = make_key("", "2", 0); + + Handle j1; + { + auto h1 = cache->get(key1, std::nullopt); + j1 = h1; // assign over empty handle + EXPECT_EQ(&*h1, &*j1); + } + auto h2 = cache->get(key2, std::nullopt); + EXPECT_EQ(key1, j1->key.first); // j1 stays pinned +} + +TEST(BucketSyncCache, HandleCopyAssignExisting) +{ + const auto key1 = make_key("", "1", 0); + const auto key2 = make_key("", "2", 0); + + Handle h1; + { + auto cache1 = Cache::create(0); + h1 = cache1->get(key1, std::nullopt); + } // h1 has the last ref to cache1 + { + auto cache2 = Cache::create(0); + auto h2 = cache2->get(key2, std::nullopt); + h1 = h2; // assign over existing handle + EXPECT_EQ(&*h1, &*h2); + } + EXPECT_EQ(key2, h1->key.first); +} diff --git a/src/test/rgw/test_rgw_common.cc b/src/test/rgw/test_rgw_common.cc new file mode 100644 index 000000000..731d624e2 --- /dev/null +++ b/src/test/rgw/test_rgw_common.cc @@ -0,0 +1,91 @@ +#include "test_rgw_common.h" + +void test_rgw_add_placement(RGWZoneGroup *zonegroup, RGWZoneParams *zone_params, const std::string& name, bool is_default) +{ + zonegroup->placement_targets[name] = { name }; + + RGWZonePlacementInfo& pinfo = zone_params->placement_pools[name]; + pinfo.index_pool = rgw_pool(name + ".index").to_str(); + + rgw_pool data_pool(name + ".data"); + pinfo.storage_classes.set_storage_class(RGW_STORAGE_CLASS_STANDARD, &data_pool, nullptr); + pinfo.data_extra_pool = rgw_pool(name + ".extra").to_str(); + + if (is_default) { + zonegroup->default_placement = rgw_placement_rule(name, RGW_STORAGE_CLASS_STANDARD); + } +} + +void test_rgw_init_env(RGWZoneGroup *zonegroup, RGWZoneParams *zone_params) +{ + test_rgw_add_placement(zonegroup, zone_params, "default-placement", true); + +} + +void test_rgw_populate_explicit_placement_bucket(rgw_bucket *b, const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id) +{ + b->tenant = t; + b->name = n; + b->marker = m; + b->bucket_id = id; + b->explicit_placement.data_pool = rgw_pool(dp); + b->explicit_placement.index_pool = rgw_pool(ip); +} + +void test_rgw_populate_old_bucket(old_rgw_bucket *b, const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id) +{ + b->tenant = t; + b->name = n; + b->marker = m; + b->bucket_id = id; + b->data_pool = dp; + b->index_pool = ip; +} + +std::string test_rgw_get_obj_oid(const rgw_obj& obj) +{ + std::string oid; + std::string loc; + + get_obj_bucket_and_oid_loc(obj, oid, loc); + return oid; +} + +void test_rgw_init_explicit_placement_bucket(rgw_bucket *bucket, const char *name) +{ + test_rgw_populate_explicit_placement_bucket(bucket, "", name, ".data-pool", ".index-pool", "marker", "bucket-id"); +} + +void test_rgw_init_old_bucket(old_rgw_bucket *bucket, const char *name) +{ + test_rgw_populate_old_bucket(bucket, "", 
name, ".data-pool", ".index-pool", "marker", "bucket-id"); +} + +void test_rgw_populate_bucket(rgw_bucket *b, const char *t, const char *n, const char *m, const char *id) +{ + b->tenant = t; + b->name = n; + b->marker = m; + b->bucket_id = id; +} + +void test_rgw_init_bucket(rgw_bucket *bucket, const char *name) +{ + test_rgw_populate_bucket(bucket, "", name, "marker", "bucket-id"); +} + +rgw_obj test_rgw_create_obj(const rgw_bucket& bucket, const std::string& name, const std::string& instance, const std::string& ns) +{ + rgw_obj obj(bucket, name); + if (!instance.empty()) { + obj.key.set_instance(instance); + } + if (!ns.empty()) { + obj.key.ns = ns; + } + obj.bucket = bucket; + + return obj; +} + + diff --git a/src/test/rgw/test_rgw_common.h b/src/test/rgw/test_rgw_common.h new file mode 100644 index 000000000..664e0b22e --- /dev/null +++ b/src/test/rgw/test_rgw_common.h @@ -0,0 +1,506 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2013 eNovance SAS <licensing@enovance.com> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ +#include <iostream> +#include "common/ceph_json.h" +#include "common/Formatter.h" +#include "rgw_common.h" +#include "rgw_rados.h" +#include "rgw_zone.h" + +#ifndef CEPH_TEST_RGW_COMMON_H +#define CEPH_TEST_RGW_COMMON_H + +struct old_rgw_bucket { + std::string tenant; + std::string name; + std::string data_pool; + std::string data_extra_pool; /* if not set, then we should use data_pool instead */ + std::string index_pool; + std::string marker; + std::string bucket_id; + + std::string oid; /* + * runtime in-memory only info. If not empty, points to the bucket instance object + */ + + old_rgw_bucket() { } + // cppcheck-suppress noExplicitConstructor + old_rgw_bucket(const std::string& s) : name(s) { + data_pool = index_pool = s; + marker = ""; + } + explicit old_rgw_bucket(const char *n) : name(n) { + data_pool = index_pool = n; + marker = ""; + } + old_rgw_bucket(const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id, const char *h) : + tenant(t), name(n), data_pool(dp), index_pool(ip), marker(m), bucket_id(id) {} + + void encode(bufferlist& bl) const { + ENCODE_START(8, 3, bl); + encode(name, bl); + encode(data_pool, bl); + encode(marker, bl); + encode(bucket_id, bl); + encode(index_pool, bl); + encode(data_extra_pool, bl); + encode(tenant, bl); + ENCODE_FINISH(bl); + } + void decode(bufferlist::const_iterator& bl) { + DECODE_START_LEGACY_COMPAT_LEN(8, 3, 3, bl); + decode(name, bl); + decode(data_pool, bl); + if (struct_v >= 2) { + decode(marker, bl); + if (struct_v <= 3) { + uint64_t id; + decode(id, bl); + char buf[16]; + snprintf(buf, sizeof(buf), "%llu", (long long)id); + bucket_id = buf; + } else { + decode(bucket_id, bl); + } + } + if (struct_v >= 5) { + decode(index_pool, bl); + } else { + index_pool = data_pool; + } + if (struct_v >= 7) { + decode(data_extra_pool, bl); + } + if (struct_v >= 8) { + decode(tenant, bl); + } + DECODE_FINISH(bl); + } + + // format a key for the bucket/instance. 
pass delim=0 to skip a field + std::string get_key(char tenant_delim = '/', + char id_delim = ':') const; + + const std::string& get_data_extra_pool() { + if (data_extra_pool.empty()) { + return data_pool; + } + return data_extra_pool; + } + + void dump(Formatter *f) const; + void decode_json(JSONObj *obj); + static void generate_test_instances(std::list<old_rgw_bucket*>& o); + + bool operator<(const old_rgw_bucket& b) const { + return name.compare(b.name) < 0; + } +}; +WRITE_CLASS_ENCODER(old_rgw_bucket) + +class old_rgw_obj { + std::string orig_obj; + std::string loc; + std::string object; + std::string instance; +public: + const std::string& get_object() const { return object; } + const std::string& get_orig_obj() const { return orig_obj; } + const std::string& get_loc() const { return loc; } + const std::string& get_instance() const { return instance; } + old_rgw_bucket bucket; + std::string ns; + + bool in_extra_data; /* in-memory only member, does not serialize */ + + // Represents the hash index source for this object once it is set (non-empty) + std::string index_hash_source; + + old_rgw_obj() : in_extra_data(false) {} + old_rgw_obj(old_rgw_bucket& b, const std::string& o) : in_extra_data(false) { + init(b, o); + } + old_rgw_obj(old_rgw_bucket& b, const rgw_obj_key& k) : in_extra_data(false) { + from_index_key(b, k); + } + void init(old_rgw_bucket& b, const std::string& o) { + bucket = b; + set_obj(o); + reset_loc(); + } + void init_ns(old_rgw_bucket& b, const std::string& o, const std::string& n) { + bucket = b; + set_ns(n); + set_obj(o); + reset_loc(); + } + int set_ns(const char *n) { + if (!n) + return -EINVAL; + std::string ns_str(n); + return set_ns(ns_str); + } + int set_ns(const std::string& n) { + if (n[0] == '_') + return -EINVAL; + ns = n; + set_obj(orig_obj); + return 0; + } + int set_instance(const std::string& i) { + if (i[0] == '_') + return -EINVAL; + instance = i; + set_obj(orig_obj); + return 0; + } + + int clear_instance() { + return set_instance(std::string()); + } + + void set_loc(const std::string& k) { + loc = k; + } + + void reset_loc() { + loc.clear(); + /* + * For backward compatibility. Older versions used to have object locator on all objects, + * however, the orig_obj was the effective object locator. This had the same effect as not + * having object locator at all for most objects but the ones that started with underscore as + * these were escaped. + */ + if (orig_obj[0] == '_' && ns.empty()) { + loc = orig_obj; + } + } + + bool have_null_instance() { + return instance == "null"; + } + + bool have_instance() { + return !instance.empty(); + } + + bool need_to_encode_instance() { + return have_instance() && !have_null_instance(); + } + + void set_obj(const std::string& o) { + object.reserve(128); + + orig_obj = o; + if (ns.empty() && !need_to_encode_instance()) { + if (o.empty()) { + return; + } + if (o.size() < 1 || o[0] != '_') { + object = o; + return; + } + object = "_"; + object.append(o); + } else { + object = "_"; + object.append(ns); + if (need_to_encode_instance()) { + object.append(std::string(":") + instance); + } + object.append("_"); + object.append(o); + } + reset_loc(); + } + + /* + * get the object's key name as being referred to by the bucket index. 
+ */ + std::string get_index_key_name() const { + if (ns.empty()) { + if (orig_obj.size() < 1 || orig_obj[0] != '_') { + return orig_obj; + } + return std::string("_") + orig_obj; + }; + + char buf[ns.size() + 16]; + snprintf(buf, sizeof(buf), "_%s_", ns.c_str()); + return std::string(buf) + orig_obj; + }; + + void from_index_key(old_rgw_bucket& b, const rgw_obj_key& key) { + if (key.name[0] != '_') { + init(b, key.name); + set_instance(key.instance); + return; + } + if (key.name[1] == '_') { + init(b, key.name.substr(1)); + set_instance(key.instance); + return; + } + ssize_t pos = key.name.find('_', 1); + if (pos < 0) { + /* shouldn't happen, just use key */ + init(b, key.name); + set_instance(key.instance); + return; + } + + init_ns(b, key.name.substr(pos + 1), key.name.substr(1, pos -1)); + set_instance(key.instance); + } + + void get_index_key(rgw_obj_key *key) const { + key->name = get_index_key_name(); + key->instance = instance; + } + + static void parse_ns_field(std::string& ns, std::string& instance) { + int pos = ns.find(':'); + if (pos >= 0) { + instance = ns.substr(pos + 1); + ns = ns.substr(0, pos); + } else { + instance.clear(); + } + } + + std::string& get_hash_object() { + return index_hash_source.empty() ? orig_obj : index_hash_source; + } + /** + * Translate a namespace-mangled object name to the user-facing name + * existing in the given namespace. + * + * If the object is part of the given namespace, it returns true + * and cuts down the name to the unmangled version. If it is not + * part of the given namespace, it returns false. + */ + static bool translate_raw_obj_to_obj_in_ns(std::string& obj, std::string& instance, std::string& ns) { + if (obj[0] != '_') { + if (ns.empty()) { + return true; + } + return false; + } + + std::string obj_ns; + bool ret = parse_raw_oid(obj, &obj, &instance, &obj_ns); + if (!ret) { + return ret; + } + + return (ns == obj_ns); + } + + static bool parse_raw_oid(const std::string& oid, std::string *obj_name, std::string *obj_instance, std::string *obj_ns) { + obj_instance->clear(); + obj_ns->clear(); + if (oid[0] != '_') { + *obj_name = oid; + return true; + } + + if (oid.size() >= 2 && oid[1] == '_') { + *obj_name = oid.substr(1); + return true; + } + + if (oid[0] != '_' || oid.size() < 3) // for namespace, min size would be 3: _x_ + return false; + + int pos = oid.find('_', 1); + if (pos <= 1) // if it starts with __, it's not in our namespace + return false; + + *obj_ns = oid.substr(1, pos - 1); + parse_ns_field(*obj_ns, *obj_instance); + + *obj_name = oid.substr(pos + 1); + return true; + } + + /** + * Given a mangled object name and an empty namespace string, this + * function extracts the namespace into the string and sets the object + * name to be the unmangled version. + * + * It returns true after successfully doing so, or + * false if it fails. 
+ */ + static bool strip_namespace_from_object(std::string& obj, std::string& ns, std::string& instance) { + ns.clear(); + instance.clear(); + if (obj[0] != '_') { + return true; + } + + size_t pos = obj.find('_', 1); + if (pos == std::string::npos) { + return false; + } + + if (obj[1] == '_') { + obj = obj.substr(1); + return true; + } + + size_t period_pos = obj.find('.'); + if (period_pos < pos) { + return false; + } + + ns = obj.substr(1, pos-1); + obj = obj.substr(pos+1, std::string::npos); + + parse_ns_field(ns, instance); + return true; + } + + void set_in_extra_data(bool val) { + in_extra_data = val; + } + + bool is_in_extra_data() const { + return in_extra_data; + } + + void encode(bufferlist& bl) const { + ENCODE_START(5, 3, bl); + encode(bucket.name, bl); + encode(loc, bl); + encode(ns, bl); + encode(object, bl); + encode(bucket, bl); + encode(instance, bl); + if (!ns.empty() || !instance.empty()) { + encode(orig_obj, bl); + } + ENCODE_FINISH(bl); + } + void decode(bufferlist::const_iterator& bl) { + DECODE_START_LEGACY_COMPAT_LEN(5, 3, 3, bl); + decode(bucket.name, bl); + decode(loc, bl); + decode(ns, bl); + decode(object, bl); + if (struct_v >= 2) + decode(bucket, bl); + if (struct_v >= 4) + decode(instance, bl); + if (ns.empty() && instance.empty()) { + if (object[0] != '_') { + orig_obj = object; + } else { + orig_obj = object.substr(1); + } + } else { + if (struct_v >= 5) { + decode(orig_obj, bl); + } else { + ssize_t pos = object.find('_', 1); + if (pos < 0) { + throw buffer::malformed_input(); + } + orig_obj = object.substr(pos); + } + } + DECODE_FINISH(bl); + } + + bool operator==(const old_rgw_obj& o) const { + return (object.compare(o.object) == 0) && + (bucket.name.compare(o.bucket.name) == 0) && + (ns.compare(o.ns) == 0) && + (instance.compare(o.instance) == 0); + } + bool operator<(const old_rgw_obj& o) const { + int r = bucket.name.compare(o.bucket.name); + if (r == 0) { + r = bucket.bucket_id.compare(o.bucket.bucket_id); + if (r == 0) { + r = object.compare(o.object); + if (r == 0) { + r = ns.compare(o.ns); + if (r == 0) { + r = instance.compare(o.instance); + } + } + } + } + + return (r < 0); + } +}; +WRITE_CLASS_ENCODER(old_rgw_obj) + +static inline void prepend_old_bucket_marker(const old_rgw_bucket& bucket, const std::string& orig_oid, std::string& oid) +{ + if (bucket.marker.empty() || orig_oid.empty()) { + oid = orig_oid; + } else { + oid = bucket.marker; + oid.append("_"); + oid.append(orig_oid); + } +} + +void test_rgw_init_env(RGWZoneGroup *zonegroup, RGWZoneParams *zone_params); + +struct test_rgw_env { + RGWZoneGroup zonegroup; + RGWZoneParams zone_params; + rgw_data_placement_target default_placement; + + test_rgw_env() { + test_rgw_init_env(&zonegroup, &zone_params); + default_placement.data_pool = rgw_pool(zone_params.placement_pools[zonegroup.default_placement.name].get_standard_data_pool()); + default_placement.data_extra_pool = rgw_pool(zone_params.placement_pools[zonegroup.default_placement.name].data_extra_pool); + } + + rgw_data_placement_target get_placement(const std::string& placement_id) { + const RGWZonePlacementInfo& pi = zone_params.placement_pools[placement_id]; + rgw_data_placement_target pt; + pt.index_pool = pi.index_pool; + pt.data_pool = pi.get_standard_data_pool(); + pt.data_extra_pool = pi.data_extra_pool; + return pt; + } + + rgw_raw_obj get_raw(const rgw_obj& obj) { + rgw_obj_select s(obj); + return s.get_raw_obj(zonegroup, zone_params); + } + + rgw_raw_obj get_raw(const rgw_obj_select& os) { + return os.get_raw_obj(zonegroup, 
zone_params); + } +}; + +void test_rgw_add_placement(RGWZoneGroup *zonegroup, RGWZoneParams *zone_params, const std::string& name, bool is_default); +void test_rgw_populate_explicit_placement_bucket(rgw_bucket *b, const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id); +void test_rgw_populate_old_bucket(old_rgw_bucket *b, const char *t, const char *n, const char *dp, const char *ip, const char *m, const char *id); + +std::string test_rgw_get_obj_oid(const rgw_obj& obj); +void test_rgw_init_explicit_placement_bucket(rgw_bucket *bucket, const char *name); +void test_rgw_init_old_bucket(old_rgw_bucket *bucket, const char *name); +void test_rgw_populate_bucket(rgw_bucket *b, const char *t, const char *n, const char *m, const char *id); +void test_rgw_init_bucket(rgw_bucket *bucket, const char *name); +rgw_obj test_rgw_create_obj(const rgw_bucket& bucket, const std::string& name, const std::string& instance, const std::string& ns); + +#endif + diff --git a/src/test/rgw/test_rgw_compression.cc b/src/test/rgw/test_rgw_compression.cc new file mode 100644 index 000000000..a34653530 --- /dev/null +++ b/src/test/rgw/test_rgw_compression.cc @@ -0,0 +1,186 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +#include "gtest/gtest.h" + +#include "rgw_compression.h" + +class ut_get_sink : public RGWGetObj_Filter { + bufferlist sink; +public: + ut_get_sink() {} + virtual ~ut_get_sink() {} + + int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override + { + auto& bl_buffers = bl.buffers(); + auto i = bl_buffers.begin(); + while (bl_len > 0) + { + ceph_assert(i != bl_buffers.end()); + off_t len = std::min<off_t>(bl_len, i->length()); + sink.append(*i, 0, len); + bl_len -= len; + i++; + } + return 0; + } + bufferlist& get_sink() + { + return sink; + } +}; + +class ut_get_sink_size : public RGWGetObj_Filter { + size_t max_size = 0; +public: + ut_get_sink_size() {} + virtual ~ut_get_sink_size() {} + + int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override + { + if (bl_len > (off_t)max_size) + max_size = bl_len; + return 0; + } + size_t get_size() + { + return max_size; + } +}; + +class ut_put_sink: public rgw::sal::DataProcessor +{ + bufferlist sink; +public: + int process(bufferlist&& bl, uint64_t ofs) override + { + sink.claim_append(bl); + return 0; + } + bufferlist& get_sink() + { + return sink; + } +}; + + +struct MockGetDataCB : public RGWGetObj_Filter { + int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override { + return 0; + } +} cb; + +using range_t = std::pair<off_t, off_t>; + +// call filter->fixup_range() and return the range as a pair. 
this makes it easy +// to fit on a single line for ASSERT_EQ() +range_t fixup_range(RGWGetObj_Decompress *filter, off_t ofs, off_t end) +{ + filter->fixup_range(ofs, end); + return {ofs, end}; +} + + +TEST(Decompress, FixupRangePartial) +{ + RGWCompressionInfo cs_info; + + // array of blocks with original len=8, compressed to len=6 + auto& blocks = cs_info.blocks; + blocks.emplace_back(compression_block{0, 0, 6}); + blocks.emplace_back(compression_block{8, 6, 6}); + blocks.emplace_back(compression_block{16, 12, 6}); + blocks.emplace_back(compression_block{24, 18, 6}); + + const bool partial = true; + RGWGetObj_Decompress decompress(g_ceph_context, &cs_info, partial, &cb); + + // test translation from logical ranges to compressed ranges + ASSERT_EQ(range_t(0, 5), fixup_range(&decompress, 0, 1)); + ASSERT_EQ(range_t(0, 5), fixup_range(&decompress, 1, 7)); + ASSERT_EQ(range_t(0, 11), fixup_range(&decompress, 7, 8)); + ASSERT_EQ(range_t(0, 11), fixup_range(&decompress, 0, 9)); + ASSERT_EQ(range_t(0, 11), fixup_range(&decompress, 7, 9)); + ASSERT_EQ(range_t(6, 11), fixup_range(&decompress, 8, 9)); + ASSERT_EQ(range_t(6, 17), fixup_range(&decompress, 8, 16)); + ASSERT_EQ(range_t(6, 17), fixup_range(&decompress, 8, 17)); + ASSERT_EQ(range_t(12, 23), fixup_range(&decompress, 16, 24)); + ASSERT_EQ(range_t(12, 23), fixup_range(&decompress, 16, 999)); + ASSERT_EQ(range_t(18, 23), fixup_range(&decompress, 998, 999)); +} + +TEST(Compress, LimitedChunkSize) +{ + CompressorRef plugin; + plugin = Compressor::create(g_ceph_context, Compressor::COMP_ALG_ZLIB); + ASSERT_NE(plugin.get(), nullptr); + + for (size_t s = 100 ; s < 10000000 ; s = s*5/4) + { + bufferptr bp(s); + bufferlist bl; + bl.append(bp); + + ut_put_sink c_sink; + RGWPutObj_Compress compressor(g_ceph_context, plugin, &c_sink); + compressor.process(std::move(bl), 0); + compressor.process({}, s); // flush + + RGWCompressionInfo cs_info; + cs_info.compression_type = plugin->get_type_name(); + cs_info.orig_size = s; + cs_info.compressor_message = compressor.get_compressor_message(); + cs_info.blocks = move(compressor.get_compression_blocks()); + + ut_get_sink_size d_sink; + RGWGetObj_Decompress decompress(g_ceph_context, &cs_info, false, &d_sink); + + off_t f_begin = 0; + off_t f_end = s - 1; + decompress.fixup_range(f_begin, f_end); + + decompress.handle_data(c_sink.get_sink(), 0, c_sink.get_sink().length()); + bufferlist empty; + decompress.handle_data(empty, 0, 0); + + ASSERT_LE(d_sink.get_size(), (size_t)g_ceph_context->_conf->rgw_max_chunk_size); + } +} + + +TEST(Compress, BillionZeros) +{ + CompressorRef plugin; + ut_put_sink c_sink; + plugin = Compressor::create(g_ceph_context, Compressor::COMP_ALG_ZLIB); + ASSERT_NE(plugin.get(), nullptr); + RGWPutObj_Compress compressor(g_ceph_context, plugin, &c_sink); + + constexpr size_t size = 1000000; + bufferptr bp(size); + bufferlist bl; + bl.append(bp); + + for (int i=0; i<1000;i++) + compressor.process(bufferlist{bl}, size*i); + compressor.process({}, size*1000); // flush + + RGWCompressionInfo cs_info; + cs_info.compression_type = plugin->get_type_name(); + cs_info.orig_size = size*1000; + cs_info.compressor_message = compressor.get_compressor_message(); + cs_info.blocks = move(compressor.get_compression_blocks()); + + ut_get_sink d_sink; + RGWGetObj_Decompress decompress(g_ceph_context, &cs_info, false, &d_sink); + + off_t f_begin = 0; + off_t f_end = size*1000 - 1; + decompress.fixup_range(f_begin, f_end); + + decompress.handle_data(c_sink.get_sink(), 0, c_sink.get_sink().length()); + 
bufferlist empty; + decompress.handle_data(empty, 0, 0); + + ASSERT_EQ(d_sink.get_sink().length() , size*1000); +} diff --git a/src/test/rgw/test_rgw_crypto.cc b/src/test/rgw/test_rgw_crypto.cc new file mode 100644 index 000000000..b85c78eb2 --- /dev/null +++ b/src/test/rgw/test_rgw_crypto.cc @@ -0,0 +1,816 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2016 Mirantis <akupczyk@mirantis.com> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ +#include <iostream> +#include "global/global_init.h" +#include "common/ceph_argparse.h" +#include "rgw_common.h" +#include "rgw_rados.h" +#include "rgw_crypt.h" +#include <gtest/gtest.h> +#include "include/ceph_assert.h" +#define dout_subsys ceph_subsys_rgw + +using namespace std; + + +std::unique_ptr<BlockCrypt> AES_256_CBC_create(const DoutPrefixProvider *dpp, CephContext* cct, const uint8_t* key, size_t len); + +class ut_get_sink : public RGWGetObj_Filter { + std::stringstream sink; +public: + ut_get_sink() {} + virtual ~ut_get_sink() {} + + int handle_data(bufferlist& bl, off_t bl_ofs, off_t bl_len) override + { + sink << std::string_view(bl.c_str()+bl_ofs, bl_len); + return 0; + } + std::string get_sink() + { + return sink.str(); + } +}; + +class ut_put_sink: public rgw::sal::DataProcessor +{ + std::stringstream sink; +public: + int process(bufferlist&& bl, uint64_t ofs) override + { + sink << std::string_view(bl.c_str(),bl.length()); + return 0; + } + std::string get_sink() + { + return sink.str(); + } +}; + + +class BlockCryptNone: public BlockCrypt { + size_t block_size = 256; +public: + BlockCryptNone(){}; + BlockCryptNone(size_t sz) : block_size(sz) {} + virtual ~BlockCryptNone(){}; + size_t get_block_size() override + { + return block_size; + } + bool encrypt(bufferlist& input, + off_t in_ofs, + size_t size, + bufferlist& output, + off_t stream_offset) override + { + output.clear(); + output.append(input.c_str(), input.length()); + return true; + } + bool decrypt(bufferlist& input, + off_t in_ofs, + size_t size, + bufferlist& output, + off_t stream_offset) override + { + output.clear(); + output.append(input.c_str(), input.length()); + return true; + } +}; + +TEST(TestRGWCrypto, verify_AES_256_CBC_identity) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + //create some input for encryption + const off_t test_range = 1024*1024; + buffer::ptr buf(test_range); + char* p = buf.c_str(); + for(size_t i = 0; i < buf.length(); i++) + p[i] = i + i*i + (i >> 2); + + bufferlist input; + input.append(buf); + + for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17}) + { + //make some random key + uint8_t key[32]; + for(size_t i=0;i<sizeof(key);i++) + key[i]=i*step; + + auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32)); + ASSERT_NE(aes.get(), nullptr); + + size_t block_size = aes->get_block_size(); + ASSERT_NE(block_size, 0u); + + for (size_t r = 97; r < 123 ; r++) + { + off_t begin = (r*r*r*r*r % test_range); + begin = begin - begin % block_size; + off_t end = begin + r*r*r*r*r*r*r % (test_range - begin); + if (r % 3) + end = end - end % block_size; + off_t offset = r*r*r*r*r*r*r*r % (1000*1000*1000); + offset = offset - offset % block_size; + + ASSERT_EQ(begin % block_size, 0u); + ASSERT_LE(end, test_range); + ASSERT_EQ(offset % 
block_size, 0u); + + bufferlist encrypted; + ASSERT_TRUE(aes->encrypt(input, begin, end - begin, encrypted, offset)); + bufferlist decrypted; + ASSERT_TRUE(aes->decrypt(encrypted, 0, end - begin, decrypted, offset)); + + ASSERT_EQ(decrypted.length(), end - begin); + ASSERT_EQ(std::string_view(input.c_str() + begin, end - begin), + std::string_view(decrypted.c_str(), end - begin) ); + } + } +} + + +TEST(TestRGWCrypto, verify_AES_256_CBC_identity_2) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + //create some input for encryption + const off_t test_range = 1024*1024; + buffer::ptr buf(test_range); + char* p = buf.c_str(); + for(size_t i = 0; i < buf.length(); i++) + p[i] = i + i*i + (i >> 2); + + bufferlist input; + input.append(buf); + + for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17}) + { + //make some random key + uint8_t key[32]; + for(size_t i=0;i<sizeof(key);i++) + key[i]=i*step; + + auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32)); + ASSERT_NE(aes.get(), nullptr); + + size_t block_size = aes->get_block_size(); + ASSERT_NE(block_size, 0u); + + for (off_t end = 1; end < 6096 ; end+=3) + { + off_t begin = 0; + off_t offset = end*end*end*end*end % (1000*1000*1000); + offset = offset - offset % block_size; + + ASSERT_EQ(begin % block_size, 0u); + ASSERT_LE(end, test_range); + ASSERT_EQ(offset % block_size, 0u); + + bufferlist encrypted; + ASSERT_TRUE(aes->encrypt(input, begin, end, encrypted, offset)); + bufferlist decrypted; + ASSERT_TRUE(aes->decrypt(encrypted, 0, end, decrypted, offset)); + + ASSERT_EQ(decrypted.length(), end); + ASSERT_EQ(std::string_view(input.c_str(), end), + std::string_view(decrypted.c_str(), end) ); + } + } +} + + +TEST(TestRGWCrypto, verify_AES_256_CBC_identity_3) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + //create some input for encryption + const off_t test_range = 1024*1024; + buffer::ptr buf(test_range); + char* p = buf.c_str(); + for(size_t i = 0; i < buf.length(); i++) + p[i] = i + i*i + (i >> 2); + + bufferlist input; + input.append(buf); + + for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17}) + { + //make some random key + uint8_t key[32]; + for(size_t i=0;i<sizeof(key);i++) + key[i]=i*step; + + auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32)); + ASSERT_NE(aes.get(), nullptr); + + size_t block_size = aes->get_block_size(); + ASSERT_NE(block_size, 0u); + size_t rr = 111; + for (size_t r = 97; r < 123 ; r++) + { + off_t begin = 0; + off_t end = begin + r*r*r*r*r*r*r % (test_range - begin); + //sometimes make aligned + if (r % 3) + end = end - end % block_size; + off_t offset = r*r*r*r*r*r*r*r % (1000*1000*1000); + offset = offset - offset % block_size; + + ASSERT_EQ(begin % block_size, 0u); + ASSERT_LE(end, test_range); + ASSERT_EQ(offset % block_size, 0u); + + bufferlist encrypted1; + bufferlist encrypted2; + + off_t pos = begin; + off_t chunk; + while (pos < end) { + chunk = block_size + (rr/3)*(rr+17)*(rr+71)*(rr+123)*(rr+131) % 50000; + chunk = chunk - chunk % block_size; + if (pos + chunk > end) + chunk = end - pos; + bufferlist tmp; + ASSERT_TRUE(aes->encrypt(input, pos, chunk, tmp, offset + pos)); + encrypted1.append(tmp); + pos += chunk; + rr++; + } + + pos = begin; + while (pos < end) { + chunk = block_size + (rr/3)*(rr+97)*(rr+151)*(rr+213)*(rr+251) % 50000; + chunk = chunk - chunk % block_size; + if (pos + chunk > end) + chunk = end - pos; + bufferlist tmp; + ASSERT_TRUE(aes->encrypt(input, pos, chunk, tmp, offset + pos)); + encrypted2.append(tmp); + pos += chunk; + 
rr++; + } + ASSERT_EQ(encrypted1.length(), end); + ASSERT_EQ(encrypted2.length(), end); + ASSERT_EQ(std::string_view(encrypted1.c_str(), end), + std::string_view(encrypted2.c_str(), end) ); + } + } +} + + +TEST(TestRGWCrypto, verify_AES_256_CBC_size_0_15) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + //create some input for encryption + const off_t test_range = 1024*1024; + buffer::ptr buf(test_range); + char* p = buf.c_str(); + for(size_t i = 0; i < buf.length(); i++) + p[i] = i + i*i + (i >> 2); + + bufferlist input; + input.append(buf); + + for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17}) + { + //make some random key + uint8_t key[32]; + for(size_t i=0;i<sizeof(key);i++) + key[i]=i*step; + + auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32)); + ASSERT_NE(aes.get(), nullptr); + + size_t block_size = aes->get_block_size(); + ASSERT_NE(block_size, 0u); + for (size_t r = 97; r < 123 ; r++) + { + off_t begin = 0; + off_t end = begin + r*r*r*r*r*r*r % (16); + + off_t offset = r*r*r*r*r*r*r*r % (1000*1000*1000); + offset = offset - offset % block_size; + + ASSERT_EQ(begin % block_size, 0u); + ASSERT_LE(end, test_range); + ASSERT_EQ(offset % block_size, 0u); + + bufferlist encrypted; + bufferlist decrypted; + ASSERT_TRUE(aes->encrypt(input, 0, end, encrypted, offset)); + ASSERT_TRUE(aes->encrypt(encrypted, 0, end, decrypted, offset)); + ASSERT_EQ(encrypted.length(), end); + ASSERT_EQ(decrypted.length(), end); + ASSERT_EQ(std::string_view(input.c_str(), end), + std::string_view(decrypted.c_str(), end) ); + } + } +} + + +TEST(TestRGWCrypto, verify_AES_256_CBC_identity_last_block) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + //create some input for encryption + const off_t test_range = 1024*1024; + buffer::ptr buf(test_range); + char* p = buf.c_str(); + for(size_t i = 0; i < buf.length(); i++) + p[i] = i + i*i + (i >> 2); + + bufferlist input; + input.append(buf); + + for (unsigned int step : {1, 2, 3, 5, 7, 11, 13, 17}) + { + //make some random key + uint8_t key[32]; + for(size_t i=0;i<sizeof(key);i++) + key[i]=i*step; + + auto aes(AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32)); + ASSERT_NE(aes.get(), nullptr); + + size_t block_size = aes->get_block_size(); + ASSERT_NE(block_size, 0u); + size_t rr = 111; + for (size_t r = 97; r < 123 ; r++) + { + off_t begin = 0; + off_t end = r*r*r*r*r*r*r % (test_range - 16); + end = end - end % block_size; + end = end + (r+3)*(r+5)*(r+7) % 16; + + off_t offset = r*r*r*r*r*r*r*r % (1000*1000*1000); + offset = offset - offset % block_size; + + ASSERT_EQ(begin % block_size, 0u); + ASSERT_LE(end, test_range); + ASSERT_EQ(offset % block_size, 0u); + + bufferlist encrypted1; + bufferlist encrypted2; + + off_t pos = begin; + off_t chunk; + while (pos < end) { + chunk = block_size + (rr/3)*(rr+17)*(rr+71)*(rr+123)*(rr+131) % 50000; + chunk = chunk - chunk % block_size; + if (pos + chunk > end) + chunk = end - pos; + bufferlist tmp; + ASSERT_TRUE(aes->encrypt(input, pos, chunk, tmp, offset + pos)); + encrypted1.append(tmp); + pos += chunk; + rr++; + } + pos = begin; + while (pos < end) { + chunk = block_size + (rr/3)*(rr+97)*(rr+151)*(rr+213)*(rr+251) % 50000; + chunk = chunk - chunk % block_size; + if (pos + chunk > end) + chunk = end - pos; + bufferlist tmp; + ASSERT_TRUE(aes->encrypt(input, pos, chunk, tmp, offset + pos)); + encrypted2.append(tmp); + pos += chunk; + rr++; + } + ASSERT_EQ(encrypted1.length(), end); + ASSERT_EQ(encrypted2.length(), end); + ASSERT_EQ(std::string_view(encrypted1.c_str(), 
end), + std::string_view(encrypted2.c_str(), end) ); + } + } +} + + +TEST(TestRGWCrypto, verify_RGWGetObj_BlockDecrypt_ranges) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + //create some input for encryption + const off_t test_range = 1024*1024; + bufferptr buf(test_range); + char* p = buf.c_str(); + for(size_t i = 0; i < buf.length(); i++) + p[i] = i + i*i + (i >> 2); + + bufferlist input; + input.append(buf); + + uint8_t key[32]; + for(size_t i=0;i<sizeof(key);i++) + key[i] = i; + + auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32); + ASSERT_NE(cbc.get(), nullptr); + bufferlist encrypted; + ASSERT_TRUE(cbc->encrypt(input, 0, test_range, encrypted, 0)); + + + for (off_t r = 93; r < 150; r++ ) + { + ut_get_sink get_sink; + auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32); + ASSERT_NE(cbc.get(), nullptr); + RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, std::move(cbc), {}); + + //random ranges + off_t begin = (r/3)*r*(r+13)*(r+23)*(r+53)*(r+71) % test_range; + off_t end = begin + (r/5)*(r+7)*(r+13)*(r+101)*(r*103) % (test_range - begin) - 1; + + off_t f_begin = begin; + off_t f_end = end; + decrypt.fixup_range(f_begin, f_end); + decrypt.handle_data(encrypted, f_begin, f_end - f_begin + 1); + decrypt.flush(); + const std::string& decrypted = get_sink.get_sink(); + size_t expected_len = end - begin + 1; + ASSERT_EQ(decrypted.length(), expected_len); + ASSERT_EQ(decrypted, std::string_view(input.c_str()+begin, expected_len)); + } +} + + +TEST(TestRGWCrypto, verify_RGWGetObj_BlockDecrypt_chunks) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + //create some input for encryption + const off_t test_range = 1024*1024; + bufferptr buf(test_range); + char* p = buf.c_str(); + for(size_t i = 0; i < buf.length(); i++) + p[i] = i + i*i + (i >> 2); + + bufferlist input; + input.append(buf); + + uint8_t key[32]; + for(size_t i=0;i<sizeof(key);i++) + key[i] = i; + + auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32); + ASSERT_NE(cbc.get(), nullptr); + bufferlist encrypted; + ASSERT_TRUE(cbc->encrypt(input, 0, test_range, encrypted, 0)); + + for (off_t r = 93; r < 150; r++ ) + { + ut_get_sink get_sink; + auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32); + ASSERT_NE(cbc.get(), nullptr); + RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, std::move(cbc), {}); + + //random + off_t begin = (r/3)*r*(r+13)*(r+23)*(r+53)*(r+71) % test_range; + off_t end = begin + (r/5)*(r+7)*(r+13)*(r+101)*(r*103) % (test_range - begin) - 1; + + off_t f_begin = begin; + off_t f_end = end; + decrypt.fixup_range(f_begin, f_end); + off_t pos = f_begin; + do + { + off_t size = 2 << ((pos * 17 + pos / 113 + r) % 16); + size = (pos + 1117) * (pos + 2229) % size + 1; + if (pos + size > f_end + 1) + size = f_end + 1 - pos; + + decrypt.handle_data(encrypted, pos, size); + pos = pos + size; + } while (pos < f_end + 1); + decrypt.flush(); + + const std::string& decrypted = get_sink.get_sink(); + size_t expected_len = end - begin + 1; + ASSERT_EQ(decrypted.length(), expected_len); + ASSERT_EQ(decrypted, std::string_view(input.c_str()+begin, expected_len)); + } +} + + +using range_t = std::pair<off_t, off_t>; + +// call filter->fixup_range() and return the range as a pair. 
this makes it easy +// to fit on a single line for ASSERT_EQ() +range_t fixup_range(RGWGetObj_BlockDecrypt *decrypt, off_t ofs, off_t end) +{ + decrypt->fixup_range(ofs, end); + return {ofs, end}; +} + +TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + ut_get_sink get_sink; + auto nonecrypt = std::unique_ptr<BlockCrypt>(new BlockCryptNone); + RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, + std::move(nonecrypt), {}); + ASSERT_EQ(fixup_range(&decrypt,0,0), range_t(0,255)); + ASSERT_EQ(fixup_range(&decrypt,1,256), range_t(0,511)); + ASSERT_EQ(fixup_range(&decrypt,0,255), range_t(0,255)); + ASSERT_EQ(fixup_range(&decrypt,255,256), range_t(0,511)); + ASSERT_EQ(fixup_range(&decrypt,511,1023), range_t(256,1023)); + ASSERT_EQ(fixup_range(&decrypt,513,1024), range_t(512,1024+255)); +} + +std::vector<size_t> create_mp_parts(size_t obj_size, size_t mp_part_len){ + std::vector<size_t> parts_len; + size_t part_size; + size_t ofs=0; + + while (ofs < obj_size){ + part_size = std::min(mp_part_len, (obj_size - ofs)); + ofs += part_size; + parts_len.push_back(part_size); + } + return parts_len; +} + +const size_t part_size = 5*1024*1024; +const size_t obj_size = 30*1024*1024; + +TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_simple) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + + ut_get_sink get_sink; + auto nonecrypt = std::make_unique<BlockCryptNone>(4096); + RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, + std::move(nonecrypt), + create_mp_parts(obj_size, part_size)); + ASSERT_EQ(fixup_range(&decrypt,0,0), range_t(0,4095)); + ASSERT_EQ(fixup_range(&decrypt,1,4096), range_t(0,8191)); + ASSERT_EQ(fixup_range(&decrypt,0,4095), range_t(0,4095)); + ASSERT_EQ(fixup_range(&decrypt,4095,4096), range_t(0,8191)); + + // ranges are end-end inclusive, we request bytes just spanning short of first + // part to exceeding the first part, part_size - 1 is aligned to a 4095 boundary + ASSERT_EQ(fixup_range(&decrypt, 0, part_size - 2), range_t(0, part_size -1)); + ASSERT_EQ(fixup_range(&decrypt, 0, part_size - 1), range_t(0, part_size -1)); + ASSERT_EQ(fixup_range(&decrypt, 0, part_size), range_t(0, part_size + 4095)); + ASSERT_EQ(fixup_range(&decrypt, 0, part_size + 1), range_t(0, part_size + 4095)); + + // request bytes spanning 2 parts + ASSERT_EQ(fixup_range(&decrypt, part_size -2, part_size + 2), + range_t(part_size - 4096, part_size + 4095)); + + // request last byte + ASSERT_EQ(fixup_range(&decrypt, obj_size - 1, obj_size -1), + range_t(obj_size - 4096, obj_size -1)); + +} + +TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_non_aligned_obj_size) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + + const size_t na_obj_size = obj_size + 1; + + ut_get_sink get_sink; + auto nonecrypt = std::make_unique<BlockCryptNone>(4096); + RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, + std::move(nonecrypt), + create_mp_parts(na_obj_size, part_size)); + + // these should be unaffected here + ASSERT_EQ(fixup_range(&decrypt, 0, part_size - 2), range_t(0, part_size -1)); + ASSERT_EQ(fixup_range(&decrypt, 0, part_size - 1), range_t(0, part_size -1)); + ASSERT_EQ(fixup_range(&decrypt, 0, part_size), range_t(0, part_size + 4095)); + ASSERT_EQ(fixup_range(&decrypt, 0, part_size + 1), range_t(0, part_size + 4095)); + + + // request last 2 bytes; spanning 2 parts + ASSERT_EQ(fixup_range(&decrypt, na_obj_size -2 , na_obj_size -1), + range_t(na_obj_size - 1 - 4096, 
na_obj_size - 1)); + + // request last byte, spans last 1B part only + ASSERT_EQ(fixup_range(&decrypt, na_obj_size -1, na_obj_size - 1), + range_t(na_obj_size - 1, na_obj_size -1)); + +} + +TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_non_aligned_part_size) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + + const size_t na_part_size = part_size + 1; + + ut_get_sink get_sink; + auto nonecrypt = std::make_unique<BlockCryptNone>(4096); + RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, + std::move(nonecrypt), + create_mp_parts(obj_size, na_part_size)); + + // na_part_size -2, ie. part_size -1 is aligned to 4095 boundary + ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size - 2), range_t(0, na_part_size -2)); + // even though na_part_size -1 should not align to a 4095 boundary, the range + // should not span the next part + ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size - 1), range_t(0, na_part_size -1)); + + ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size), range_t(0, na_part_size + 4095)); + ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size + 1), range_t(0, na_part_size + 4095)); + + // request spanning 2 parts + ASSERT_EQ(fixup_range(&decrypt, na_part_size - 2, na_part_size + 2), + range_t(na_part_size - 1 - 4096, na_part_size + 4095)); + + // request last byte; this is the interesting case: since this is a multipart + // upload with 5MiB+1 parts, the last part is actually 5 bytes short of 5 MiB, + // which has to be taken into account when aligning the range; put differently, + // the last 4096-aligned offset within the final part is 5MiB - 4096, even + // though that part is only 5MiB - 5 bytes long + ASSERT_EQ(fixup_range(&decrypt, obj_size - 1, obj_size -1), + range_t(obj_size +5 -4096, obj_size -1)); + +} + +TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_non_aligned) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + + const size_t na_part_size = part_size + 1; + const size_t na_obj_size = obj_size + 7; // (6*(5MiB + 1) + 1) for the last 1B overflow + + ut_get_sink get_sink; + auto nonecrypt = std::make_unique<BlockCryptNone>(4096); + RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, + std::move(nonecrypt), + create_mp_parts(na_obj_size, na_part_size)); + + // na_part_size -2, ie. 
part_size -1 is aligned to 4095 boundary + ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size - 2), range_t(0, na_part_size -2)); + // even though na_part_size -1 should not align to a 4095 boundary, the range + // should not span the next part + ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size - 1), range_t(0, na_part_size -1)); + + ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size), range_t(0, na_part_size + 4095)); + ASSERT_EQ(fixup_range(&decrypt, 0, na_part_size + 1), range_t(0, na_part_size + 4095)); + + // request last byte, spans last 1B part only + ASSERT_EQ(fixup_range(&decrypt, na_obj_size -1, na_obj_size - 1), + range_t(na_obj_size - 1, na_obj_size -1)); + + ASSERT_EQ(fixup_range(&decrypt, na_obj_size -2, na_obj_size -1), + range_t(na_obj_size - 2, na_obj_size -1)); + +} + +TEST(TestRGWCrypto, check_RGWGetObj_BlockDecrypt_fixup_invalid_ranges) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + + ut_get_sink get_sink; + auto nonecrypt = std::make_unique<BlockCryptNone>(4096); + RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, + std::move(nonecrypt), + create_mp_parts(obj_size, part_size)); + + + // the ranges below would be mostly unreachable in current code as rgw + // would've returned a 411 before reaching, but we're just doing this to make + // sure we don't have invalid access + ASSERT_EQ(fixup_range(&decrypt, obj_size - 1, obj_size + 100), + range_t(obj_size - 4096, obj_size - 1)); + ASSERT_EQ(fixup_range(&decrypt, obj_size, obj_size + 1), + range_t(obj_size - 1, obj_size - 1)); + ASSERT_EQ(fixup_range(&decrypt, obj_size+1, obj_size + 100), + range_t(obj_size - 1, obj_size - 1)); + +} + +TEST(TestRGWCrypto, verify_RGWPutObj_BlockEncrypt_chunks) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + //create some input for encryption + const off_t test_range = 1024*1024; + bufferptr buf(test_range); + char* p = buf.c_str(); + for(size_t i = 0; i < buf.length(); i++) + p[i] = i + i*i + (i >> 2); + + bufferlist input; + input.append(buf); + + uint8_t key[32]; + for(size_t i=0;i<sizeof(key);i++) + key[i] = i; + + for (off_t r = 93; r < 150; r++ ) + { + ut_put_sink put_sink; + auto cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32); + ASSERT_NE(cbc.get(), nullptr); + RGWPutObj_BlockEncrypt encrypt(&no_dpp, g_ceph_context, &put_sink, + std::move(cbc)); + + off_t test_size = (r/5)*(r+7)*(r+13)*(r+101)*(r*103) % (test_range - 1) + 1; + off_t pos = 0; + do + { + off_t size = 2 << ((pos * 17 + pos / 113 + r) % 16); + size = (pos + 1117) * (pos + 2229) % size + 1; + if (pos + size > test_size) + size = test_size - pos; + + bufferlist bl; + bl.append(input.c_str()+pos, size); + encrypt.process(std::move(bl), pos); + + pos = pos + size; + } while (pos < test_size); + encrypt.process({}, pos); + + ASSERT_EQ(put_sink.get_sink().length(), static_cast<size_t>(test_size)); + + cbc = AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32); + ASSERT_NE(cbc.get(), nullptr); + + bufferlist encrypted; + bufferlist decrypted; + encrypted.append(put_sink.get_sink()); + ASSERT_TRUE(cbc->decrypt(encrypted, 0, test_size, decrypted, 0)); + + ASSERT_EQ(decrypted.length(), test_size); + ASSERT_EQ(std::string_view(decrypted.c_str(), test_size), + std::string_view(input.c_str(), test_size)); + } +} + + +TEST(TestRGWCrypto, verify_Encrypt_Decrypt) +{ + const NoDoutPrefix no_dpp(g_ceph_context, dout_subsys); + uint8_t key[32]; + for(size_t i=0;i<sizeof(key);i++) + key[i]=i; + + size_t fi_a = 0; + size_t fi_b = 1; + size_t test_size; + do + { + //fibonacci + 
size_t tmp = fi_b; + fi_b = fi_a + fi_b; + fi_a = tmp; + + test_size = fi_b; + + uint8_t* test_in = new uint8_t[test_size]; + //fill with something + memset(test_in, test_size & 0xff, test_size); + + ut_put_sink put_sink; + RGWPutObj_BlockEncrypt encrypt(&no_dpp, g_ceph_context, &put_sink, + AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32)); + bufferlist bl; + bl.append((char*)test_in, test_size); + encrypt.process(std::move(bl), 0); + encrypt.process({}, test_size); + ASSERT_EQ(put_sink.get_sink().length(), test_size); + + bl.append(put_sink.get_sink().data(), put_sink.get_sink().length()); + ASSERT_EQ(bl.length(), test_size); + + ut_get_sink get_sink; + RGWGetObj_BlockDecrypt decrypt(&no_dpp, g_ceph_context, &get_sink, + AES_256_CBC_create(&no_dpp, g_ceph_context, &key[0], 32), + {}); + + off_t bl_ofs = 0; + off_t bl_end = test_size - 1; + decrypt.fixup_range(bl_ofs, bl_end); + decrypt.handle_data(bl, 0, bl.length()); + decrypt.flush(); + ASSERT_EQ(get_sink.get_sink().length(), test_size); + ASSERT_EQ(get_sink.get_sink(), std::string_view((char*)test_in,test_size)); + } + while (test_size < 20000); +} + + +int main(int argc, char **argv) { + auto args = argv_to_vec(argc, argv); + auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, + CODE_ENVIRONMENT_UTILITY, + CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); + common_init_finish(g_ceph_context); + + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/src/test/rgw/test_rgw_dmclock_scheduler.cc b/src/test/rgw/test_rgw_dmclock_scheduler.cc new file mode 100644 index 000000000..92800767c --- /dev/null +++ b/src/test/rgw/test_rgw_dmclock_scheduler.cc @@ -0,0 +1,428 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2018 Red Hat, Inc. + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +//#define BOOST_ASIO_ENABLE_HANDLER_TRACKING + +#include "rgw_dmclock_sync_scheduler.h" +#include "rgw_dmclock_async_scheduler.h" + +#include <optional> +#include <spawn/spawn.hpp> +#include <gtest/gtest.h> +#include "acconfig.h" +#include "global/global_context.h" + +namespace rgw::dmclock { + +using boost::system::error_code; + +// return a lambda that can be used as a callback to capture its arguments +auto capture(std::optional<error_code>& opt_ec, + std::optional<PhaseType>& opt_phase) +{ + return [&] (error_code ec, PhaseType phase) { + opt_ec = ec; + opt_phase = phase; + }; +} + +TEST(Queue, SyncRequest) +{ + ClientCounters counters(g_ceph_context); + auto client_info_f = [] (client_id client) -> ClientInfo* { + static ClientInfo clients[] = { + {1, 1, 1}, //admin: satisfy by reservation + {0, 1, 1}, //auth: satisfy by priority + }; + return &clients[static_cast<size_t>(client)]; + }; + std::atomic <bool> ready = false; + auto server_ready_f = [&ready]() -> bool { return ready.load();}; + + SyncScheduler queue(g_ceph_context, std::ref(counters), + client_info_f, server_ready_f, + std::ref(SyncScheduler::handle_request_cb) + ); + + + auto now = get_time(); + ready = true; + queue.add_request(client_id::admin, {}, now, 1); + queue.add_request(client_id::auth, {}, now, 1); + + // We can't see the queue at length 1 as the queue len is decremented as the + //request is processed + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_res)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_cancel)); + + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_cancel)); +} + +TEST(Queue, RateLimit) +{ + boost::asio::io_context context; + ClientCounters counters(g_ceph_context); + AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr, + [] (client_id client) -> ClientInfo* { + static ClientInfo clients[] = { + {1, 1, 1}, // admin + {0, 1, 1}, // auth + }; + return &clients[static_cast<size_t>(client)]; + }, AtLimit::Reject); + + std::optional<error_code> ec1, ec2, ec3, ec4; + std::optional<PhaseType> p1, p2, p3, p4; + + auto now = get_time(); + queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1)); + queue.async_request(client_id::admin, {}, now, 1, capture(ec2, p2)); + queue.async_request(client_id::auth, {}, now, 1, capture(ec3, p3)); + queue.async_request(client_id::auth, {}, now, 1, capture(ec4, p4)); + EXPECT_FALSE(ec1); + EXPECT_FALSE(ec2); + EXPECT_FALSE(ec3); + EXPECT_FALSE(ec4); + + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen)); + + context.run_for(std::chrono::milliseconds(1)); + EXPECT_TRUE(context.stopped()); + + ASSERT_TRUE(ec1); + EXPECT_EQ(boost::system::errc::success, *ec1); + ASSERT_TRUE(p1); + EXPECT_EQ(PhaseType::reservation, *p1); + + ASSERT_TRUE(ec2); + EXPECT_EQ(boost::system::errc::resource_unavailable_try_again, *ec2); + + ASSERT_TRUE(ec3); + EXPECT_EQ(boost::system::errc::success, *ec3); 
+ ASSERT_TRUE(p3); + EXPECT_EQ(PhaseType::priority, *p3); + + ASSERT_TRUE(ec4); + EXPECT_EQ(boost::system::errc::resource_unavailable_try_again, *ec4); + + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_res)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio)); + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_limit)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_cancel)); + + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_prio)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_limit)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_cancel)); +} + +TEST(Queue, AsyncRequest) +{ + boost::asio::io_context context; + ClientCounters counters(g_ceph_context); + AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr, + [] (client_id client) -> ClientInfo* { + static ClientInfo clients[] = { + {1, 1, 1}, // admin: satisfy by reservation + {0, 1, 1}, // auth: satisfy by priority + }; + return &clients[static_cast<size_t>(client)]; + }, AtLimit::Reject + ); + + std::optional<error_code> ec1, ec2; + std::optional<PhaseType> p1, p2; + + auto now = get_time(); + queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1)); + queue.async_request(client_id::auth, {}, now, 1, capture(ec2, p2)); + EXPECT_FALSE(ec1); + EXPECT_FALSE(ec2); + + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen)); + + context.run_for(std::chrono::milliseconds(1)); + EXPECT_TRUE(context.stopped()); + + ASSERT_TRUE(ec1); + EXPECT_EQ(boost::system::errc::success, *ec1); + ASSERT_TRUE(p1); + EXPECT_EQ(PhaseType::reservation, *p1); + + ASSERT_TRUE(ec2); + EXPECT_EQ(boost::system::errc::success, *ec2); + ASSERT_TRUE(p2); + EXPECT_EQ(PhaseType::priority, *p2); + + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_res)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_cancel)); + + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_cancel)); +} + + +TEST(Queue, Cancel) +{ + boost::asio::io_context context; + ClientCounters counters(g_ceph_context); + AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr, + [] (client_id client) -> ClientInfo* { + static ClientInfo info{0, 1, 1}; + return &info; + }); + + std::optional<error_code> ec1, ec2; + std::optional<PhaseType> p1, p2; + + auto now = get_time(); + queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1)); + queue.async_request(client_id::auth, {}, now, 1, capture(ec2, p2)); + EXPECT_FALSE(ec1); + EXPECT_FALSE(ec2); + + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(1u, 
counters(client_id::auth)->get(queue_counters::l_qlen)); + + queue.cancel(); + + EXPECT_FALSE(ec1); + EXPECT_FALSE(ec2); + + context.run_for(std::chrono::milliseconds(1)); + EXPECT_TRUE(context.stopped()); + + ASSERT_TRUE(ec1); + EXPECT_EQ(boost::asio::error::operation_aborted, *ec1); + ASSERT_TRUE(ec2); + EXPECT_EQ(boost::asio::error::operation_aborted, *ec2); + + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_res)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit)); + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_cancel)); + + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_cancel)); +} + +TEST(Queue, CancelClient) +{ + boost::asio::io_context context; + ClientCounters counters(g_ceph_context); + AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr, + [] (client_id client) -> ClientInfo* { + static ClientInfo info{0, 1, 1}; + return &info; + }); + + std::optional<error_code> ec1, ec2; + std::optional<PhaseType> p1, p2; + + auto now = get_time(); + queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1)); + queue.async_request(client_id::auth, {}, now, 1, capture(ec2, p2)); + EXPECT_FALSE(ec1); + EXPECT_FALSE(ec2); + + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen)); + + queue.cancel(client_id::admin); + + EXPECT_FALSE(ec1); + EXPECT_FALSE(ec2); + + context.run_for(std::chrono::milliseconds(1)); + EXPECT_TRUE(context.stopped()); + + ASSERT_TRUE(ec1); + EXPECT_EQ(boost::asio::error::operation_aborted, *ec1); + + ASSERT_TRUE(ec2); + EXPECT_EQ(boost::system::errc::success, *ec2); + ASSERT_TRUE(p2); + EXPECT_EQ(PhaseType::priority, *p2); + + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_res)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit)); + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_cancel)); + + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_cancel)); +} + +TEST(Queue, CancelOnDestructor) +{ + boost::asio::io_context context; + + std::optional<error_code> ec1, ec2; + std::optional<PhaseType> p1, p2; + + ClientCounters counters(g_ceph_context); + { + AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr, + [] (client_id client) -> ClientInfo* { + static ClientInfo info{0, 1, 1}; + return &info; + }); + + auto now = get_time(); + queue.async_request(client_id::admin, {}, now, 1, capture(ec1, p1)); + queue.async_request(client_id::auth, {}, now, 1, capture(ec2, p2)); + + EXPECT_EQ(1u, 
counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen)); + } + + EXPECT_FALSE(ec1); + EXPECT_FALSE(ec2); + + context.run_for(std::chrono::milliseconds(1)); + EXPECT_TRUE(context.stopped()); + + ASSERT_TRUE(ec1); + EXPECT_EQ(boost::asio::error::operation_aborted, *ec1); + ASSERT_TRUE(ec2); + EXPECT_EQ(boost::asio::error::operation_aborted, *ec2); + + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_res)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::admin)->get(queue_counters::l_limit)); + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_cancel)); + + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_qlen)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_res)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_prio)); + EXPECT_EQ(0u, counters(client_id::auth)->get(queue_counters::l_limit)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_cancel)); +} + +// return a lambda from capture() that's bound to run on the given executor +template <typename Executor> +auto capture(const Executor& ex, std::optional<error_code>& opt_ec, + std::optional<PhaseType>& opt_res) +{ + return boost::asio::bind_executor(ex, capture(opt_ec, opt_res)); +} + +TEST(Queue, CrossExecutorRequest) +{ + boost::asio::io_context queue_context; + ClientCounters counters(g_ceph_context); + AsyncScheduler queue(g_ceph_context, queue_context, std::ref(counters), nullptr, + [] (client_id client) -> ClientInfo* { + static ClientInfo info{0, 1, 1}; + return &info; + }); + + // create a separate execution context to use for all callbacks to test that + // pending requests maintain executor work guards on both executors + boost::asio::io_context callback_context; + auto ex2 = callback_context.get_executor(); + + std::optional<error_code> ec1, ec2; + std::optional<PhaseType> p1, p2; + + auto now = get_time(); + queue.async_request(client_id::admin, {}, now, 1, capture(ex2, ec1, p1)); + queue.async_request(client_id::auth, {}, now, 1, capture(ex2, ec2, p2)); + + EXPECT_EQ(1u, counters(client_id::admin)->get(queue_counters::l_qlen)); + EXPECT_EQ(1u, counters(client_id::auth)->get(queue_counters::l_qlen)); + + callback_context.run_for(std::chrono::milliseconds(1)); + // maintains work on callback executor while in queue + EXPECT_FALSE(callback_context.stopped()); + + EXPECT_FALSE(ec1); + EXPECT_FALSE(ec2); + + queue_context.run_for(std::chrono::milliseconds(1)); + EXPECT_TRUE(queue_context.stopped()); + + EXPECT_FALSE(ec1); // no callbacks until callback executor runs + EXPECT_FALSE(ec2); + + callback_context.run_for(std::chrono::milliseconds(1)); + EXPECT_TRUE(callback_context.stopped()); + + ASSERT_TRUE(ec1); + EXPECT_EQ(boost::system::errc::success, *ec1); + ASSERT_TRUE(p1); + EXPECT_EQ(PhaseType::priority, *p1); + + ASSERT_TRUE(ec2); + EXPECT_EQ(boost::system::errc::success, *ec2); + ASSERT_TRUE(p2); + EXPECT_EQ(PhaseType::priority, *p2); +} + +TEST(Queue, SpawnAsyncRequest) +{ + boost::asio::io_context context; + + spawn::spawn(context, [&] (yield_context yield) { + ClientCounters counters(g_ceph_context); + AsyncScheduler queue(g_ceph_context, context, std::ref(counters), nullptr, + [] (client_id client) -> ClientInfo* { + static ClientInfo clients[] = { + {1, 1, 1}, // admin: satisfy by reservation + {0, 1, 1}, // auth: satisfy 
by priority + }; + return &clients[static_cast<size_t>(client)]; + }); + + error_code ec1, ec2; + auto p1 = queue.async_request(client_id::admin, {}, get_time(), 1, yield[ec1]); + EXPECT_EQ(boost::system::errc::success, ec1); + EXPECT_EQ(PhaseType::reservation, p1); + + auto p2 = queue.async_request(client_id::auth, {}, get_time(), 1, yield[ec2]); + EXPECT_EQ(boost::system::errc::success, ec2); + EXPECT_EQ(PhaseType::priority, p2); + }); + + context.run_for(std::chrono::milliseconds(1)); + EXPECT_TRUE(context.stopped()); +} + +} // namespace rgw::dmclock diff --git a/src/test/rgw/test_rgw_gc_log.cc b/src/test/rgw/test_rgw_gc_log.cc new file mode 100644 index 000000000..ae8c4d372 --- /dev/null +++ b/src/test/rgw/test_rgw_gc_log.cc @@ -0,0 +1,144 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "rgw_gc_log.h" + +#include "test/librados/test_cxx.h" +#include "gtest/gtest.h" + +// creates a rados client and temporary pool +struct RadosEnv : public ::testing::Environment { + static std::optional<std::string> pool_name; + public: + static std::optional<librados::Rados> rados; + + void SetUp() override { + rados.emplace(); + // create pool + std::string name = get_temp_pool_name(); + ASSERT_EQ("", create_one_pool_pp(name, *rados)); + pool_name = name; + } + void TearDown() override { + if (pool_name) { + ASSERT_EQ(0, destroy_one_pool_pp(*pool_name, *rados)); + } + rados.reset(); + } + + static int ioctx_create(librados::IoCtx& ioctx) { + return rados->ioctx_create(pool_name->c_str(), ioctx); + } +}; +std::optional<std::string> RadosEnv::pool_name; +std::optional<librados::Rados> RadosEnv::rados; + +auto *const rados_env = ::testing::AddGlobalTestEnvironment(new RadosEnv); + +class rgw_gc_log : public ::testing::Test { + protected: + static librados::IoCtx ioctx; + + static void SetUpTestSuite() { + ASSERT_EQ(0, RadosEnv::ioctx_create(ioctx)); + } + static void TearDownTestSuite() { + ioctx.close(); + } + + // use the test's name as the oid so different tests don't conflict + std::string get_test_oid() const { + return ::testing::UnitTest::GetInstance()->current_test_info()->name(); + } +}; +librados::IoCtx rgw_gc_log::ioctx; + + +TEST_F(rgw_gc_log, init_existing_queue) +{ + const std::string oid = get_test_oid(); + { + // successfully inits new object + librados::ObjectWriteOperation op; + gc_log_init2(op, 1, 1); + ASSERT_EQ(0, ioctx.operate(oid, &op)); + } + { + // version check fails on second init + librados::ObjectWriteOperation op; + gc_log_init2(op, 1, 1); + ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op)); + } +} + +TEST_F(rgw_gc_log, init_existing_omap) +{ + const std::string oid = get_test_oid(); + { + librados::ObjectWriteOperation op; + cls_rgw_gc_obj_info info; + gc_log_enqueue1(op, 5, info); + ASSERT_EQ(0, ioctx.operate(oid, &op)); + } + { + // init succeeds with existing omap entries + librados::ObjectWriteOperation op; + gc_log_init2(op, 1, 1); + ASSERT_EQ(0, ioctx.operate(oid, &op)); + } +} + +TEST_F(rgw_gc_log, enqueue1_after_init) +{ + const std::string oid = get_test_oid(); + { + librados::ObjectWriteOperation op; + gc_log_init2(op, 1, 1); + ASSERT_EQ(0, ioctx.operate(oid, &op)); + } + { + // version check fails on omap enqueue + librados::ObjectWriteOperation op; + cls_rgw_gc_obj_info info; + gc_log_enqueue1(op, 5, info); + ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op)); + } +} + +TEST_F(rgw_gc_log, enqueue2_before_init) +{ + const std::string oid = get_test_oid(); + { + // version check fails on cls_rgw_gc 
enqueue + librados::ObjectWriteOperation op; + gc_log_enqueue2(op, 5, {}); + ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op)); + } +} + +TEST_F(rgw_gc_log, defer1_after_init) +{ + const std::string oid = get_test_oid(); + { + librados::ObjectWriteOperation op; + gc_log_init2(op, 1, 1); + ASSERT_EQ(0, ioctx.operate(oid, &op)); + } + { + // version check fails on omap defer + librados::ObjectWriteOperation op; + gc_log_defer1(op, 5, {}); + ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op)); + } +} + +TEST_F(rgw_gc_log, defer2_before_init) +{ + const std::string oid = get_test_oid(); + { + // version check fails on cls_rgw_gc defer + librados::ObjectWriteOperation op; + gc_log_defer2(op, 5, {}); + ASSERT_EQ(-ECANCELED, ioctx.operate(oid, &op)); + } +} diff --git a/src/test/rgw/test_rgw_iam_policy.cc b/src/test/rgw/test_rgw_iam_policy.cc new file mode 100644 index 000000000..f4c3c6aff --- /dev/null +++ b/src/test/rgw/test_rgw_iam_policy.cc @@ -0,0 +1,1321 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#include <string> + +#include <boost/intrusive_ptr.hpp> +#include <boost/optional.hpp> + +#include <gtest/gtest.h> + +#include "include/stringify.h" +#include "common/code_environment.h" +#include "common/ceph_context.h" +#include "global/global_init.h" +#include "rgw_auth.h" +#include "rgw_auth_registry.h" +#include "rgw_iam_policy.h" +#include "rgw_op.h" +#include "rgw_process_env.h" +#include "rgw_sal_rados.h" + + +using std::string; +using std::vector; + +using boost::container::flat_set; +using boost::intrusive_ptr; +using boost::make_optional; +using boost::none; + +using rgw::auth::Identity; +using rgw::auth::Principal; + +using rgw::ARN; +using rgw::IAM::Effect; +using rgw::IAM::Environment; +using rgw::Partition; +using rgw::IAM::Policy; +using rgw::IAM::s3All; +using rgw::IAM::s3Count; +using rgw::IAM::s3GetAccelerateConfiguration; +using rgw::IAM::s3GetBucketAcl; +using rgw::IAM::s3GetBucketCORS; +using rgw::IAM::s3GetBucketLocation; +using rgw::IAM::s3GetBucketLogging; +using rgw::IAM::s3GetBucketNotification; +using rgw::IAM::s3GetBucketPolicy; +using rgw::IAM::s3GetBucketPolicyStatus; +using rgw::IAM::s3GetBucketPublicAccessBlock; +using rgw::IAM::s3GetBucketEncryption; +using rgw::IAM::s3GetBucketRequestPayment; +using rgw::IAM::s3GetBucketTagging; +using rgw::IAM::s3GetBucketVersioning; +using rgw::IAM::s3GetBucketWebsite; +using rgw::IAM::s3GetLifecycleConfiguration; +using rgw::IAM::s3GetObject; +using rgw::IAM::s3GetObjectAcl; +using rgw::IAM::s3GetObjectVersionAcl; +using rgw::IAM::s3GetObjectTorrent; +using rgw::IAM::s3GetObjectTagging; +using rgw::IAM::s3GetObjectVersion; +using rgw::IAM::s3GetObjectVersionTagging; +using rgw::IAM::s3GetObjectVersionTorrent; +using rgw::IAM::s3GetPublicAccessBlock; +using rgw::IAM::s3GetReplicationConfiguration; +using rgw::IAM::s3ListAllMyBuckets; +using rgw::IAM::s3ListBucket; +using rgw::IAM::s3ListBucketMultipartUploads; +using rgw::IAM::s3ListBucketVersions; +using rgw::IAM::s3ListMultipartUploadParts; +using rgw::IAM::None; +using rgw::IAM::s3PutBucketAcl; +using rgw::IAM::s3PutBucketPolicy; +using rgw::IAM::s3GetBucketObjectLockConfiguration; +using 
rgw::IAM::s3GetObjectRetention; +using rgw::IAM::s3GetObjectLegalHold; +using rgw::Service; +using rgw::IAM::TokenID; +using rgw::IAM::Version; +using rgw::IAM::Action_t; +using rgw::IAM::NotAction_t; +using rgw::IAM::iamCreateRole; +using rgw::IAM::iamDeleteRole; +using rgw::IAM::iamAll; +using rgw::IAM::stsAll; +using rgw::IAM::allCount; + +class FakeIdentity : public Identity { + const Principal id; +public: + + explicit FakeIdentity(Principal&& id) : id(std::move(id)) {} + uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override { + ceph_abort(); + return 0; + }; + + bool is_admin_of(const rgw_user& uid) const override { + ceph_abort(); + return false; + } + + bool is_owner_of(const rgw_user& uid) const override { + ceph_abort(); + return false; + } + + virtual uint32_t get_perm_mask() const override { + ceph_abort(); + return 0; + } + + string get_acct_name() const override { + abort(); + return 0; + } + + string get_subuser() const override { + abort(); + return 0; + } + + void to_str(std::ostream& out) const override { + out << id; + } + + bool is_identity(const flat_set<Principal>& ids) const override { + if (id.is_wildcard() && (!ids.empty())) { + return true; + } + return ids.find(id) != ids.end() || ids.find(Principal::wildcard()) != ids.end(); + } + + uint32_t get_identity_type() const override { + return TYPE_RGW; + } +}; + +class PolicyTest : public ::testing::Test { +protected: + intrusive_ptr<CephContext> cct; + static const string arbitrary_tenant; + static string example1; + static string example2; + static string example3; + static string example4; + static string example5; + static string example6; + static string example7; +public: + PolicyTest() { + cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); + } +}; + +TEST_F(PolicyTest, Parse1) { + boost::optional<Policy> p; + + ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example1), + true)); + ASSERT_TRUE(p); + + EXPECT_EQ(p->text, example1); + EXPECT_EQ(p->version, Version::v2012_10_17); + EXPECT_FALSE(p->id); + EXPECT_FALSE(p->statements[0].sid); + EXPECT_FALSE(p->statements.empty()); + EXPECT_EQ(p->statements.size(), 1U); + EXPECT_TRUE(p->statements[0].princ.empty()); + EXPECT_TRUE(p->statements[0].noprinc.empty()); + EXPECT_EQ(p->statements[0].effect, Effect::Allow); + Action_t act; + act[s3ListBucket] = 1; + EXPECT_EQ(p->statements[0].action, act); + EXPECT_EQ(p->statements[0].notaction, None); + ASSERT_FALSE(p->statements[0].resource.empty()); + ASSERT_EQ(p->statements[0].resource.size(), 1U); + EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws); + EXPECT_EQ(p->statements[0].resource.begin()->service, Service::s3); + EXPECT_TRUE(p->statements[0].resource.begin()->region.empty()); + EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[0].resource.begin()->resource, "example_bucket"); + EXPECT_TRUE(p->statements[0].notresource.empty()); + EXPECT_TRUE(p->statements[0].conditions.empty()); +} + +TEST_F(PolicyTest, Eval1) { + auto p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example1), true); + Environment e; + + ARN arn1(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(p.eval(e, none, s3ListBucket, arn1), + Effect::Allow); + + ARN arn2(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(p.eval(e, none, s3PutBucketAcl, arn2), + Effect::Pass); + + ARN arn3(Partition::aws, 
Service::s3, + "", arbitrary_tenant, "erroneous_bucket"); + EXPECT_EQ(p.eval(e, none, s3ListBucket, arn3), + Effect::Pass); + +} + +TEST_F(PolicyTest, Parse2) { + boost::optional<Policy> p; + + ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example2), + true)); + ASSERT_TRUE(p); + + EXPECT_EQ(p->text, example2); + EXPECT_EQ(p->version, Version::v2012_10_17); + EXPECT_EQ(*p->id, "S3-Account-Permissions"); + ASSERT_FALSE(p->statements.empty()); + EXPECT_EQ(p->statements.size(), 1U); + EXPECT_EQ(*p->statements[0].sid, "1"); + EXPECT_FALSE(p->statements[0].princ.empty()); + EXPECT_EQ(p->statements[0].princ.size(), 1U); + EXPECT_EQ(*p->statements[0].princ.begin(), + Principal::tenant("ACCOUNT-ID-WITHOUT-HYPHENS")); + EXPECT_TRUE(p->statements[0].noprinc.empty()); + EXPECT_EQ(p->statements[0].effect, Effect::Allow); + Action_t act; + for (auto i = 0ULL; i < s3Count; i++) + act[i] = 1; + act[s3All] = 1; + EXPECT_EQ(p->statements[0].action, act); + EXPECT_EQ(p->statements[0].notaction, None); + ASSERT_FALSE(p->statements[0].resource.empty()); + ASSERT_EQ(p->statements[0].resource.size(), 2U); + EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws); + EXPECT_EQ(p->statements[0].resource.begin()->service, Service::s3); + EXPECT_TRUE(p->statements[0].resource.begin()->region.empty()); + EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[0].resource.begin()->resource, "mybucket"); + EXPECT_EQ((p->statements[0].resource.begin() + 1)->partition, + Partition::aws); + EXPECT_EQ((p->statements[0].resource.begin() + 1)->service, + Service::s3); + EXPECT_TRUE((p->statements[0].resource.begin() + 1)->region.empty()); + EXPECT_EQ((p->statements[0].resource.begin() + 1)->account, + arbitrary_tenant); + EXPECT_EQ((p->statements[0].resource.begin() + 1)->resource, "mybucket/*"); + EXPECT_TRUE(p->statements[0].notresource.empty()); + EXPECT_TRUE(p->statements[0].conditions.empty()); +} + +TEST_F(PolicyTest, Eval2) { + auto p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example2), true); + Environment e; + + auto trueacct = FakeIdentity( + Principal::tenant("ACCOUNT-ID-WITHOUT-HYPHENS")); + + auto notacct = FakeIdentity( + Principal::tenant("some-other-account")); + for (auto i = 0ULL; i < s3Count; ++i) { + ARN arn1(Partition::aws, Service::s3, + "", arbitrary_tenant, "mybucket"); + EXPECT_EQ(p.eval(e, trueacct, i, arn1), + Effect::Allow); + ARN arn2(Partition::aws, Service::s3, + "", arbitrary_tenant, "mybucket/myobject"); + EXPECT_EQ(p.eval(e, trueacct, i, arn2), + Effect::Allow); + ARN arn3(Partition::aws, Service::s3, + "", arbitrary_tenant, "mybucket"); + EXPECT_EQ(p.eval(e, notacct, i, arn3), + Effect::Pass); + ARN arn4(Partition::aws, Service::s3, + "", arbitrary_tenant, "mybucket/myobject"); + EXPECT_EQ(p.eval(e, notacct, i, arn4), + Effect::Pass); + ARN arn5(Partition::aws, Service::s3, + "", arbitrary_tenant, "notyourbucket"); + EXPECT_EQ(p.eval(e, trueacct, i, arn5), + Effect::Pass); + ARN arn6(Partition::aws, Service::s3, + "", arbitrary_tenant, "notyourbucket/notyourobject"); + EXPECT_EQ(p.eval(e, trueacct, i, arn6), + Effect::Pass); + + } +} + +TEST_F(PolicyTest, Parse3) { + boost::optional<Policy> p; + + ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example3), true)); + ASSERT_TRUE(p); + + EXPECT_EQ(p->text, example3); + EXPECT_EQ(p->version, Version::v2012_10_17); + EXPECT_FALSE(p->id); + ASSERT_FALSE(p->statements.empty()); 
+ EXPECT_EQ(p->statements.size(), 3U); + + EXPECT_EQ(*p->statements[0].sid, "FirstStatement"); + EXPECT_TRUE(p->statements[0].princ.empty()); + EXPECT_TRUE(p->statements[0].noprinc.empty()); + EXPECT_EQ(p->statements[0].effect, Effect::Allow); + Action_t act; + act[s3PutBucketPolicy] = 1; + EXPECT_EQ(p->statements[0].action, act); + EXPECT_EQ(p->statements[0].notaction, None); + ASSERT_FALSE(p->statements[0].resource.empty()); + ASSERT_EQ(p->statements[0].resource.size(), 1U); + EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::wildcard); + EXPECT_EQ(p->statements[0].resource.begin()->service, Service::wildcard); + EXPECT_EQ(p->statements[0].resource.begin()->region, "*"); + EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[0].resource.begin()->resource, "*"); + EXPECT_TRUE(p->statements[0].notresource.empty()); + EXPECT_TRUE(p->statements[0].conditions.empty()); + + EXPECT_EQ(*p->statements[1].sid, "SecondStatement"); + EXPECT_TRUE(p->statements[1].princ.empty()); + EXPECT_TRUE(p->statements[1].noprinc.empty()); + EXPECT_EQ(p->statements[1].effect, Effect::Allow); + Action_t act1; + act1[s3ListAllMyBuckets] = 1; + EXPECT_EQ(p->statements[1].action, act1); + EXPECT_EQ(p->statements[1].notaction, None); + ASSERT_FALSE(p->statements[1].resource.empty()); + ASSERT_EQ(p->statements[1].resource.size(), 1U); + EXPECT_EQ(p->statements[1].resource.begin()->partition, Partition::wildcard); + EXPECT_EQ(p->statements[1].resource.begin()->service, Service::wildcard); + EXPECT_EQ(p->statements[1].resource.begin()->region, "*"); + EXPECT_EQ(p->statements[1].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[1].resource.begin()->resource, "*"); + EXPECT_TRUE(p->statements[1].notresource.empty()); + EXPECT_TRUE(p->statements[1].conditions.empty()); + + EXPECT_EQ(*p->statements[2].sid, "ThirdStatement"); + EXPECT_TRUE(p->statements[2].princ.empty()); + EXPECT_TRUE(p->statements[2].noprinc.empty()); + EXPECT_EQ(p->statements[2].effect, Effect::Allow); + Action_t act2; + act2[s3ListMultipartUploadParts] = 1; + act2[s3ListBucket] = 1; + act2[s3ListBucketVersions] = 1; + act2[s3ListAllMyBuckets] = 1; + act2[s3ListBucketMultipartUploads] = 1; + act2[s3GetObject] = 1; + act2[s3GetObjectVersion] = 1; + act2[s3GetObjectAcl] = 1; + act2[s3GetObjectVersionAcl] = 1; + act2[s3GetObjectTorrent] = 1; + act2[s3GetObjectVersionTorrent] = 1; + act2[s3GetAccelerateConfiguration] = 1; + act2[s3GetBucketAcl] = 1; + act2[s3GetBucketCORS] = 1; + act2[s3GetBucketVersioning] = 1; + act2[s3GetBucketRequestPayment] = 1; + act2[s3GetBucketLocation] = 1; + act2[s3GetBucketPolicy] = 1; + act2[s3GetBucketNotification] = 1; + act2[s3GetBucketLogging] = 1; + act2[s3GetBucketTagging] = 1; + act2[s3GetBucketWebsite] = 1; + act2[s3GetLifecycleConfiguration] = 1; + act2[s3GetReplicationConfiguration] = 1; + act2[s3GetObjectTagging] = 1; + act2[s3GetObjectVersionTagging] = 1; + act2[s3GetBucketObjectLockConfiguration] = 1; + act2[s3GetObjectRetention] = 1; + act2[s3GetObjectLegalHold] = 1; + act2[s3GetBucketPolicyStatus] = 1; + act2[s3GetBucketPublicAccessBlock] = 1; + act2[s3GetPublicAccessBlock] = 1; + act2[s3GetBucketEncryption] = 1; + + EXPECT_EQ(p->statements[2].action, act2); + EXPECT_EQ(p->statements[2].notaction, None); + ASSERT_FALSE(p->statements[2].resource.empty()); + ASSERT_EQ(p->statements[2].resource.size(), 2U); + EXPECT_EQ(p->statements[2].resource.begin()->partition, Partition::aws); + EXPECT_EQ(p->statements[2].resource.begin()->service, 
Service::s3); + EXPECT_TRUE(p->statements[2].resource.begin()->region.empty()); + EXPECT_EQ(p->statements[2].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[2].resource.begin()->resource, "confidential-data"); + EXPECT_EQ((p->statements[2].resource.begin() + 1)->partition, + Partition::aws); + EXPECT_EQ((p->statements[2].resource.begin() + 1)->service, Service::s3); + EXPECT_TRUE((p->statements[2].resource.begin() + 1)->region.empty()); + EXPECT_EQ((p->statements[2].resource.begin() + 1)->account, + arbitrary_tenant); + EXPECT_EQ((p->statements[2].resource.begin() + 1)->resource, + "confidential-data/*"); + EXPECT_TRUE(p->statements[2].notresource.empty()); + ASSERT_FALSE(p->statements[2].conditions.empty()); + ASSERT_EQ(p->statements[2].conditions.size(), 1U); + EXPECT_EQ(p->statements[2].conditions[0].op, TokenID::Bool); + EXPECT_EQ(p->statements[2].conditions[0].key, "aws:MultiFactorAuthPresent"); + EXPECT_FALSE(p->statements[2].conditions[0].ifexists); + ASSERT_FALSE(p->statements[2].conditions[0].vals.empty()); + EXPECT_EQ(p->statements[2].conditions[0].vals.size(), 1U); + EXPECT_EQ(p->statements[2].conditions[0].vals[0], "true"); +} + +TEST_F(PolicyTest, Eval3) { + auto p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example3), true); + Environment em; + Environment tr = { { "aws:MultiFactorAuthPresent", "true" } }; + Environment fa = { { "aws:MultiFactorAuthPresent", "false" } }; + + Action_t s3allow; + s3allow[s3ListMultipartUploadParts] = 1; + s3allow[s3ListBucket] = 1; + s3allow[s3ListBucketVersions] = 1; + s3allow[s3ListAllMyBuckets] = 1; + s3allow[s3ListBucketMultipartUploads] = 1; + s3allow[s3GetObject] = 1; + s3allow[s3GetObjectVersion] = 1; + s3allow[s3GetObjectAcl] = 1; + s3allow[s3GetObjectVersionAcl] = 1; + s3allow[s3GetObjectTorrent] = 1; + s3allow[s3GetObjectVersionTorrent] = 1; + s3allow[s3GetAccelerateConfiguration] = 1; + s3allow[s3GetBucketAcl] = 1; + s3allow[s3GetBucketCORS] = 1; + s3allow[s3GetBucketVersioning] = 1; + s3allow[s3GetBucketRequestPayment] = 1; + s3allow[s3GetBucketLocation] = 1; + s3allow[s3GetBucketPolicy] = 1; + s3allow[s3GetBucketNotification] = 1; + s3allow[s3GetBucketLogging] = 1; + s3allow[s3GetBucketTagging] = 1; + s3allow[s3GetBucketWebsite] = 1; + s3allow[s3GetLifecycleConfiguration] = 1; + s3allow[s3GetReplicationConfiguration] = 1; + s3allow[s3GetObjectTagging] = 1; + s3allow[s3GetObjectVersionTagging] = 1; + s3allow[s3GetBucketObjectLockConfiguration] = 1; + s3allow[s3GetObjectRetention] = 1; + s3allow[s3GetObjectLegalHold] = 1; + s3allow[s3GetBucketPolicyStatus] = 1; + s3allow[s3GetBucketPublicAccessBlock] = 1; + s3allow[s3GetPublicAccessBlock] = 1; + s3allow[s3GetBucketEncryption] = 1; + + ARN arn1(Partition::aws, Service::s3, + "", arbitrary_tenant, "mybucket"); + EXPECT_EQ(p.eval(em, none, s3PutBucketPolicy, arn1), + Effect::Allow); + + ARN arn2(Partition::aws, Service::s3, + "", arbitrary_tenant, "mybucket"); + EXPECT_EQ(p.eval(em, none, s3PutBucketPolicy, arn2), + Effect::Allow); + + + for (auto op = 0ULL; op < s3Count; ++op) { + if ((op == s3ListAllMyBuckets) || (op == s3PutBucketPolicy)) { + continue; + } + ARN arn3(Partition::aws, Service::s3, + "", arbitrary_tenant, "confidential-data"); + EXPECT_EQ(p.eval(em, none, op, arn3), + Effect::Pass); + ARN arn4(Partition::aws, Service::s3, + "", arbitrary_tenant, "confidential-data"); + EXPECT_EQ(p.eval(tr, none, op, arn4), + s3allow[op] ? 
Effect::Allow : Effect::Pass); + ARN arn5(Partition::aws, Service::s3, + "", arbitrary_tenant, "confidential-data"); + EXPECT_EQ(p.eval(fa, none, op, arn5), + Effect::Pass); + ARN arn6(Partition::aws, Service::s3, + "", arbitrary_tenant, "confidential-data/moo"); + EXPECT_EQ(p.eval(em, none, op, arn6), + Effect::Pass); + ARN arn7(Partition::aws, Service::s3, + "", arbitrary_tenant, "confidential-data/moo"); + EXPECT_EQ(p.eval(tr, none, op, arn7), + s3allow[op] ? Effect::Allow : Effect::Pass); + ARN arn8(Partition::aws, Service::s3, + "", arbitrary_tenant, "confidential-data/moo"); + EXPECT_EQ(p.eval(fa, none, op, arn8), + Effect::Pass); + ARN arn9(Partition::aws, Service::s3, + "", arbitrary_tenant, "really-confidential-data"); + EXPECT_EQ(p.eval(em, none, op, arn9), + Effect::Pass); + ARN arn10(Partition::aws, Service::s3, + "", arbitrary_tenant, "really-confidential-data"); + EXPECT_EQ(p.eval(tr, none, op, arn10), + Effect::Pass); + ARN arn11(Partition::aws, Service::s3, + "", arbitrary_tenant, "really-confidential-data"); + EXPECT_EQ(p.eval(fa, none, op, arn11), + Effect::Pass); + ARN arn12(Partition::aws, Service::s3, + "", arbitrary_tenant, + "really-confidential-data/moo"); + EXPECT_EQ(p.eval(em, none, op, arn12), Effect::Pass); + ARN arn13(Partition::aws, Service::s3, + "", arbitrary_tenant, + "really-confidential-data/moo"); + EXPECT_EQ(p.eval(tr, none, op, arn13), Effect::Pass); + ARN arn14(Partition::aws, Service::s3, + "", arbitrary_tenant, + "really-confidential-data/moo"); + EXPECT_EQ(p.eval(fa, none, op, arn14), Effect::Pass); + + } +} + +TEST_F(PolicyTest, Parse4) { + boost::optional<Policy> p; + + ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example4), true)); + ASSERT_TRUE(p); + + EXPECT_EQ(p->text, example4); + EXPECT_EQ(p->version, Version::v2012_10_17); + EXPECT_FALSE(p->id); + EXPECT_FALSE(p->statements[0].sid); + EXPECT_FALSE(p->statements.empty()); + EXPECT_EQ(p->statements.size(), 1U); + EXPECT_TRUE(p->statements[0].princ.empty()); + EXPECT_TRUE(p->statements[0].noprinc.empty()); + EXPECT_EQ(p->statements[0].effect, Effect::Allow); + Action_t act; + act[iamCreateRole] = 1; + EXPECT_EQ(p->statements[0].action, act); + EXPECT_EQ(p->statements[0].notaction, None); + ASSERT_FALSE(p->statements[0].resource.empty()); + ASSERT_EQ(p->statements[0].resource.size(), 1U); + EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::wildcard); + EXPECT_EQ(p->statements[0].resource.begin()->service, Service::wildcard); + EXPECT_EQ(p->statements[0].resource.begin()->region, "*"); + EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[0].resource.begin()->resource, "*"); + EXPECT_TRUE(p->statements[0].notresource.empty()); + EXPECT_TRUE(p->statements[0].conditions.empty()); +} + +TEST_F(PolicyTest, Eval4) { + auto p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example4), true); + Environment e; + + ARN arn1(Partition::aws, Service::iam, + "", arbitrary_tenant, "role/example_role"); + EXPECT_EQ(p.eval(e, none, iamCreateRole, arn1), + Effect::Allow); + + ARN arn2(Partition::aws, Service::iam, + "", arbitrary_tenant, "role/example_role"); + EXPECT_EQ(p.eval(e, none, iamDeleteRole, arn2), + Effect::Pass); +} + +TEST_F(PolicyTest, Parse5) { + boost::optional<Policy> p; + + ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example5), true)); + ASSERT_TRUE(p); + EXPECT_EQ(p->text, example5); + EXPECT_EQ(p->version, 
Version::v2012_10_17); + EXPECT_FALSE(p->id); + EXPECT_FALSE(p->statements[0].sid); + EXPECT_FALSE(p->statements.empty()); + EXPECT_EQ(p->statements.size(), 1U); + EXPECT_TRUE(p->statements[0].princ.empty()); + EXPECT_TRUE(p->statements[0].noprinc.empty()); + EXPECT_EQ(p->statements[0].effect, Effect::Allow); + Action_t act; + for (auto i = s3All+1; i <= iamAll; i++) + act[i] = 1; + EXPECT_EQ(p->statements[0].action, act); + EXPECT_EQ(p->statements[0].notaction, None); + ASSERT_FALSE(p->statements[0].resource.empty()); + ASSERT_EQ(p->statements[0].resource.size(), 1U); + EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws); + EXPECT_EQ(p->statements[0].resource.begin()->service, Service::iam); + EXPECT_EQ(p->statements[0].resource.begin()->region, ""); + EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[0].resource.begin()->resource, "role/example_role"); + EXPECT_TRUE(p->statements[0].notresource.empty()); + EXPECT_TRUE(p->statements[0].conditions.empty()); +} + +TEST_F(PolicyTest, Eval5) { + auto p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example5), true); + Environment e; + + ARN arn1(Partition::aws, Service::iam, + "", arbitrary_tenant, "role/example_role"); + EXPECT_EQ(p.eval(e, none, iamCreateRole, arn1), + Effect::Allow); + + ARN arn2(Partition::aws, Service::iam, + "", arbitrary_tenant, "role/example_role"); + EXPECT_EQ(p.eval(e, none, s3ListBucket, arn2), + Effect::Pass); + + ARN arn3(Partition::aws, Service::iam, + "", "", "role/example_role"); + EXPECT_EQ(p.eval(e, none, iamCreateRole, arn3), + Effect::Pass); +} + +TEST_F(PolicyTest, Parse6) { + boost::optional<Policy> p; + + ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example6), true)); + ASSERT_TRUE(p); + EXPECT_EQ(p->text, example6); + EXPECT_EQ(p->version, Version::v2012_10_17); + EXPECT_FALSE(p->id); + EXPECT_FALSE(p->statements[0].sid); + EXPECT_FALSE(p->statements.empty()); + EXPECT_EQ(p->statements.size(), 1U); + EXPECT_TRUE(p->statements[0].princ.empty()); + EXPECT_TRUE(p->statements[0].noprinc.empty()); + EXPECT_EQ(p->statements[0].effect, Effect::Allow); + Action_t act; + for (auto i = 0U; i <= stsAll; i++) + act[i] = 1; + EXPECT_EQ(p->statements[0].action, act); + EXPECT_EQ(p->statements[0].notaction, None); + ASSERT_FALSE(p->statements[0].resource.empty()); + ASSERT_EQ(p->statements[0].resource.size(), 1U); + EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws); + EXPECT_EQ(p->statements[0].resource.begin()->service, Service::iam); + EXPECT_EQ(p->statements[0].resource.begin()->region, ""); + EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[0].resource.begin()->resource, "user/A"); + EXPECT_TRUE(p->statements[0].notresource.empty()); + EXPECT_TRUE(p->statements[0].conditions.empty()); +} + +TEST_F(PolicyTest, Eval6) { + auto p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example6), true); + Environment e; + + ARN arn1(Partition::aws, Service::iam, + "", arbitrary_tenant, "user/A"); + EXPECT_EQ(p.eval(e, none, iamCreateRole, arn1), + Effect::Allow); + + ARN arn2(Partition::aws, Service::iam, + "", arbitrary_tenant, "user/A"); + EXPECT_EQ(p.eval(e, none, s3ListBucket, arn2), + Effect::Allow); +} + +TEST_F(PolicyTest, Parse7) { + boost::optional<Policy> p; + + ASSERT_NO_THROW(p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example7), true)); + ASSERT_TRUE(p); + 
+ EXPECT_EQ(p->text, example7); + EXPECT_EQ(p->version, Version::v2012_10_17); + ASSERT_FALSE(p->statements.empty()); + EXPECT_EQ(p->statements.size(), 1U); + EXPECT_FALSE(p->statements[0].princ.empty()); + EXPECT_EQ(p->statements[0].princ.size(), 1U); + EXPECT_TRUE(p->statements[0].noprinc.empty()); + EXPECT_EQ(p->statements[0].effect, Effect::Allow); + Action_t act; + act[s3ListBucket] = 1; + EXPECT_EQ(p->statements[0].action, act); + EXPECT_EQ(p->statements[0].notaction, None); + ASSERT_FALSE(p->statements[0].resource.empty()); + ASSERT_EQ(p->statements[0].resource.size(), 1U); + EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws); + EXPECT_EQ(p->statements[0].resource.begin()->service, Service::s3); + EXPECT_TRUE(p->statements[0].resource.begin()->region.empty()); + EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[0].resource.begin()->resource, "mybucket/*"); + EXPECT_TRUE(p->statements[0].princ.begin()->is_user()); + EXPECT_FALSE(p->statements[0].princ.begin()->is_wildcard()); + EXPECT_EQ(p->statements[0].princ.begin()->get_tenant(), ""); + EXPECT_EQ(p->statements[0].princ.begin()->get_id(), "A:subA"); + EXPECT_TRUE(p->statements[0].notresource.empty()); + EXPECT_TRUE(p->statements[0].conditions.empty()); +} + +TEST_F(PolicyTest, Eval7) { + auto p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(example7), true); + Environment e; + + auto subacct = FakeIdentity( + Principal::user(std::move(""), "A:subA")); + auto parentacct = FakeIdentity( + Principal::user(std::move(""), "A")); + auto sub2acct = FakeIdentity( + Principal::user(std::move(""), "A:sub2A")); + + ARN arn1(Partition::aws, Service::s3, + "", arbitrary_tenant, "mybucket/*"); + EXPECT_EQ(p.eval(e, subacct, s3ListBucket, arn1), + Effect::Allow); + + ARN arn2(Partition::aws, Service::s3, + "", arbitrary_tenant, "mybucket/*"); + EXPECT_EQ(p.eval(e, parentacct, s3ListBucket, arn2), + Effect::Pass); + + ARN arn3(Partition::aws, Service::s3, + "", arbitrary_tenant, "mybucket/*"); + EXPECT_EQ(p.eval(e, sub2acct, s3ListBucket, arn3), + Effect::Pass); +} + +const string PolicyTest::arbitrary_tenant = "arbitrary_tenant"; +string PolicyTest::example1 = R"( +{ + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::example_bucket" + } +} +)"; + +string PolicyTest::example2 = R"( +{ + "Version": "2012-10-17", + "Id": "S3-Account-Permissions", + "Statement": [{ + "Sid": "1", + "Effect": "Allow", + "Principal": {"AWS": ["arn:aws:iam::ACCOUNT-ID-WITHOUT-HYPHENS:root"]}, + "Action": "s3:*", + "Resource": [ + "arn:aws:s3:::mybucket", + "arn:aws:s3:::mybucket/*" + ] + }] +} +)"; + +string PolicyTest::example3 = R"( +{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "FirstStatement", + "Effect": "Allow", + "Action": ["s3:PutBucketPolicy"], + "Resource": "*" + }, + { + "Sid": "SecondStatement", + "Effect": "Allow", + "Action": "s3:ListAllMyBuckets", + "Resource": "*" + }, + { + "Sid": "ThirdStatement", + "Effect": "Allow", + "Action": [ + "s3:List*", + "s3:Get*" + ], + "Resource": [ + "arn:aws:s3:::confidential-data", + "arn:aws:s3:::confidential-data/*" + ], + "Condition": {"Bool": {"aws:MultiFactorAuthPresent": "true"}} + } + ] +} +)"; + +string PolicyTest::example4 = R"( +{ + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "iam:CreateRole", + "Resource": "*" + } +} +)"; + +string PolicyTest::example5 = R"( +{ + "Version": "2012-10-17", + "Statement": { 
+ "Effect": "Allow", + "Action": "iam:*", + "Resource": "arn:aws:iam:::role/example_role" + } +} +)"; + +string PolicyTest::example6 = R"( +{ + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Action": "*", + "Resource": "arn:aws:iam:::user/A" + } +} +)"; + +string PolicyTest::example7 = R"( +{ + "Version": "2012-10-17", + "Statement": { + "Effect": "Allow", + "Principal": {"AWS": ["arn:aws:iam:::user/A:subA"]}, + "Action": "s3:ListBucket", + "Resource": "arn:aws:s3:::mybucket/*" + } +} +)"; +class IPPolicyTest : public ::testing::Test { +protected: + intrusive_ptr<CephContext> cct; + static const string arbitrary_tenant; + static string ip_address_allow_example; + static string ip_address_deny_example; + static string ip_address_full_example; + // 192.168.1.0/24 + const rgw::IAM::MaskedIP allowedIPv4Range = { false, rgw::IAM::Address("11000000101010000000000100000000"), 24 }; + // 192.168.1.1/32 + const rgw::IAM::MaskedIP blocklistedIPv4 = { false, rgw::IAM::Address("11000000101010000000000100000001"), 32 }; + // 2001:db8:85a3:0:0:8a2e:370:7334/128 + const rgw::IAM::MaskedIP allowedIPv6 = { true, rgw::IAM::Address("00100000000000010000110110111000100001011010001100000000000000000000000000000000100010100010111000000011011100000111001100110100"), 128 }; + // ::1 + const rgw::IAM::MaskedIP blocklistedIPv6 = { true, rgw::IAM::Address(1), 128 }; + // 2001:db8:85a3:0:0:8a2e:370:7330/124 + const rgw::IAM::MaskedIP allowedIPv6Range = { true, rgw::IAM::Address("00100000000000010000110110111000100001011010001100000000000000000000000000000000100010100010111000000011011100000111001100110000"), 124 }; +public: + IPPolicyTest() { + cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); + } +}; +const string IPPolicyTest::arbitrary_tenant = "arbitrary_tenant"; + +TEST_F(IPPolicyTest, MaskedIPOperations) { + EXPECT_EQ(stringify(allowedIPv4Range), "192.168.1.0/24"); + EXPECT_EQ(stringify(blocklistedIPv4), "192.168.1.1/32"); + EXPECT_EQ(stringify(allowedIPv6), "2001:db8:85a3:0:0:8a2e:370:7334/128"); + EXPECT_EQ(stringify(allowedIPv6Range), "2001:db8:85a3:0:0:8a2e:370:7330/124"); + EXPECT_EQ(stringify(blocklistedIPv6), "0:0:0:0:0:0:0:1/128"); + EXPECT_EQ(allowedIPv4Range, blocklistedIPv4); + EXPECT_EQ(allowedIPv6Range, allowedIPv6); +} + +TEST_F(IPPolicyTest, asNetworkIPv4Range) { + auto actualIPv4Range = rgw::IAM::Condition::as_network("192.168.1.0/24"); + ASSERT_TRUE(actualIPv4Range.is_initialized()); + EXPECT_EQ(*actualIPv4Range, allowedIPv4Range); +} + +TEST_F(IPPolicyTest, asNetworkIPv4) { + auto actualIPv4 = rgw::IAM::Condition::as_network("192.168.1.1"); + ASSERT_TRUE(actualIPv4.is_initialized()); + EXPECT_EQ(*actualIPv4, blocklistedIPv4); +} + +TEST_F(IPPolicyTest, asNetworkIPv6Range) { + auto actualIPv6Range = rgw::IAM::Condition::as_network("2001:db8:85a3:0:0:8a2e:370:7330/124"); + ASSERT_TRUE(actualIPv6Range.is_initialized()); + EXPECT_EQ(*actualIPv6Range, allowedIPv6Range); +} + +TEST_F(IPPolicyTest, asNetworkIPv6) { + auto actualIPv6 = rgw::IAM::Condition::as_network("2001:db8:85a3:0:0:8a2e:370:7334"); + ASSERT_TRUE(actualIPv6.is_initialized()); + EXPECT_EQ(*actualIPv6, allowedIPv6); +} + +TEST_F(IPPolicyTest, asNetworkInvalid) { + EXPECT_FALSE(rgw::IAM::Condition::as_network("")); + EXPECT_FALSE(rgw::IAM::Condition::as_network("192.168.1.1/33")); + EXPECT_FALSE(rgw::IAM::Condition::as_network("2001:db8:85a3:0:0:8a2e:370:7334/129")); + EXPECT_FALSE(rgw::IAM::Condition::as_network("192.168.1.1:")); + EXPECT_FALSE(rgw::IAM::Condition::as_network("1.2.3.10000")); +} + 
+TEST_F(IPPolicyTest, IPEnvironment) { + RGWProcessEnv penv; + // Unfortunately RGWCivetWeb is too tightly tied to civetweb to test RGWCivetWeb::init_env. + RGWEnv rgw_env; + rgw::sal::RadosStore store; + std::unique_ptr<rgw::sal::User> user = store.get_user(rgw_user()); + rgw_env.set("REMOTE_ADDR", "192.168.1.1"); + rgw_env.set("HTTP_HOST", "1.2.3.4"); + req_state rgw_req_state(cct.get(), penv, &rgw_env, 0); + rgw_req_state.set_user(user); + rgw_build_iam_environment(&store, &rgw_req_state); + auto ip = rgw_req_state.env.find("aws:SourceIp"); + ASSERT_NE(ip, rgw_req_state.env.end()); + EXPECT_EQ(ip->second, "192.168.1.1"); + + ASSERT_EQ(cct.get()->_conf.set_val("rgw_remote_addr_param", "SOME_VAR"), 0); + EXPECT_EQ(cct.get()->_conf->rgw_remote_addr_param, "SOME_VAR"); + rgw_req_state.env.clear(); + rgw_build_iam_environment(&store, &rgw_req_state); + ip = rgw_req_state.env.find("aws:SourceIp"); + EXPECT_EQ(ip, rgw_req_state.env.end()); + + rgw_env.set("SOME_VAR", "192.168.1.2"); + rgw_req_state.env.clear(); + rgw_build_iam_environment(&store, &rgw_req_state); + ip = rgw_req_state.env.find("aws:SourceIp"); + ASSERT_NE(ip, rgw_req_state.env.end()); + EXPECT_EQ(ip->second, "192.168.1.2"); + + ASSERT_EQ(cct.get()->_conf.set_val("rgw_remote_addr_param", "HTTP_X_FORWARDED_FOR"), 0); + rgw_env.set("HTTP_X_FORWARDED_FOR", "192.168.1.3"); + rgw_req_state.env.clear(); + rgw_build_iam_environment(&store, &rgw_req_state); + ip = rgw_req_state.env.find("aws:SourceIp"); + ASSERT_NE(ip, rgw_req_state.env.end()); + EXPECT_EQ(ip->second, "192.168.1.3"); + + rgw_env.set("HTTP_X_FORWARDED_FOR", "192.168.1.4, 4.3.2.1, 2001:db8:85a3:8d3:1319:8a2e:370:7348"); + rgw_req_state.env.clear(); + rgw_build_iam_environment(&store, &rgw_req_state); + ip = rgw_req_state.env.find("aws:SourceIp"); + ASSERT_NE(ip, rgw_req_state.env.end()); + EXPECT_EQ(ip->second, "192.168.1.4"); +} + +TEST_F(IPPolicyTest, ParseIPAddress) { + boost::optional<Policy> p; + + ASSERT_NO_THROW( + p = Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(ip_address_full_example), true)); + ASSERT_TRUE(p); + + EXPECT_EQ(p->text, ip_address_full_example); + EXPECT_EQ(p->version, Version::v2012_10_17); + EXPECT_EQ(*p->id, "S3IPPolicyTest"); + EXPECT_FALSE(p->statements.empty()); + EXPECT_EQ(p->statements.size(), 1U); + EXPECT_EQ(*p->statements[0].sid, "IPAllow"); + EXPECT_FALSE(p->statements[0].princ.empty()); + EXPECT_EQ(p->statements[0].princ.size(), 1U); + EXPECT_EQ(*p->statements[0].princ.begin(), + Principal::wildcard()); + EXPECT_TRUE(p->statements[0].noprinc.empty()); + EXPECT_EQ(p->statements[0].effect, Effect::Allow); + Action_t act; + act[s3ListBucket] = 1; + EXPECT_EQ(p->statements[0].action, act); + EXPECT_EQ(p->statements[0].notaction, None); + ASSERT_FALSE(p->statements[0].resource.empty()); + ASSERT_EQ(p->statements[0].resource.size(), 2U); + EXPECT_EQ(p->statements[0].resource.begin()->partition, Partition::aws); + EXPECT_EQ(p->statements[0].resource.begin()->service, Service::s3); + EXPECT_TRUE(p->statements[0].resource.begin()->region.empty()); + EXPECT_EQ(p->statements[0].resource.begin()->account, arbitrary_tenant); + EXPECT_EQ(p->statements[0].resource.begin()->resource, "example_bucket"); + EXPECT_EQ((p->statements[0].resource.begin() + 1)->resource, "example_bucket/*"); + EXPECT_TRUE(p->statements[0].notresource.empty()); + ASSERT_FALSE(p->statements[0].conditions.empty()); + ASSERT_EQ(p->statements[0].conditions.size(), 2U); + EXPECT_EQ(p->statements[0].conditions[0].op, TokenID::IpAddress); + 
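+  // conditions[0] maps to the "IpAddress" clause of ip_address_full_example;
+  // conditions[1], checked below, maps to its "NotIpAddress" clause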
EXPECT_EQ(p->statements[0].conditions[0].key, "aws:SourceIp"); + ASSERT_FALSE(p->statements[0].conditions[0].vals.empty()); + EXPECT_EQ(p->statements[0].conditions[0].vals.size(), 2U); + EXPECT_EQ(p->statements[0].conditions[0].vals[0], "192.168.1.0/24"); + EXPECT_EQ(p->statements[0].conditions[0].vals[1], "::1"); + boost::optional<rgw::IAM::MaskedIP> convertedIPv4 = rgw::IAM::Condition::as_network(p->statements[0].conditions[0].vals[0]); + EXPECT_TRUE(convertedIPv4.is_initialized()); + if (convertedIPv4.is_initialized()) { + EXPECT_EQ(*convertedIPv4, allowedIPv4Range); + } + + EXPECT_EQ(p->statements[0].conditions[1].op, TokenID::NotIpAddress); + EXPECT_EQ(p->statements[0].conditions[1].key, "aws:SourceIp"); + ASSERT_FALSE(p->statements[0].conditions[1].vals.empty()); + EXPECT_EQ(p->statements[0].conditions[1].vals.size(), 2U); + EXPECT_EQ(p->statements[0].conditions[1].vals[0], "192.168.1.1/32"); + EXPECT_EQ(p->statements[0].conditions[1].vals[1], "2001:0db8:85a3:0000:0000:8a2e:0370:7334"); + boost::optional<rgw::IAM::MaskedIP> convertedIPv6 = rgw::IAM::Condition::as_network(p->statements[0].conditions[1].vals[1]); + EXPECT_TRUE(convertedIPv6.is_initialized()); + if (convertedIPv6.is_initialized()) { + EXPECT_EQ(*convertedIPv6, allowedIPv6); + } +} + +TEST_F(IPPolicyTest, EvalIPAddress) { + auto allowp = + Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(ip_address_allow_example), true); + auto denyp = + Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(ip_address_deny_example), true); + auto fullp = + Policy(cct.get(), arbitrary_tenant, + bufferlist::static_from_string(ip_address_full_example), true); + Environment e; + Environment allowedIP, blocklistedIP, allowedIPv6, blocklistedIPv6; + allowedIP.emplace("aws:SourceIp","192.168.1.2"); + allowedIPv6.emplace("aws:SourceIp", "::1"); + blocklistedIP.emplace("aws:SourceIp", "192.168.1.1"); + blocklistedIPv6.emplace("aws:SourceIp", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"); + + auto trueacct = FakeIdentity( + Principal::tenant("ACCOUNT-ID-WITHOUT-HYPHENS")); + // Without an IP address in the environment then evaluation will always pass + ARN arn1(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(allowp.eval(e, trueacct, s3ListBucket, arn1), + Effect::Pass); + ARN arn2(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket/myobject"); + EXPECT_EQ(fullp.eval(e, trueacct, s3ListBucket, arn2), + Effect::Pass); + + ARN arn3(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(allowp.eval(allowedIP, trueacct, s3ListBucket, arn3), + Effect::Allow); + ARN arn4(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(allowp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn4), + Effect::Pass); + + ARN arn5(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(denyp.eval(allowedIP, trueacct, s3ListBucket, arn5), + Effect::Deny); + ARN arn6(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket/myobject"); + EXPECT_EQ(denyp.eval(allowedIP, trueacct, s3ListBucket, arn6), + Effect::Deny); + + ARN arn7(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(denyp.eval(blocklistedIP, trueacct, s3ListBucket, arn7), + Effect::Pass); + ARN arn8(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket/myobject"); + EXPECT_EQ(denyp.eval(blocklistedIP, trueacct, s3ListBucket, arn8), + Effect::Pass); + + ARN arn9(Partition::aws, 
Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(denyp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn9), + Effect::Pass); + ARN arn10(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket/myobject"); + EXPECT_EQ(denyp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn10), + Effect::Pass); + ARN arn11(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(denyp.eval(allowedIPv6, trueacct, s3ListBucket, arn11), + Effect::Deny); + ARN arn12(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket/myobject"); + EXPECT_EQ(denyp.eval(allowedIPv6, trueacct, s3ListBucket, arn12), + Effect::Deny); + + ARN arn13(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(fullp.eval(allowedIP, trueacct, s3ListBucket, arn13), + Effect::Allow); + ARN arn14(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket/myobject"); + EXPECT_EQ(fullp.eval(allowedIP, trueacct, s3ListBucket, arn14), + Effect::Allow); + + ARN arn15(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(fullp.eval(blocklistedIP, trueacct, s3ListBucket, arn15), + Effect::Pass); + ARN arn16(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket/myobject"); + EXPECT_EQ(fullp.eval(blocklistedIP, trueacct, s3ListBucket, arn16), + Effect::Pass); + + ARN arn17(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(fullp.eval(allowedIPv6, trueacct, s3ListBucket, arn17), + Effect::Allow); + ARN arn18(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket/myobject"); + EXPECT_EQ(fullp.eval(allowedIPv6, trueacct, s3ListBucket, arn18), + Effect::Allow); + + ARN arn19(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket"); + EXPECT_EQ(fullp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn19), + Effect::Pass); + ARN arn20(Partition::aws, Service::s3, + "", arbitrary_tenant, "example_bucket/myobject"); + EXPECT_EQ(fullp.eval(blocklistedIPv6, trueacct, s3ListBucket, arn20), + Effect::Pass); +} + +string IPPolicyTest::ip_address_allow_example = R"( +{ + "Version": "2012-10-17", + "Id": "S3SimpleIPPolicyTest", + "Statement": [{ + "Sid": "1", + "Effect": "Allow", + "Principal": {"AWS": ["arn:aws:iam::ACCOUNT-ID-WITHOUT-HYPHENS:root"]}, + "Action": "s3:ListBucket", + "Resource": [ + "arn:aws:s3:::example_bucket" + ], + "Condition": { + "IpAddress": {"aws:SourceIp": "192.168.1.0/24"} + } + }] +} +)"; + +string IPPolicyTest::ip_address_deny_example = R"( +{ + "Version": "2012-10-17", + "Id": "S3IPPolicyTest", + "Statement": { + "Effect": "Deny", + "Sid": "IPDeny", + "Action": "s3:ListBucket", + "Principal": {"AWS": ["arn:aws:iam::ACCOUNT-ID-WITHOUT-HYPHENS:root"]}, + "Resource": [ + "arn:aws:s3:::example_bucket", + "arn:aws:s3:::example_bucket/*" + ], + "Condition": { + "NotIpAddress": {"aws:SourceIp": ["192.168.1.1/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"]} + } + } +} +)"; + +string IPPolicyTest::ip_address_full_example = R"( +{ + "Version": "2012-10-17", + "Id": "S3IPPolicyTest", + "Statement": { + "Effect": "Allow", + "Sid": "IPAllow", + "Action": "s3:ListBucket", + "Principal": "*", + "Resource": [ + "arn:aws:s3:::example_bucket", + "arn:aws:s3:::example_bucket/*" + ], + "Condition": { + "IpAddress": {"aws:SourceIp": ["192.168.1.0/24", "::1"]}, + "NotIpAddress": {"aws:SourceIp": ["192.168.1.1/32", "2001:0db8:85a3:0000:0000:8a2e:0370:7334"]} + } + } +} +)"; + +TEST(MatchWildcards, Simple) +{ + EXPECT_TRUE(match_wildcards("", "")); + 
EXPECT_TRUE(match_wildcards("", "", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("", "abc")); + EXPECT_FALSE(match_wildcards("", "abc", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("abc", "")); + EXPECT_FALSE(match_wildcards("abc", "", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("abc", "abc")); + EXPECT_TRUE(match_wildcards("abc", "abc", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("abc", "abC")); + EXPECT_TRUE(match_wildcards("abc", "abC", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("abC", "abc")); + EXPECT_TRUE(match_wildcards("abC", "abc", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("abc", "abcd")); + EXPECT_FALSE(match_wildcards("abc", "abcd", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("abcd", "abc")); + EXPECT_FALSE(match_wildcards("abcd", "abc", MATCH_CASE_INSENSITIVE)); +} + +TEST(MatchWildcards, QuestionMark) +{ + EXPECT_FALSE(match_wildcards("?", "")); + EXPECT_FALSE(match_wildcards("?", "", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("?", "a")); + EXPECT_TRUE(match_wildcards("?", "a", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("?bc", "abc")); + EXPECT_TRUE(match_wildcards("?bc", "abc", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("a?c", "abc")); + EXPECT_TRUE(match_wildcards("a?c", "abc", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("abc", "a?c")); + EXPECT_FALSE(match_wildcards("abc", "a?c", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("a?c", "abC")); + EXPECT_TRUE(match_wildcards("a?c", "abC", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("ab?", "abc")); + EXPECT_TRUE(match_wildcards("ab?", "abc", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("a?c?e", "abcde")); + EXPECT_TRUE(match_wildcards("a?c?e", "abcde", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("???", "abc")); + EXPECT_TRUE(match_wildcards("???", "abc", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("???", "abcd")); + EXPECT_FALSE(match_wildcards("???", "abcd", MATCH_CASE_INSENSITIVE)); +} + +TEST(MatchWildcards, Asterisk) +{ + EXPECT_TRUE(match_wildcards("*", "")); + EXPECT_TRUE(match_wildcards("*", "", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("", "*")); + EXPECT_FALSE(match_wildcards("", "*", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("*a", "")); + EXPECT_FALSE(match_wildcards("*a", "", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("*a", "a")); + EXPECT_TRUE(match_wildcards("*a", "a", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("a*", "a")); + EXPECT_TRUE(match_wildcards("a*", "a", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("a*c", "ac")); + EXPECT_TRUE(match_wildcards("a*c", "ac", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("a*c", "abbc")); + EXPECT_TRUE(match_wildcards("a*c", "abbc", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("a*c", "abbC")); + EXPECT_TRUE(match_wildcards("a*c", "abbC", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("a*c*e", "abBce")); + EXPECT_TRUE(match_wildcards("a*c*e", "abBce", MATCH_CASE_INSENSITIVE)); + EXPECT_TRUE(match_wildcards("http://*.example.com", + "http://www.example.com")); + EXPECT_TRUE(match_wildcards("http://*.example.com", + "http://www.example.com", MATCH_CASE_INSENSITIVE)); + EXPECT_FALSE(match_wildcards("http://*.example.com", + "http://www.Example.com")); + EXPECT_TRUE(match_wildcards("http://*.example.com", + "http://www.Example.com", MATCH_CASE_INSENSITIVE)); + 
EXPECT_TRUE(match_wildcards("http://example.com/*", + "http://example.com/index.html")); + EXPECT_TRUE(match_wildcards("http://example.com/*/*.jpg", + "http://example.com/fun/smiley.jpg")); + // note: parsing of * is not greedy, so * does not match 'bc' here + EXPECT_FALSE(match_wildcards("a*c", "abcc")); + EXPECT_FALSE(match_wildcards("a*c", "abcc", MATCH_CASE_INSENSITIVE)); +} + +TEST(MatchPolicy, Action) +{ + constexpr auto flag = MATCH_POLICY_ACTION; + EXPECT_TRUE(match_policy("a:b:c", "a:b:c", flag)); + EXPECT_TRUE(match_policy("a:b:c", "A:B:C", flag)); // case insensitive + EXPECT_TRUE(match_policy("a:*:e", "a:bcd:e", flag)); + EXPECT_FALSE(match_policy("a:*", "a:b:c", flag)); // cannot span segments +} + +TEST(MatchPolicy, Resource) +{ + constexpr auto flag = MATCH_POLICY_RESOURCE; + EXPECT_TRUE(match_policy("a:b:c", "a:b:c", flag)); + EXPECT_FALSE(match_policy("a:b:c", "A:B:C", flag)); // case sensitive + EXPECT_TRUE(match_policy("a:*:e", "a:bcd:e", flag)); + EXPECT_TRUE(match_policy("a:*", "a:b:c", flag)); // can span segments +} + +TEST(MatchPolicy, ARN) +{ + constexpr auto flag = MATCH_POLICY_ARN; + EXPECT_TRUE(match_policy("a:b:c", "a:b:c", flag)); + EXPECT_TRUE(match_policy("a:b:c", "A:B:C", flag)); // case insensitive + EXPECT_TRUE(match_policy("a:*:e", "a:bcd:e", flag)); + EXPECT_FALSE(match_policy("a:*", "a:b:c", flag)); // cannot span segments +} + +TEST(MatchPolicy, String) +{ + constexpr auto flag = MATCH_POLICY_STRING; + EXPECT_TRUE(match_policy("a:b:c", "a:b:c", flag)); + EXPECT_FALSE(match_policy("a:b:c", "A:B:C", flag)); // case sensitive + EXPECT_TRUE(match_policy("a:*:e", "a:bcd:e", flag)); + EXPECT_TRUE(match_policy("a:*", "a:b:c", flag)); // can span segments +} + +Action_t set_range_bits(std::uint64_t start, std::uint64_t end) +{ + Action_t result; + for (uint64_t i = start; i < end; i++) { + result.set(i); + } + return result; +} + +using rgw::IAM::s3AllValue; +using rgw::IAM::stsAllValue; +using rgw::IAM::allValue; +using rgw::IAM::iamAllValue; +TEST(set_cont_bits, iamconsts) +{ + EXPECT_EQ(s3AllValue, set_range_bits(0, s3All)); + EXPECT_EQ(iamAllValue, set_range_bits(s3All+1, iamAll)); + EXPECT_EQ(stsAllValue, set_range_bits(iamAll+1, stsAll)); + EXPECT_EQ(allValue , set_range_bits(0, allCount)); +} diff --git a/src/test/rgw/test_rgw_kms.cc b/src/test/rgw/test_rgw_kms.cc new file mode 100644 index 000000000..49ee747f7 --- /dev/null +++ b/src/test/rgw/test_rgw_kms.cc @@ -0,0 +1,294 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include <gtest/gtest.h> +#include <gmock/gmock.h> +#include "common/ceph_context.h" +#include "rgw_common.h" +#define FORTEST_VIRTUAL virtual +#include "rgw_kms.cc" + +using ::testing::_; +using ::testing::Action; +using ::testing::ActionInterface; +using ::testing::MakeAction; +using ::testing::StrEq; + + +class MockTransitSecretEngine : public TransitSecretEngine { + +public: + MockTransitSecretEngine(CephContext *cct, SSEContext & kctx, EngineParmMap parms) : TransitSecretEngine(cct, kctx, parms){} + + MOCK_METHOD(int, send_request, (const DoutPrefixProvider *dpp, const char *method, std::string_view infix, std::string_view key_id, const std::string& postdata, bufferlist &bl), (override)); + +}; + +class MockKvSecretEngine : public KvSecretEngine { + +public: + MockKvSecretEngine(CephContext *cct, SSEContext & kctx, EngineParmMap parms) : KvSecretEngine(cct, kctx, parms){} + + MOCK_METHOD(int, send_request, (const DoutPrefixProvider *dpp, const char *method, std::string_view 
infix, std::string_view key_id, const std::string& postdata, bufferlist &bl), (override)); + +}; + +class TestSSEKMS : public ::testing::Test { + +protected: + CephContext *cct; + MockTransitSecretEngine* old_engine; + MockKvSecretEngine* kv_engine; + MockTransitSecretEngine* transit_engine; + + void SetUp() override { + EngineParmMap old_parms, kv_parms, new_parms; + cct = (new CephContext(CEPH_ENTITY_TYPE_ANY))->get(); + KMSContext kctx { cct }; + old_parms["compat"] = "2"; + old_engine = new MockTransitSecretEngine(cct, kctx, std::move(old_parms)); + kv_engine = new MockKvSecretEngine(cct, kctx, std::move(kv_parms)); + new_parms["compat"] = "1"; + transit_engine = new MockTransitSecretEngine(cct, kctx, std::move(new_parms)); + } + + void TearDown() { + delete old_engine; + delete kv_engine; + delete transit_engine; + } + +}; + + +TEST_F(TestSSEKMS, vault_token_file_unset) +{ + cct->_conf.set_val("rgw_crypt_vault_auth", "token"); + EngineParmMap old_parms, kv_parms; + KMSContext kctx { cct }; + TransitSecretEngine te(cct, kctx, std::move(old_parms)); + KvSecretEngine kv(cct, kctx, std::move(kv_parms)); + const NoDoutPrefix no_dpp(cct, 1); + + std::string_view key_id("my_key"); + std::string actual_key; + + ASSERT_EQ(te.get_key(&no_dpp, key_id, actual_key), -EINVAL); + ASSERT_EQ(kv.get_key(&no_dpp, key_id, actual_key), -EINVAL); +} + + +TEST_F(TestSSEKMS, non_existent_vault_token_file) +{ + cct->_conf.set_val("rgw_crypt_vault_auth", "token"); + cct->_conf.set_val("rgw_crypt_vault_token_file", "/nonexistent/file"); + EngineParmMap old_parms, kv_parms; + KMSContext kctx { cct }; + TransitSecretEngine te(cct, kctx, std::move(old_parms)); + KvSecretEngine kv(cct, kctx, std::move(kv_parms)); + const NoDoutPrefix no_dpp(cct, 1); + + std::string_view key_id("my_key/1"); + std::string actual_key; + + ASSERT_EQ(te.get_key(&no_dpp, key_id, actual_key), -ENOENT); + ASSERT_EQ(kv.get_key(&no_dpp, key_id, actual_key), -ENOENT); +} + + +typedef int SendRequestMethod(const DoutPrefixProvider *dpp, const char *, + std::string_view, std::string_view, + const std::string &, bufferlist &); + +class SetPointedValueAction : public ActionInterface<SendRequestMethod> { + public: + std::string json; + + SetPointedValueAction(std::string json){ + this->json = json; + } + + int Perform(const ::std::tuple<const DoutPrefixProvider*, const char *, std::string_view, std::string_view, const std::string &, bufferlist &>& args) override { +// const DoutPrefixProvider *dpp = ::std::get<0>(args); +// const char *method = ::std::get<1>(args); +// std::string_view infix = ::std::get<2>(args); +// std::string_view key_id = ::std::get<3>(args); +// const std::string& postdata = ::std::get<4>(args); + bufferlist& bl = ::std::get<5>(args); + +// std::cout << "method = " << method << " infix = " << infix << " key_id = " << key_id +// << " postdata = " << postdata +// << " => json = " << json +// << std::endl; + + bl.append(json); + // note: in the bufferlist, the string is not + // necessarily 0 terminated at this point. Logic in + // rgw_kms.cc must handle this (by appending a 0.) 
+ return 0; + } +}; + +Action<SendRequestMethod> SetPointedValue(std::string json) { + return MakeAction(new SetPointedValueAction(json)); +} + + +TEST_F(TestSSEKMS, test_transit_key_version_extraction){ + const NoDoutPrefix no_dpp(cct, 1); + string json = R"({"data": {"keys": {"6": "8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg="}}})"; + EXPECT_CALL(*old_engine, send_request(&no_dpp, StrEq("GET"), StrEq(""), StrEq("1/2/3/4/5/6"), StrEq(""), _)).WillOnce(SetPointedValue(json)); + + std::string actual_key; + std::string tests[11] {"/", "my_key/", "my_key", "", "my_key/a", "my_key/1a", + "my_key/a1", "my_key/1a1", "my_key/1/a", "1", "my_key/1/" + }; + + int res; + for (const auto &test: tests) { + res = old_engine->get_key(&no_dpp, std::string_view(test), actual_key); + ASSERT_EQ(res, -EINVAL); + } + + res = old_engine->get_key(&no_dpp, std::string_view("1/2/3/4/5/6"), actual_key); + ASSERT_EQ(res, 0); + ASSERT_EQ(actual_key, from_base64("8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg=")); +} + + +TEST_F(TestSSEKMS, test_transit_backend){ + + std::string_view my_key("my_key/1"); + std::string actual_key; + + // Mocks the expected return Value from Vault Server using custom Argument Action + string json = R"({"data": {"keys": {"1": "8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg="}}})"; + const NoDoutPrefix no_dpp(cct, 1); + EXPECT_CALL(*old_engine, send_request(&no_dpp, StrEq("GET"), StrEq(""), StrEq("my_key/1"), StrEq(""), _)).WillOnce(SetPointedValue(json)); + + int res = old_engine->get_key(&no_dpp, my_key, actual_key); + + ASSERT_EQ(res, 0); + ASSERT_EQ(actual_key, from_base64("8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg=")); +} + + +TEST_F(TestSSEKMS, test_transit_makekey){ + + std::string_view my_key("my_key"); + std::string actual_key; + map<string, bufferlist> attrs; + const NoDoutPrefix no_dpp(cct, 1); + + // Mocks the expected return Value from Vault Server using custom Argument Action + string post_json = R"({"data": {"ciphertext": "vault:v2:HbdxLnUztGVo+RseCIaYVn/4wEUiJNT6GQfw57KXQmhXVe7i1/kgLWegEPg1I6lexhIuXAM6Q2YvY0aZ","key_version": 1,"plaintext": "3xfTra/dsIf3TMa3mAT2IxPpM7YWm/NvUb4gDfSDX4g="}})"; + EXPECT_CALL(*transit_engine, send_request(&no_dpp, StrEq("POST"), StrEq("/datakey/plaintext/"), StrEq("my_key"), _, _)) + .WillOnce(SetPointedValue(post_json)); + + set_attr(attrs, RGW_ATTR_CRYPT_CONTEXT, R"({"aws:s3:arn": "fred"})"); + set_attr(attrs, RGW_ATTR_CRYPT_KEYID, my_key); + + int res = transit_engine->make_actual_key(&no_dpp, attrs, actual_key); + std::string cipher_text { get_str_attribute(attrs,RGW_ATTR_CRYPT_DATAKEY) }; + + ASSERT_EQ(res, 0); + ASSERT_EQ(actual_key, from_base64("3xfTra/dsIf3TMa3mAT2IxPpM7YWm/NvUb4gDfSDX4g=")); + ASSERT_EQ(cipher_text, "vault:v2:HbdxLnUztGVo+RseCIaYVn/4wEUiJNT6GQfw57KXQmhXVe7i1/kgLWegEPg1I6lexhIuXAM6Q2YvY0aZ"); +} + +TEST_F(TestSSEKMS, test_transit_reconstitutekey){ + + std::string_view my_key("my_key"); + std::string actual_key; + map<string, bufferlist> attrs; + const NoDoutPrefix no_dpp(cct, 1); + + // Mocks the expected return Value from Vault Server using custom Argument Action + set_attr(attrs, RGW_ATTR_CRYPT_DATAKEY, "vault:v2:HbdxLnUztGVo+RseCIaYVn/4wEUiJNT6GQfw57KXQmhXVe7i1/kgLWegEPg1I6lexhIuXAM6Q2YvY0aZ"); + string post_json = R"({"data": {"key_version": 1,"plaintext": "3xfTra/dsIf3TMa3mAT2IxPpM7YWm/NvUb4gDfSDX4g="}})"; + EXPECT_CALL(*transit_engine, send_request(&no_dpp, StrEq("POST"), StrEq("/decrypt/"), StrEq("my_key"), _, _)) + .WillOnce(SetPointedValue(post_json)); + + set_attr(attrs, RGW_ATTR_CRYPT_CONTEXT, R"({"aws:s3:arn": 
"fred"})"); + set_attr(attrs, RGW_ATTR_CRYPT_KEYID, my_key); + + int res = transit_engine->reconstitute_actual_key(&no_dpp, attrs, actual_key); + + ASSERT_EQ(res, 0); + ASSERT_EQ(actual_key, from_base64("3xfTra/dsIf3TMa3mAT2IxPpM7YWm/NvUb4gDfSDX4g=")); +} + +TEST_F(TestSSEKMS, test_kv_backend){ + + std::string_view my_key("my_key"); + std::string actual_key; + const NoDoutPrefix no_dpp(cct, 1); + + // Mocks the expected return value from Vault Server using custom Argument Action + string json = R"({"data": {"data": {"key": "8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg="}}})"; + EXPECT_CALL(*kv_engine, send_request(&no_dpp, StrEq("GET"), StrEq(""), StrEq("my_key"), StrEq(""), _)) + .WillOnce(SetPointedValue(json)); + + int res = kv_engine->get_key(&no_dpp, my_key, actual_key); + + ASSERT_EQ(res, 0); + ASSERT_EQ(actual_key, from_base64("8qgPWvdtf6zrriS5+nkOzDJ14IGVR6Bgkub5dJn6qeg=")); +} + + +TEST_F(TestSSEKMS, concat_url) +{ + // Each test has 3 strings: + // * the base URL + // * the path we want to concatenate + // * the exepected final URL + std::string tests[9][3] ={ + {"", "", ""}, + {"", "bar", "/bar"}, + {"", "/bar", "/bar"}, + {"foo", "", "foo"}, + {"foo", "bar", "foo/bar"}, + {"foo", "/bar", "foo/bar"}, + {"foo/", "", "foo/"}, + {"foo/", "bar", "foo/bar"}, + {"foo/", "/bar", "foo/bar"}, + }; + for (const auto &test: tests) { + std::string url(test[0]), path(test[1]), expected(test[2]); + concat_url(url, path); + ASSERT_EQ(url, expected); + } +} + + +TEST_F(TestSSEKMS, string_ends_maybe_slash) +{ + struct { std::string hay, needle; bool expected; } tests[] ={ + {"jack here", "fred", false}, + {"here is a fred", "fred", true}, + {"and a fred/", "fred", true}, + {"no fred here", "fred", false}, + {"double fred//", "fred", true}, + }; + for (const auto &test: tests) { + bool expected { string_ends_maybe_slash(test.hay, test.needle) }; + ASSERT_EQ(expected, test.expected); + } +} + + +TEST_F(TestSSEKMS, test_transit_backend_empty_response) +{ + std::string_view my_key("/key/nonexistent/1"); + std::string actual_key; + const NoDoutPrefix no_dpp(cct, 1); + + // Mocks the expected return Value from Vault Server using custom Argument Action + string json = R"({"errors": ["version does not exist or cannot be found"]})"; + EXPECT_CALL(*old_engine, send_request(&no_dpp, StrEq("GET"), StrEq(""), StrEq("/key/nonexistent/1"), StrEq(""), _)).WillOnce(SetPointedValue(json)); + + int res = old_engine->get_key(&no_dpp, my_key, actual_key); + + ASSERT_EQ(res, -EINVAL); + ASSERT_EQ(actual_key, from_base64("")); +} diff --git a/src/test/rgw/test_rgw_lc.cc b/src/test/rgw/test_rgw_lc.cc new file mode 100644 index 000000000..83a4cac67 --- /dev/null +++ b/src/test/rgw/test_rgw_lc.cc @@ -0,0 +1,109 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "rgw_xml.h" +#include "rgw_lc.h" +#include "rgw_lc_s3.h" +#include <gtest/gtest.h> +//#include <spawn/spawn.hpp> +#include <string> +#include <vector> +#include <stdexcept> + +static const char* xmldoc_1 = +R"(<Filter> + <And> + <Prefix>tax/</Prefix> + <Tag> + <Key>key1</Key> + <Value>value1</Value> + </Tag> + <Tag> + <Key>key2</Key> + <Value>value2</Value> + </Tag> + </And> +</Filter> +)"; + +TEST(TestLCFilterDecoder, XMLDoc1) +{ + RGWXMLDecoder::XMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(xmldoc_1, strlen(xmldoc_1), 1)); + LCFilter_S3 filter; + auto result = RGWXMLDecoder::decode_xml("Filter", filter, &parser, true); + ASSERT_TRUE(result); + /* check repeated Tag 
element */ + auto tag_map = filter.get_tags().get_tags(); + auto val1 = tag_map.find("key1"); + ASSERT_EQ(val1->second, "value1"); + auto val2 = tag_map.find("key2"); + ASSERT_EQ(val2->second, "value2"); + /* check our flags */ + ASSERT_EQ(filter.get_flags(), 0); +} + +static const char* xmldoc_2 = +R"(<Filter> + <And> + <ArchiveZone /> + <Tag> + <Key>spongebob</Key> + <Value>squarepants</Value> + </Tag> + </And> +</Filter> +)"; + +TEST(TestLCFilterDecoder, XMLDoc2) +{ + RGWXMLDecoder::XMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(xmldoc_2, strlen(xmldoc_2), 1)); + LCFilter_S3 filter; + auto result = RGWXMLDecoder::decode_xml("Filter", filter, &parser, true); + ASSERT_TRUE(result); + /* check tags */ + auto tag_map = filter.get_tags().get_tags(); + auto val1 = tag_map.find("spongebob"); + ASSERT_EQ(val1->second, "squarepants"); + /* check our flags */ + ASSERT_EQ(filter.get_flags(), LCFilter::make_flag(LCFlagType::ArchiveZone)); +} + +// invalid And element placement +static const char* xmldoc_3 = +R"(<Filter> + <And> + <Tag> + <Key>miles</Key> + <Value>davis</Value> + </Tag> + </And> + <Tag> + <Key>spongebob</Key> + <Value>squarepants</Value> + </Tag> +</Filter> +)"; + +TEST(TestLCFilterInvalidAnd, XMLDoc3) +{ + RGWXMLDecoder::XMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(xmldoc_3, strlen(xmldoc_3), 1)); + LCFilter_S3 filter; + auto result = RGWXMLDecoder::decode_xml("Filter", filter, &parser, true); + ASSERT_TRUE(result); + /* check repeated Tag element */ + auto tag_map = filter.get_tags().get_tags(); + auto val1 = tag_map.find("spongebob"); + ASSERT_TRUE(val1 == tag_map.end()); + /* because the invalid 2nd tag element was not recognized, + * we cannot access it: + ASSERT_EQ(val1->second, "squarepants"); + */ + /* check our flags */ + ASSERT_EQ(filter.get_flags(), uint32_t(LCFlagType::none)); +} diff --git a/src/test/rgw/test_rgw_lua.cc b/src/test/rgw/test_rgw_lua.cc new file mode 100644 index 000000000..a539c025b --- /dev/null +++ b/src/test/rgw/test_rgw_lua.cc @@ -0,0 +1,1338 @@ +#include <gtest/gtest.h> +#include "common/ceph_context.h" +#include "rgw_common.h" +#include "rgw_auth_registry.h" +#include "rgw_process_env.h" +#include "rgw_sal_rados.h" +#include "rgw_lua_request.h" +#include "rgw_lua_background.h" +#include "rgw_lua_data_filter.h" + +using namespace std; +using namespace rgw; +using boost::container::flat_set; +using rgw::auth::Identity; +using rgw::auth::Principal; + +class CctCleaner { + CephContext* cct; +public: + CctCleaner(CephContext* _cct) : cct(_cct) {} + ~CctCleaner() { +#ifdef WITH_SEASTAR + delete cct; +#else + cct->put(); +#endif + } +}; + +class FakeIdentity : public Identity { +public: + FakeIdentity() = default; + + uint32_t get_perms_from_aclspec(const DoutPrefixProvider* dpp, const aclspec_t& aclspec) const override { + return 0; + }; + + bool is_admin_of(const rgw_user& uid) const override { + return false; + } + + bool is_owner_of(const rgw_user& uid) const override { + return false; + } + + virtual uint32_t get_perm_mask() const override { + return 0; + } + + uint32_t get_identity_type() const override { + return TYPE_RGW; + } + + string get_acct_name() const override { + return ""; + } + + string get_subuser() const override { + return ""; + } + + void to_str(std::ostream& out) const override { + return; + } + + bool is_identity(const flat_set<Principal>& ids) const override { + return false; + } +}; + +class TestUser : public sal::StoreUser { +public: + virtual std::unique_ptr<User> clone() 
override { + return std::unique_ptr<User>(new TestUser(*this)); + } + + virtual int list_buckets(const DoutPrefixProvider *dpp, const string&, const string&, uint64_t, bool, sal::BucketList&, optional_yield y) override { + return 0; + } + + virtual int create_bucket(const DoutPrefixProvider* dpp, const rgw_bucket& b, const std::string& zonegroup_id, rgw_placement_rule& placement_rule, std::string& swift_ver_location, const RGWQuotaInfo* pquota_info, const RGWAccessControlPolicy& policy, sal::Attrs& attrs, RGWBucketInfo& info, obj_version& ep_objv, bool exclusive, bool obj_lock_enabled, bool* existed, req_info& req_info, std::unique_ptr<sal::Bucket>* bucket, optional_yield y) override { + return 0; + } + + virtual int read_attrs(const DoutPrefixProvider *dpp, optional_yield y) override { + return 0; + } + + virtual int read_stats(const DoutPrefixProvider *dpp, optional_yield y, RGWStorageStats* stats, ceph::real_time *last_stats_sync, ceph::real_time *last_stats_update) override { + return 0; + } + + virtual int read_stats_async(const DoutPrefixProvider *dpp, RGWGetUserStats_CB *cb) override { + return 0; + } + + virtual int complete_flush_stats(const DoutPrefixProvider *dpp, optional_yield y) override { + return 0; + } + + virtual int read_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch, uint32_t max_entries, bool *is_truncated, RGWUsageIter& usage_iter, map<rgw_user_bucket, rgw_usage_log_entry>& usage) override { + return 0; + } + + virtual int trim_usage(const DoutPrefixProvider *dpp, uint64_t start_epoch, uint64_t end_epoch) override { + return 0; + } + + virtual int load_user(const DoutPrefixProvider *dpp, optional_yield y) override { + return 0; + } + + virtual int store_user(const DoutPrefixProvider* dpp, optional_yield y, bool exclusive, RGWUserInfo* old_info) override { + return 0; + } + + virtual int remove_user(const DoutPrefixProvider* dpp, optional_yield y) override { + return 0; + } + virtual int merge_and_store_attrs(const DoutPrefixProvider *dpp, rgw::sal::Attrs& attrs, optional_yield y) override { + return 0; + } + virtual int verify_mfa(const std::string& mfa_str, bool* verified, const DoutPrefixProvider* dpp, optional_yield y) override { + return 0; + } + virtual ~TestUser() = default; +}; + +class TestAccounter : public io::Accounter, public io::BasicClient { + RGWEnv env; + +protected: + virtual int init_env(CephContext *cct) override { + return 0; + } + +public: + ~TestAccounter() = default; + + virtual void set_account(bool enabled) override { + } + + virtual uint64_t get_bytes_sent() const override { + return 0; + } + + virtual uint64_t get_bytes_received() const override { + return 0; + } + + virtual RGWEnv& get_env() noexcept override { + return env; + } + + virtual size_t complete_request() override { + return 0; + } +}; + +auto g_cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); + +CctCleaner cleaner(g_cct); + +tracing::Tracer tracer; + +#define DEFINE_REQ_STATE RGWProcessEnv pe; RGWEnv e; req_state s(g_cct, pe, &e, 0); +#define INIT_TRACE tracer.init("test"); \ + s.trace = tracer.start_trace("test", true); + +TEST(TestRGWLua, EmptyScript) +{ + const std::string script; + + DEFINE_REQ_STATE; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, SyntaxError) +{ + const std::string script = R"( + if 3 < 5 then + RGWDebugLog("missing 'end'") + )"; + + DEFINE_REQ_STATE; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + 
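+  // the Lua chunk above never closes its 'if' with 'end', so loading the
+  // script fails and execute() is expected to return -1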
ASSERT_EQ(rc, -1); +} + +TEST(TestRGWLua, Hello) +{ + const std::string script = R"( + RGWDebugLog("hello from lua") + )"; + + DEFINE_REQ_STATE; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, RGWDebugLogNumber) +{ + const std::string script = R"( + RGWDebugLog(1234567890) + )"; + + DEFINE_REQ_STATE; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, RGWDebugNil) +{ + const std::string script = R"( + RGWDebugLog(nil) + )"; + + DEFINE_REQ_STATE; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, -1); +} + +TEST(TestRGWLua, URI) +{ + const std::string script = R"( + RGWDebugLog(Request.DecodedURI) + assert(Request.DecodedURI == "http://hello.world/") + )"; + + DEFINE_REQ_STATE; + s.decoded_uri = "http://hello.world/"; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, Response) +{ + const std::string script = R"( + assert(Request.Response.Message == "This is a bad request") + assert(Request.Response.HTTPStatus == "Bad Request") + assert(Request.Response.RGWCode == 4000) + assert(Request.Response.HTTPStatusCode == 400) + )"; + + DEFINE_REQ_STATE; + s.err.http_ret = 400; + s.err.ret = 4000; + s.err.err_code = "Bad Request"; + s.err.message = "This is a bad request"; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, SetResponse) +{ + const std::string script = R"( + assert(Request.Response.Message == "this is a bad request") + Request.Response.Message = "this is a good request" + assert(Request.Response.Message == "this is a good request") + )"; + + DEFINE_REQ_STATE; + s.err.message = "this is a bad request"; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, RGWIdNotWriteable) +{ + const std::string script = R"( + assert(Request.RGWId == "foo") + Request.RGWId = "bar" + )"; + + DEFINE_REQ_STATE; + s.host_id = "foo"; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_NE(rc, 0); +} + +TEST(TestRGWLua, InvalidField) +{ + const std::string script = R"( + RGWDebugLog(Request.Kaboom) + )"; + + DEFINE_REQ_STATE; + s.host_id = "foo"; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, -1); +} + +TEST(TestRGWLua, InvalidSubField) +{ + const std::string script = R"( + RGWDebugLog(Request.Error.Kaboom) + )"; + + DEFINE_REQ_STATE; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, -1); +} + +TEST(TestRGWLua, Bucket) +{ + const std::string script = R"( + assert(Request.Bucket) + RGWDebugLog("Bucket Id: " .. 
Request.Bucket.Id) + assert(Request.Bucket.Marker == "mymarker") + assert(Request.Bucket.Name == "myname") + assert(Request.Bucket.Tenant == "mytenant") + assert(Request.Bucket.Count == 0) + assert(Request.Bucket.Size == 0) + assert(Request.Bucket.ZoneGroupId) + assert(Request.Bucket.CreationTime) + assert(Request.Bucket.MTime) + assert(Request.Bucket.Quota.MaxSize == -1) + assert(Request.Bucket.Quota.MaxObjects == -1) + assert(tostring(Request.Bucket.Quota.Enabled)) + assert(tostring(Request.Bucket.Quota.Rounded)) + assert(Request.Bucket.User.Id) + assert(Request.Bucket.User.Tenant) + )"; + + DEFINE_REQ_STATE; + + rgw_bucket b; + b.tenant = "mytenant"; + b.name = "myname"; + b.marker = "mymarker"; + b.bucket_id = "myid"; + s.bucket.reset(new sal::RadosBucket(nullptr, b)); + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, WriteBucket) +{ + const std::string script = R"( + assert(Request.Bucket) + assert(Request.Bucket.Name == "myname") + Request.Bucket.Name = "othername" + )"; + + DEFINE_REQ_STATE; + s.init_state.url_bucket = "myname"; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); + ASSERT_EQ(s.init_state.url_bucket, "othername"); +} + +TEST(TestRGWLua, WriteBucketFail) +{ + const std::string script = R"( + assert(Request.Bucket) + assert(Request.Bucket.Name == "myname") + Request.Bucket.Name = "othername" + )"; + + DEFINE_REQ_STATE; + rgw_bucket b; + b.name = "myname"; + s.bucket.reset(new sal::RadosBucket(nullptr, b)); + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_NE(rc, 0); +} + +TEST(TestRGWLua, GenericAttributes) +{ + const std::string script = R"( + assert(Request.GenericAttributes["hello"] == "world") + assert(Request.GenericAttributes["foo"] == "bar") + assert(Request.GenericAttributes["kaboom"] == nil) + assert(#Request.GenericAttributes == 4) + for k, v in pairs(Request.GenericAttributes) do + assert(k) + assert(v) + end + )"; + + DEFINE_REQ_STATE; + s.generic_attrs["hello"] = "world"; + s.generic_attrs["foo"] = "bar"; + s.generic_attrs["goodbye"] = "cruel world"; + s.generic_attrs["ka"] = "boom"; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, Environment) +{ + const std::string script = R"( + assert(Request.Environment[""] == "bar") + assert(Request.Environment["goodbye"] == "cruel world") + assert(Request.Environment["ka"] == "boom") + assert(#Request.Environment == 3, #Request.Environment) + for k, v in pairs(Request.Environment) do + assert(k) + assert(v) + end + )"; + + DEFINE_REQ_STATE; + s.env.emplace("", "bar"); + s.env.emplace("goodbye", "cruel world"); + s.env.emplace("ka", "boom"); + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, Tags) +{ + const std::string script = R"( + assert(#Request.Tags == 4) + assert(Request.Tags["foo"] == "bar") + for k, v in pairs(Request.Tags) do + assert(k) + assert(v) + end + )"; + + DEFINE_REQ_STATE; + s.tagset.add_tag("hello", "world"); + s.tagset.add_tag("foo", "bar"); + s.tagset.add_tag("goodbye", "cruel world"); + s.tagset.add_tag("ka", "boom"); + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, TagsNotWriteable) +{ + const std::string script = R"( + Request.Tags["hello"] = "goodbye" + 
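+    -- Request.Tags is exposed to the script as read-only, so this assignment
+    -- is expected to fail the script (the test asserts a non-zero rc below)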
)"; + + DEFINE_REQ_STATE; + s.tagset.add_tag("hello", "world"); + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_NE(rc, 0); +} + +TEST(TestRGWLua, Metadata) +{ + const std::string script = R"( + assert(#Request.HTTP.Metadata == 3) + for k, v in pairs(Request.HTTP.Metadata) do + assert(k) + assert(v) + end + assert(Request.HTTP.Metadata["hello"] == "world") + assert(Request.HTTP.Metadata["kaboom"] == nil) + Request.HTTP.Metadata["hello"] = "goodbye" + Request.HTTP.Metadata["kaboom"] = "boom" + assert(#Request.HTTP.Metadata == 4) + assert(Request.HTTP.Metadata["hello"] == "goodbye") + assert(Request.HTTP.Metadata["kaboom"] == "boom") + )"; + + DEFINE_REQ_STATE; + s.info.x_meta_map["hello"] = "world"; + s.info.x_meta_map["foo"] = "bar"; + s.info.x_meta_map["ka"] = "boom"; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, Acl) +{ + const std::string script = R"( + function print_grant(g) + print("Grant Type: " .. g.Type) + print("Grant Group Type: " .. g.GroupType) + print("Grant Referer: " .. g.Referer) + if (g.User) then + print("Grant User.Tenant: " .. g.User.Tenant) + print("Grant User.Id: " .. g.User.Id) + end + end + + assert(Request.UserAcl.Owner.DisplayName == "jack black", Request.UserAcl.Owner.DisplayName) + assert(Request.UserAcl.Owner.User.Id == "black", Request.UserAcl.Owner.User.Id) + assert(Request.UserAcl.Owner.User.Tenant == "jack", Request.UserAcl.Owner.User.Tenant) + assert(#Request.UserAcl.Grants == 5) + print_grant(Request.UserAcl.Grants[""]) + for k, v in pairs(Request.UserAcl.Grants) do + print_grant(v) + if k == "john$doe" then + assert(v.Permission == 4) + elseif k == "jane$doe" then + assert(v.Permission == 1) + else + assert(false) + end + end + )"; + + DEFINE_REQ_STATE; + ACLOwner owner; + owner.set_id(rgw_user("jack", "black")); + owner.set_name("jack black"); + s.user_acl.reset(new RGWAccessControlPolicy(g_cct)); + s.user_acl->set_owner(owner); + ACLGrant grant1, grant2, grant3, grant4, grant5; + grant1.set_canon(rgw_user("jane", "doe"), "her grant", 1); + grant2.set_group(ACL_GROUP_ALL_USERS ,2); + grant3.set_referer("http://localhost/ref2", 3); + grant4.set_canon(rgw_user("john", "doe"), "his grant", 4); + grant5.set_group(ACL_GROUP_AUTHENTICATED_USERS, 5); + s.user_acl->get_acl().add_grant(&grant1); + s.user_acl->get_acl().add_grant(&grant2); + s.user_acl->get_acl().add_grant(&grant3); + s.user_acl->get_acl().add_grant(&grant4); + s.user_acl->get_acl().add_grant(&grant5); + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, User) +{ + const std::string script = R"( + assert(Request.User) + assert(Request.User.Id == "myid") + assert(Request.User.Tenant == "mytenant") + )"; + + DEFINE_REQ_STATE; + + rgw_user u; + u.tenant = "mytenant"; + u.id = "myid"; + s.user.reset(new sal::RadosUser(nullptr, u)); + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + + +TEST(TestRGWLua, UseFunction) +{ + const std::string script = R"( + function print_owner(owner) + print("Owner Dispaly Name: " .. owner.DisplayName) + print("Owner Id: " .. owner.User.Id) + print("Owner Tenanet: " .. owner.User.Tenant) + end + + print_owner(Request.ObjectOwner) + + function print_acl(acl_type) + index = acl_type .. "ACL" + acl = Request[index] + if acl then + print(acl_type .. 
"ACL Owner") + print_owner(acl.Owner) + else + print("no " .. acl_type .. " ACL in request: " .. Request.Id) + end + end + + print_acl("User") + print_acl("Bucket") + print_acl("Object") + )"; + + DEFINE_REQ_STATE; + s.owner.set_name("user two"); + s.owner.set_id(rgw_user("tenant2", "user2")); + s.user_acl.reset(new RGWAccessControlPolicy()); + s.user_acl->get_owner().set_name("user three"); + s.user_acl->get_owner().set_id(rgw_user("tenant3", "user3")); + s.bucket_acl.reset(new RGWAccessControlPolicy()); + s.bucket_acl->get_owner().set_name("user four"); + s.bucket_acl->get_owner().set_id(rgw_user("tenant4", "user4")); + s.object_acl.reset(new RGWAccessControlPolicy()); + s.object_acl->get_owner().set_name("user five"); + s.object_acl->get_owner().set_id(rgw_user("tenant5", "user5")); + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, WithLib) +{ + const std::string script = R"( + expected_result = {"my", "bucket", "name", "is", "fish"} + i = 1 + for p in string.gmatch(Request.Bucket.Name, "%a+") do + assert(p == expected_result[i]) + i = i + 1 + end + )"; + + DEFINE_REQ_STATE; + + rgw_bucket b; + b.name = "my-bucket-name-is-fish"; + s.bucket.reset(new sal::RadosBucket(nullptr, b)); + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, NotAllowedInLib) +{ + const std::string script = R"( + os.clock() -- this should be ok + os.exit() -- this should fail (os.exit() is removed) + )"; + + DEFINE_REQ_STATE; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_NE(rc, 0); +} + +#define MAKE_STORE auto store = std::unique_ptr<sal::RadosStore>(new sal::RadosStore); \ + store->setRados(new RGWRados); + +TEST(TestRGWLua, OpsLog) +{ + const std::string script = R"( + if Request.Response.HTTPStatusCode == 200 then + assert(Request.Response.Message == "Life is great") + else + assert(Request.Bucket) + assert(Request.Log() == 0) + end + )"; + + MAKE_STORE; + + struct MockOpsLogSink : OpsLogSink { + bool logged = false; + int log(req_state*, rgw_log_entry&) override { logged = true; return 0; } + }; + MockOpsLogSink olog; + + DEFINE_REQ_STATE; + s.err.http_ret = 200; + s.err.ret = 0; + s.err.err_code = "200OK"; + s.err.message = "Life is great"; + rgw_bucket b; + b.tenant = "tenant"; + b.name = "name"; + b.marker = "marker"; + b.bucket_id = "id"; + s.bucket.reset(new sal::RadosBucket(nullptr, b)); + s.bucket_name = "name"; + s.enable_ops_log = true; + s.enable_usage_log = false; + s.user.reset(new TestUser()); + TestAccounter ac; + s.cio = ∾ + s.cct->_conf->rgw_ops_log_rados = false; + + s.auth.identity = std::unique_ptr<rgw::auth::Identity>( + new FakeIdentity()); + + auto rc = lua::request::execute(store.get(), nullptr, &olog, &s, nullptr, script); + EXPECT_EQ(rc, 0); + EXPECT_FALSE(olog.logged); // don't log http_ret=200 + + s.err.http_ret = 400; + rc = lua::request::execute(store.get(), nullptr, &olog, &s, nullptr, script); + EXPECT_EQ(rc, 0); + EXPECT_TRUE(olog.logged); +} + +class TestBackground : public rgw::lua::Background { + const unsigned read_time; + +protected: + int read_script() override { + // don't read the object from the store + std::this_thread::sleep_for(std::chrono::seconds(read_time)); + return 0; + } + +public: + TestBackground(sal::RadosStore* store, const std::string& script, unsigned read_time = 0) : + rgw::lua::Background(store, g_cct, "", /* luarocks path */ 1 /* run 
every second */), + read_time(read_time) { + // the script is passed in the constructor + rgw_script = script; + } + + ~TestBackground() override { + shutdown(); + } +}; + +TEST(TestRGWLuaBackground, Start) +{ + MAKE_STORE; + { + // ctr and dtor without running + TestBackground lua_background(store.get(), ""); + } + { + // ctr and dtor with running + TestBackground lua_background(store.get(), ""); + lua_background.start(); + } +} + + +constexpr auto wait_time = std::chrono::seconds(3); + +template<typename T> +const T& get_table_value(const TestBackground& b, const std::string& index) { + try { + return std::get<T>(b.get_table_value(index)); + } catch (std::bad_variant_access const& ex) { + std::cout << "expected RGW[" << index << "] to be: " << typeid(T).name() << std::endl; + throw(ex); + } +} + +TEST(TestRGWLuaBackground, Script) +{ + const std::string script = R"( + local key = "hello" + local value = "world" + RGW[key] = value + )"; + + MAKE_STORE; + TestBackground lua_background(store.get(), script); + lua_background.start(); + std::this_thread::sleep_for(wait_time); + EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), "world"); +} + +TEST(TestRGWLuaBackground, RequestScript) +{ + const std::string background_script = R"( + local key = "hello" + local value = "from background" + RGW[key] = value + )"; + + MAKE_STORE; + TestBackground lua_background(store.get(), background_script); + lua_background.start(); + std::this_thread::sleep_for(wait_time); + + const std::string request_script = R"( + local key = "hello" + assert(RGW[key] == "from background") + local value = "from request" + RGW[key] = value + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + // to make sure test is consistent we have to puase the background + lua_background.pause(); + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); + EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), "from request"); + // now we resume and let the background set the value + lua_background.resume(store.get()); + std::this_thread::sleep_for(wait_time); + EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), "from background"); +} + +TEST(TestRGWLuaBackground, Pause) +{ + const std::string script = R"( + local key = "hello" + local value = "1" + if RGW[key] then + RGW[key] = value..RGW[key] + else + RGW[key] = value + end + )"; + + MAKE_STORE; + TestBackground lua_background(store.get(), script); + lua_background.start(); + std::this_thread::sleep_for(wait_time); + const auto value_len = get_table_value<std::string>(lua_background, "hello").size(); + EXPECT_GT(value_len, 0); + lua_background.pause(); + std::this_thread::sleep_for(wait_time); + // no change in len + EXPECT_EQ(value_len, get_table_value<std::string>(lua_background, "hello").size()); +} + +TEST(TestRGWLuaBackground, PauseWhileReading) +{ + const std::string script = R"( + local key = "hello" + local value = "world" + RGW[key] = value + if RGW[key] then + RGW[key] = value..RGW[key] + else + RGW[key] = value + end + )"; + + MAKE_STORE; + constexpr auto long_wait_time = std::chrono::seconds(6); + TestBackground lua_background(store.get(), script, 2); + lua_background.start(); + std::this_thread::sleep_for(long_wait_time); + const auto value_len = get_table_value<std::string>(lua_background, "hello").size(); + EXPECT_GT(value_len, 0); + lua_background.pause(); + std::this_thread::sleep_for(long_wait_time); + // one execution might occur after pause + 
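+  // (read_script() above sleeps for read_time = 2 seconds, so a run that was
+  // already in flight when pause() was called may still complete afterwards)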
EXPECT_TRUE(value_len + 1 >= get_table_value<std::string>(lua_background, "hello").size()); +} + +TEST(TestRGWLuaBackground, ReadWhilePaused) +{ + const std::string script = R"( + local key = "hello" + local value = "world" + RGW[key] = value + )"; + + MAKE_STORE; + TestBackground lua_background(store.get(), script); + lua_background.pause(); + lua_background.start(); + std::this_thread::sleep_for(wait_time); + EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), ""); + lua_background.resume(store.get()); + std::this_thread::sleep_for(wait_time); + EXPECT_EQ(get_table_value<std::string>(lua_background, "hello"), "world"); +} + +TEST(TestRGWLuaBackground, PauseResume) +{ + const std::string script = R"( + local key = "hello" + local value = "1" + if RGW[key] then + RGW[key] = value..RGW[key] + else + RGW[key] = value + end + )"; + + MAKE_STORE; + TestBackground lua_background(store.get(), script); + lua_background.start(); + std::this_thread::sleep_for(wait_time); + const auto value_len = get_table_value<std::string>(lua_background, "hello").size(); + EXPECT_GT(value_len, 0); + lua_background.pause(); + std::this_thread::sleep_for(wait_time); + // no change in len + EXPECT_EQ(value_len, get_table_value<std::string>(lua_background, "hello").size()); + lua_background.resume(store.get()); + std::this_thread::sleep_for(wait_time); + // should be a change in len + EXPECT_GT(get_table_value<std::string>(lua_background, "hello").size(), value_len); +} + +TEST(TestRGWLuaBackground, MultipleStarts) +{ + const std::string script = R"( + local key = "hello" + local value = "1" + if RGW[key] then + RGW[key] = value..RGW[key] + else + RGW[key] = value + end + )"; + + MAKE_STORE; + TestBackground lua_background(store.get(), script); + lua_background.start(); + std::this_thread::sleep_for(wait_time); + const auto value_len = get_table_value<std::string>(lua_background, "hello").size(); + EXPECT_GT(value_len, 0); + lua_background.start(); + lua_background.shutdown(); + lua_background.shutdown(); + std::this_thread::sleep_for(wait_time); + lua_background.start(); + std::this_thread::sleep_for(wait_time); + // should be a change in len + EXPECT_GT(get_table_value<std::string>(lua_background, "hello").size(), value_len); +} + +TEST(TestRGWLuaBackground, TableValues) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + const std::string request_script = R"( + RGW["key1"] = "string value" + RGW["key2"] = 42 + RGW["key3"] = 42.2 + RGW["key4"] = true + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value"); + EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42); + EXPECT_EQ(get_table_value<double>(lua_background, "key3"), 42.2); + EXPECT_TRUE(get_table_value<bool>(lua_background, "key4")); +} + +TEST(TestRGWLuaBackground, TablePersist) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + std::string request_script = R"( + RGW["key1"] = "string value" + RGW["key2"] = 42 + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value"); + EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42); + + request_script = R"( + RGW["key3"] = 
RGW["key1"] + RGW["key4"] = RGW["key2"] + )"; + + rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value"); + EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key3"), "string value"); + EXPECT_EQ(get_table_value<long long int>(lua_background, "key4"), 42); +} + +TEST(TestRGWLuaBackground, TableValuesFromRequest) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + lua_background.start(); + + const std::string request_script = R"( + RGW["key1"] = Request.Response.RGWCode + RGW["key2"] = Request.Response.Message + RGW["key3"] = Request.Response.RGWCode*0.1 + RGW["key4"] = Request.Tags["key1"] == Request.Tags["key2"] + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + s.tagset.add_tag("key1", "val1"); + s.tagset.add_tag("key2", "val1"); + s.err.ret = -99; + s.err.message = "hi"; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); + EXPECT_EQ(get_table_value<long long int>(lua_background, "key1"), -99); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key2"), "hi"); + EXPECT_EQ(get_table_value<double>(lua_background, "key3"), -9.9); + EXPECT_EQ(get_table_value<bool>(lua_background, "key4"), true); +} + +TEST(TestRGWLuaBackground, TableInvalidValue) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + lua_background.start(); + + const std::string request_script = R"( + RGW["key1"] = "val1" + RGW["key2"] = 42 + RGW["key3"] = 42.2 + RGW["key4"] = true + RGW["key5"] = Request.Tags + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + s.tagset.add_tag("key1", "val1"); + s.tagset.add_tag("key2", "val2"); + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_NE(rc, 0); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "val1"); + EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42); + EXPECT_EQ(get_table_value<double>(lua_background, "key3"), 42.2); + EXPECT_EQ(get_table_value<bool>(lua_background, "key4"), true); +} + +TEST(TestRGWLuaBackground, TableErase) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + std::string request_script = R"( + RGW["size"] = 0 + RGW["key1"] = "string value" + RGW["key2"] = 42 + RGW["key3"] = "another string value" + RGW["size"] = #RGW + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value"); + EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key3"), "another string value"); + EXPECT_EQ(get_table_value<long long int>(lua_background, "size"), 4); + + request_script = R"( + -- erase key1 + RGW["key1"] = nil + -- following should be a no op + RGW["key4"] = nil + RGW["size"] = #RGW + )"; + + rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), ""); + EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key3"), "another string value"); + 
EXPECT_EQ(get_table_value<long long int>(lua_background, "size"), 3); +} + +TEST(TestRGWLuaBackground, TableIterate) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + const std::string request_script = R"( + RGW["key1"] = "string value" + RGW["key2"] = 42 + RGW["key3"] = 42.2 + RGW["key4"] = true + RGW["size"] = 0 + for k, v in pairs(RGW) do + RGW["size"] = RGW["size"] + 1 + end + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); + EXPECT_EQ(get_table_value<std::string>(lua_background, "key1"), "string value"); + EXPECT_EQ(get_table_value<long long int>(lua_background, "key2"), 42); + EXPECT_EQ(get_table_value<double>(lua_background, "key3"), 42.2); + EXPECT_TRUE(get_table_value<bool>(lua_background, "key4")); + EXPECT_EQ(get_table_value<long long int>(lua_background, "size"), 5); +} + +TEST(TestRGWLuaBackground, TableIncrement) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + const std::string request_script = R"( + RGW["key1"] = 42 + RGW["key2"] = 42.2 + RGW.increment("key1") + assert(RGW["key1"] == 43) + RGW.increment("key2") + assert(RGW["key2"] == 43.2) + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLuaBackground, TableIncrementBy) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + const std::string request_script = R"( + RGW["key1"] = 42 + RGW["key2"] = 42.2 + RGW.increment("key1", 10) + assert(RGW["key1"] == 52) + RGW.increment("key2", 10) + assert(RGW["key2"] == 52.2) + RGW.increment("key1", 0.2) + assert(RGW["key1"] == 52.2) + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLuaBackground, TableDecrement) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + const std::string request_script = R"( + RGW["key1"] = 42 + RGW["key2"] = 42.2 + RGW.decrement("key1") + assert(RGW["key1"] == 41) + RGW.decrement("key2") + assert(RGW["key2"] == 41.2) + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLuaBackground, TableDecrementBy) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + const std::string request_script = R"( + RGW["key1"] = 42 + RGW["key2"] = 42.2 + RGW.decrement("key1", 10) + assert(RGW["key1"] == 32) + RGW.decrement("key2", 10) + assert(RGW["key2"] == 32.2) + RGW.decrement("key1", 0.8) + assert(RGW["key1"] == 31.2) + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLuaBackground, TableIncrementValueError) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + std::string request_script = R"( + -- cannot increment string values + RGW["key1"] = "hello" + RGW.increment("key1") + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_NE(rc, 0); + + request_script = R"( + -- cannot increment bool values + RGW["key1"] = 
true + RGW.increment("key1") + )"; + + rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_NE(rc, 0); + + request_script = R"( + -- cannot increment by string values + RGW["key1"] = 99 + RGW.increment("key1", "kaboom") + )"; + + rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_NE(rc, 0); +} + +TEST(TestRGWLuaBackground, TableIncrementError) +{ + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + + std::string request_script = R"( + -- missing argument + RGW["key1"] = 11 + RGW.increment() + )"; + + DEFINE_REQ_STATE; + pe.lua.background = &lua_background; + + auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_NE(rc, 0); + + request_script = R"( + -- used as settable field + RGW.increment = 11 + )"; + + rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, request_script); + ASSERT_NE(rc, 0); +} + +TEST(TestRGWLua, TracingSetAttribute) +{ + const std::string script = R"( + Request.Trace.SetAttribute("str-attr", "value") + Request.Trace.SetAttribute("int-attr", 42) + Request.Trace.SetAttribute("double-attr", 42.5) + )"; + + DEFINE_REQ_STATE; + INIT_TRACE; + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, TracingSetBadAttribute) +{ + const std::string script = R"( + Request.Trace.SetAttribute("attr", nil) + )"; + + DEFINE_REQ_STATE; + INIT_TRACE; + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + #ifdef HAVE_JAEGER + ASSERT_NE(rc, 0); + #else + ASSERT_EQ(rc, 0); + #endif +} + +TEST(TestRGWLua, TracingAddEvent) +{ + const std::string script = R"( + event_attrs = {} + event_attrs["x"] = "value-x" + event_attrs[42] = 42 + event_attrs[42.5] = 42.5 + event_attrs["y"] = "value-y" + + Request.Trace.AddEvent("my_event", event_attrs) + )"; + + DEFINE_REQ_STATE; + INIT_TRACE; + const auto rc = lua::request::execute(nullptr, nullptr, nullptr, &s, nullptr, script); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, Data) +{ + const std::string script = R"( + local expected = "The quick brown fox jumps over the lazy dog" + local actual = "" + RGW["key1"] = 0 + + for i, c in pairs(Data) do + actual = actual .. 
c + RGW.increment("key1") + end + assert(expected == actual) + assert(#Data == #expected); + assert(RGW["key1"] == #Data) + assert(Request.RGWId == "foo") + assert(Offset == 12345678) + )"; + + MAKE_STORE; + TestBackground lua_background(store.get(), ""); + DEFINE_REQ_STATE; + s.host_id = "foo"; + pe.lua.background = &lua_background; + lua::RGWObjFilter filter(&s, script); + bufferlist bl; + bl.append("The quick brown fox jumps over the lazy dog"); + off_t offset = 12345678; + const auto rc = filter.execute(bl, offset, "put_obj"); + ASSERT_EQ(rc, 0); +} + +TEST(TestRGWLua, WriteDataFail) +{ + const std::string script = R"( + Data[1] = "h" + Data[2] = "e" + Data[3] = "l" + Data[4] = "l" + Data[5] = "o" + )"; + + DEFINE_REQ_STATE; + lua::RGWObjFilter filter(&s, script); + bufferlist bl; + bl.append("The quick brown fox jumps over the lazy dog"); + const auto rc = filter.execute(bl, 0, "put_obj"); + ASSERT_NE(rc, 0); +} + diff --git a/src/test/rgw/test_rgw_manifest.cc b/src/test/rgw/test_rgw_manifest.cc new file mode 100644 index 000000000..acde46d44 --- /dev/null +++ b/src/test/rgw/test_rgw_manifest.cc @@ -0,0 +1,397 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2013 eNovance SAS <licensing@enovance.com> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ +#include <iostream> +#include "global/global_init.h" +#include "common/ceph_argparse.h" +#include "rgw_common.h" +#include "rgw_rados.h" +#include "test_rgw_common.h" +#include <gtest/gtest.h> + +using namespace std; + +auto cct = new CephContext(CEPH_ENTITY_TYPE_CLIENT); +const DoutPrefix dp(cct, 1, "test rgw manifest: "); + +struct OldObjManifestPart { + old_rgw_obj loc; /* the object where the data is located */ + uint64_t loc_ofs; /* the offset at that object where the data is located */ + uint64_t size; /* the part size */ + + OldObjManifestPart() : loc_ofs(0), size(0) {} + + void encode(bufferlist& bl) const { + ENCODE_START(2, 2, bl); + encode(loc, bl); + encode(loc_ofs, bl); + encode(size, bl); + ENCODE_FINISH(bl); + } + + void decode(bufferlist::const_iterator& bl) { + DECODE_START_LEGACY_COMPAT_LEN_32(2, 2, 2, bl); + decode(loc, bl); + decode(loc_ofs, bl); + decode(size, bl); + DECODE_FINISH(bl); + } + + void dump(Formatter *f) const; + static void generate_test_instances(list<OldObjManifestPart*>& o); +}; +WRITE_CLASS_ENCODER(OldObjManifestPart) + +class OldObjManifest { +protected: + map<uint64_t, OldObjManifestPart> objs; + + uint64_t obj_size; +public: + + OldObjManifest() : obj_size(0) {} + OldObjManifest(const OldObjManifest& rhs) { + *this = rhs; + } + OldObjManifest& operator=(const OldObjManifest& rhs) { + objs = rhs.objs; + obj_size = rhs.obj_size; + return *this; + } + + const map<uint64_t, OldObjManifestPart>& get_objs() { + return objs; + } + + void append(uint64_t ofs, const OldObjManifestPart& part) { + objs[ofs] = part; + obj_size = std::max(obj_size, ofs + part.size); + } + + void encode(bufferlist& bl) const { + ENCODE_START(2, 2, bl); + encode(obj_size, bl); + encode(objs, bl); + ENCODE_FINISH(bl); + } + + void decode(bufferlist::const_iterator& bl) { + DECODE_START_LEGACY_COMPAT_LEN_32(6, 2, 2, bl); + decode(obj_size, bl); + decode(objs, bl); + DECODE_FINISH(bl); + } + + bool empty() { + return objs.empty(); + } 
+}; +WRITE_CLASS_ENCODER(OldObjManifest) + +void append_head(list<rgw_obj> *objs, rgw_obj& head) +{ + objs->push_back(head); +} + +void append_stripes(list<rgw_obj> *objs, RGWObjManifest& manifest, uint64_t obj_size, uint64_t stripe_size) +{ + string prefix = manifest.get_prefix(); + rgw_bucket bucket = manifest.get_obj().bucket; + + int i = 0; + for (uint64_t ofs = manifest.get_max_head_size(); ofs < obj_size; ofs += stripe_size) { + char buf[16]; + snprintf(buf, sizeof(buf), "%d", ++i); + string oid = prefix + buf; + cout << "oid=" << oid << std::endl; + rgw_obj obj; + obj.init_ns(bucket, oid, "shadow"); + objs->push_back(obj); + } +} + +static void gen_obj(test_rgw_env& env, uint64_t obj_size, uint64_t head_max_size, uint64_t stripe_size, + RGWObjManifest *manifest, const rgw_placement_rule& placement_rule, rgw_bucket *bucket, rgw_obj *head, RGWObjManifest::generator *gen, + list<rgw_obj> *test_objs) +{ + manifest->set_trivial_rule(head_max_size, stripe_size); + + test_rgw_init_bucket(bucket, "buck"); + + *head = rgw_obj(*bucket, "oid"); + gen->create_begin(g_ceph_context, manifest, placement_rule, nullptr, *bucket, *head); + + append_head(test_objs, *head); + cout << "test_objs.size()=" << test_objs->size() << std::endl; + append_stripes(test_objs, *manifest, obj_size, stripe_size); + + cout << "test_objs.size()=" << test_objs->size() << std::endl; + + ASSERT_EQ((int)manifest->get_obj_size(), 0); + ASSERT_EQ((int)manifest->get_head_size(), 0); + ASSERT_EQ(manifest->has_tail(), false); + + uint64_t ofs = 0; + list<rgw_obj>::iterator iter = test_objs->begin(); + + while (ofs < obj_size) { + rgw_raw_obj obj = gen->get_cur_obj(env.zonegroup, env.zone_params); + cout << "obj=" << obj << std::endl; + rgw_raw_obj test_raw = rgw_obj_select(*iter).get_raw_obj(env.zonegroup, env.zone_params); + ASSERT_TRUE(obj == test_raw); + + ofs = std::min(ofs + gen->cur_stripe_max_size(), obj_size); + gen->create_next(ofs); + + cout << "obj=" << obj << " *iter=" << *iter << std::endl; + cout << "test_objs.size()=" << test_objs->size() << std::endl; + ++iter; + + } + + if (manifest->has_tail()) { + rgw_raw_obj obj = gen->get_cur_obj(env.zonegroup, env.zone_params); + rgw_raw_obj test_raw = rgw_obj_select(*iter).get_raw_obj(env.zonegroup, env.zone_params); + ASSERT_TRUE(obj == test_raw); + ++iter; + } + ASSERT_TRUE(iter == test_objs->end()); + ASSERT_EQ(manifest->get_obj_size(), obj_size); + ASSERT_EQ(manifest->get_head_size(), std::min(obj_size, head_max_size)); + ASSERT_EQ(manifest->has_tail(), (obj_size > head_max_size)); +} + +static void gen_old_obj(test_rgw_env& env, uint64_t obj_size, uint64_t head_max_size, uint64_t stripe_size, + OldObjManifest *manifest, old_rgw_bucket *bucket, old_rgw_obj *head, + list<old_rgw_obj> *test_objs) +{ + test_rgw_init_old_bucket(bucket, "buck"); + + *head = old_rgw_obj(*bucket, "obj"); + + OldObjManifestPart part; + part.loc = *head; + part.size = head_max_size; + part.loc_ofs = 0; + + manifest->append(0, part); + test_objs->push_back(part.loc); + + string prefix; + append_rand_alpha(g_ceph_context, prefix, prefix, 16); + + int i = 0; + for (uint64_t ofs = head_max_size; ofs < obj_size; ofs += stripe_size, i++) { + char buf[32]; + snprintf(buf, sizeof(buf), "%s.%d", prefix.c_str(), i); + old_rgw_obj loc(*bucket, buf); + loc.set_ns("shadow"); + OldObjManifestPart part; + part.loc = loc; + part.size = min(stripe_size, obj_size - ofs); + part.loc_ofs = 0; + + manifest->append(ofs, part); + + test_objs->push_back(loc); + } +} + +TEST(TestRGWManifest, head_only_obj) { + 
test_rgw_env env; + RGWObjManifest manifest; + rgw_bucket bucket; + rgw_obj head; + RGWObjManifest::generator gen; + + int obj_size = 256 * 1024; + + list<rgw_obj> objs; + + gen_obj(env, obj_size, 512 * 1024, 4 * 1024 * 1024, &manifest, env.zonegroup.default_placement, &bucket, &head, &gen, &objs); + + cout << " manifest.get_obj_size()=" << manifest.get_obj_size() << std::endl; + cout << " manifest.get_head_size()=" << manifest.get_head_size() << std::endl; + list<rgw_obj>::iterator liter; + + RGWObjManifest::obj_iterator iter; + for (iter = manifest.obj_begin(&dp), liter = objs.begin(); + iter != manifest.obj_end(&dp) && liter != objs.end(); + ++iter, ++liter) { + ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location())); + } + + ASSERT_TRUE(iter == manifest.obj_end(&dp)); + ASSERT_TRUE(liter == objs.end()); + + rgw_raw_obj raw_head; + + iter = manifest.obj_find(&dp, 100 * 1024); + ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head)); + ASSERT_EQ((int)iter.get_stripe_size(), obj_size); +} + +TEST(TestRGWManifest, obj_with_head_and_tail) { + test_rgw_env env; + RGWObjManifest manifest; + rgw_bucket bucket; + rgw_obj head; + RGWObjManifest::generator gen; + + list<rgw_obj> objs; + + int obj_size = 21 * 1024 * 1024 + 1000; + int stripe_size = 4 * 1024 * 1024; + int head_size = 512 * 1024; + + gen_obj(env, obj_size, head_size, stripe_size, &manifest, env.zonegroup.default_placement, &bucket, &head, &gen, &objs); + + list<rgw_obj>::iterator liter; + + rgw_obj_select last_obj; + + RGWObjManifest::obj_iterator iter; + for (iter = manifest.obj_begin(&dp), liter = objs.begin(); + iter != manifest.obj_end(&dp) && liter != objs.end(); + ++iter, ++liter) { + cout << "*liter=" << *liter << " iter.get_location()=" << env.get_raw(iter.get_location()) << std::endl; + ASSERT_TRUE(env.get_raw(*liter) == env.get_raw(iter.get_location())); + + last_obj = iter.get_location(); + } + + ASSERT_TRUE(iter == manifest.obj_end(&dp)); + ASSERT_TRUE(liter == objs.end()); + + iter = manifest.obj_find(&dp, 100 * 1024); + ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(head)); + ASSERT_EQ((int)iter.get_stripe_size(), head_size); + + uint64_t ofs = 20 * 1024 * 1024 + head_size; + iter = manifest.obj_find(&dp, ofs + 100); + + ASSERT_TRUE(env.get_raw(iter.get_location()) == env.get_raw(last_obj)); + ASSERT_EQ(iter.get_stripe_ofs(), ofs); + ASSERT_EQ(iter.get_stripe_size(), obj_size - ofs); +} + +TEST(TestRGWManifest, multipart) { + test_rgw_env env; + int num_parts = 16; + vector <RGWObjManifest> pm(num_parts); + rgw_bucket bucket; + uint64_t part_size = 10 * 1024 * 1024; + uint64_t stripe_size = 4 * 1024 * 1024; + + string upload_id = "abc123"; + + for (int i = 0; i < num_parts; ++i) { + RGWObjManifest& manifest = pm[i]; + RGWObjManifest::generator gen; + manifest.set_prefix(upload_id); + + manifest.set_multipart_part_rule(stripe_size, i + 1); + + uint64_t ofs; + rgw_obj head; + for (ofs = 0; ofs < part_size; ofs += stripe_size) { + if (ofs == 0) { + rgw_placement_rule rule(env.zonegroup.default_placement.name, RGW_STORAGE_CLASS_STANDARD); + int r = gen.create_begin(g_ceph_context, &manifest, rule, nullptr, bucket, head); + ASSERT_EQ(r, 0); + continue; + } + gen.create_next(ofs); + } + + if (ofs > part_size) { + gen.create_next(part_size); + } + } + + RGWObjManifest m; + + for (int i = 0; i < num_parts; i++) { + m.append(&dp, pm[i], env.zonegroup, env.zone_params); + } + RGWObjManifest::obj_iterator iter; + for (iter = m.obj_begin(&dp); iter != m.obj_end(&dp); ++iter) { + 
RGWObjManifest::obj_iterator fiter = m.obj_find(&dp, iter.get_ofs()); + ASSERT_TRUE(env.get_raw(fiter.get_location()) == env.get_raw(iter.get_location())); + } + + ASSERT_EQ(m.get_obj_size(), num_parts * part_size); +} + +TEST(TestRGWManifest, old_obj_manifest) { + test_rgw_env env; + OldObjManifest old_manifest; + old_rgw_bucket old_bucket; + old_rgw_obj old_head; + + int obj_size = 40 * 1024 * 1024; + uint64_t stripe_size = 4 * 1024 * 1024; + uint64_t head_size = 512 * 1024; + + list<old_rgw_obj> old_objs; + + gen_old_obj(env, obj_size, head_size, stripe_size, &old_manifest, &old_bucket, &old_head, &old_objs); + + ASSERT_EQ(old_objs.size(), 11u); + + + bufferlist bl; + encode(old_manifest , bl); + + RGWObjManifest manifest; + + try { + auto iter = bl.cbegin(); + decode(manifest, iter); + } catch (buffer::error& err) { + ASSERT_TRUE(false); + } + + rgw_raw_obj last_obj; + + RGWObjManifest::obj_iterator iter; + auto liter = old_objs.begin(); + for (iter = manifest.obj_begin(&dp); + iter != manifest.obj_end(&dp) && liter != old_objs.end(); + ++iter, ++liter) { + rgw_pool old_pool(liter->bucket.data_pool); + string old_oid; + prepend_old_bucket_marker(old_bucket, liter->get_object(), old_oid); + rgw_raw_obj raw_old(old_pool, old_oid); + cout << "*liter=" << raw_old << " iter.get_location()=" << env.get_raw(iter.get_location()) << std::endl; + ASSERT_EQ(raw_old, env.get_raw(iter.get_location())); + + last_obj = env.get_raw(iter.get_location()); + } + + ASSERT_TRUE(liter == old_objs.end()); + ASSERT_TRUE(iter == manifest.obj_end(&dp)); + +} + + +int main(int argc, char **argv) { + auto args = argv_to_vec(argc, argv); + auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, + CODE_ENVIRONMENT_UTILITY, + CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); + common_init_finish(g_ceph_context); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + diff --git a/src/test/rgw/test_rgw_obj.cc b/src/test/rgw/test_rgw_obj.cc new file mode 100644 index 000000000..53d7897ae --- /dev/null +++ b/src/test/rgw/test_rgw_obj.cc @@ -0,0 +1,272 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2013 eNovance SAS <licensing@enovance.com> + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ +#include <iostream> +#include "common/ceph_json.h" +#include "common/Formatter.h" +#include "rgw_common.h" +#include "rgw_rados.h" +#include "services/svc_tier_rados.h" +#include "test_rgw_common.h" +#include <gtest/gtest.h> + +using namespace std; + +void check_parsed_correctly(rgw_obj& obj, const string& name, const string& ns, const string& instance) +{ + /* parse_raw_oid() */ + rgw_obj_key parsed_key; + ASSERT_EQ(true, rgw_obj_key::parse_raw_oid(obj.get_oid(), &parsed_key)); + + cout << "parsed: " << parsed_key << std::endl; + + ASSERT_EQ(name, parsed_key.name); + ASSERT_EQ(ns, parsed_key.ns); + ASSERT_EQ(instance, parsed_key.instance); + + /* translate_raw_obj_to_obj_in_ns() */ + rgw_obj_key tkey = parsed_key; + string tns = ns + "foo"; + ASSERT_EQ(0, rgw_obj_key::oid_to_key_in_ns(obj.get_oid(), &tkey, tns)); + + tkey = rgw_obj_key(); + tns = ns; + ASSERT_EQ(true, rgw_obj_key::oid_to_key_in_ns(obj.get_oid(), &tkey, tns)); + + cout << "parsed: " << tkey << std::endl; + + ASSERT_EQ(obj.key, tkey); + + /* strip_namespace_from_object() */ + + string strip_name = obj.get_oid(); + string strip_ns, strip_instance; + + ASSERT_EQ(true, rgw_obj_key::strip_namespace_from_name(strip_name, strip_ns, strip_instance)); + + cout << "stripped: " << strip_name << " ns=" << strip_ns << " i=" << strip_instance << std::endl; + + ASSERT_EQ(name, strip_name); + ASSERT_EQ(ns, strip_ns); + ASSERT_EQ(instance, strip_instance); +} + +void test_obj(const string& name, const string& ns, const string& instance) +{ + rgw_bucket b; + test_rgw_init_bucket(&b, "test"); + + JSONFormatter *formatter = new JSONFormatter(true); + + formatter->open_object_section("test"); + rgw_obj o(b, name); + rgw_obj obj1(o); + + if (!instance.empty()) { + obj1.key.instance = instance; + } + if (!ns.empty()) { + obj1.key.ns = ns; + } + + check_parsed_correctly(obj1, name, ns, instance); + encode_json("obj1", obj1, formatter); + + bufferlist bl; + encode(obj1, bl); + + rgw_obj obj2; + decode(obj2, bl); + check_parsed_correctly(obj2, name, ns, instance); + + encode_json("obj2", obj2, formatter); + + rgw_obj obj3(o); + bufferlist bl3; + encode(obj3, bl3); + decode(obj3, bl3); + encode_json("obj3", obj3, formatter); + + if (!instance.empty()) { + obj3.key.instance = instance; + } + if (!ns.empty()) { + obj3.key.ns = ns; + } + check_parsed_correctly(obj3, name, ns, instance); + + encode_json("obj3-2", obj3, formatter); + + formatter->close_section(); + + formatter->flush(cout); + + ASSERT_EQ(obj1, obj2); + ASSERT_EQ(obj1, obj3); + + + /* rgw_obj_key conversion */ + rgw_obj_index_key k; + obj1.key.get_index_key(&k); + + rgw_obj new_obj(b, k); + + ASSERT_EQ(obj1, new_obj); + + delete formatter; +} + +TEST(TestRGWObj, underscore) { + test_obj("_obj", "", ""); + test_obj("_obj", "ns", ""); + test_obj("_obj", "", "v1"); + test_obj("_obj", "ns", "v1"); +} + +TEST(TestRGWObj, no_underscore) { + test_obj("obj", "", ""); + test_obj("obj", "ns", ""); + test_obj("obj", "", "v1"); + test_obj("obj", "ns", "v1"); +} + +template <class T> +void dump(JSONFormatter& f, const string& name, const T& entity) +{ + f.open_object_section(name.c_str()); + ::encode_json(name.c_str(), entity, &f); + f.close_section(); + f.flush(cout); +} + +static void test_obj_to_raw(test_rgw_env& env, const rgw_bucket& b, + const string& name, const string& instance, const string& ns, + const string& placement_id) +{ + JSONFormatter f(true); + dump(f, "bucket", b); + rgw_obj obj = test_rgw_create_obj(b, name, instance, ns); + dump(f, "obj", obj); + + rgw_obj_select 
s(obj); + rgw_raw_obj raw_obj = s.get_raw_obj(env.zonegroup, env.zone_params); + dump(f, "raw_obj", raw_obj); + + if (!placement_id.empty()) { + ASSERT_EQ(raw_obj.pool, env.get_placement(placement_id).data_pool); + } else { + ASSERT_EQ(raw_obj.pool, b.explicit_placement.data_pool); + } + ASSERT_EQ(raw_obj.oid, test_rgw_get_obj_oid(obj)); + + rgw_obj new_obj; + RGWSI_Tier_RADOS::raw_obj_to_obj(b, raw_obj, &new_obj); + + dump(f, "new_obj", new_obj); + + ASSERT_EQ(obj, new_obj); + +} + +TEST(TestRGWObj, obj_to_raw) { + test_rgw_env env; + + rgw_bucket b; + test_rgw_init_bucket(&b, "test"); + + rgw_bucket eb; + test_rgw_init_explicit_placement_bucket(&eb, "ebtest"); + + for (auto name : { "myobj", "_myobj", "_myobj_"}) { + for (auto inst : { "", "inst"}) { + for (auto ns : { "", "ns"}) { + test_obj_to_raw(env, b, name, inst, ns, env.zonegroup.default_placement.name); + test_obj_to_raw(env, eb, name, inst, ns, string()); + } + } + } +} + +TEST(TestRGWObj, old_to_raw) { + JSONFormatter f(true); + test_rgw_env env; + + old_rgw_bucket eb; + test_rgw_init_old_bucket(&eb, "ebtest"); + + for (auto name : { "myobj", "_myobj", "_myobj_"}) { + for (string inst : { "", "inst"}) { + for (string ns : { "", "ns"}) { + old_rgw_obj old(eb, name); + if (!inst.empty()) { + old.set_instance(inst); + } + if (!ns.empty()) { + old.set_ns(ns); + } + + bufferlist bl; + + encode(old, bl); + + rgw_obj new_obj; + rgw_raw_obj raw_obj; + + try { + auto iter = bl.cbegin(); + decode(new_obj, iter); + + iter = bl.begin(); + decode(raw_obj, iter); + } catch (buffer::error& err) { + ASSERT_TRUE(false); + } + + bl.clear(); + + rgw_obj new_obj2; + rgw_raw_obj raw_obj2; + + encode(new_obj, bl); + + dump(f, "raw_obj", raw_obj); + dump(f, "new_obj", new_obj); + cout << "raw=" << raw_obj << std::endl; + + try { + auto iter = bl.cbegin(); + decode(new_obj2, iter); + + /* + can't decode raw obj here, because we didn't encode an old versioned + object + */ + + bl.clear(); + encode(raw_obj, bl); + iter = bl.begin(); + decode(raw_obj2, iter); + } catch (buffer::error& err) { + ASSERT_TRUE(false); + } + + dump(f, "raw_obj2", raw_obj2); + dump(f, "new_obj2", new_obj2); + cout << "raw2=" << raw_obj2 << std::endl; + + ASSERT_EQ(new_obj, new_obj2); + ASSERT_EQ(raw_obj, raw_obj2); + } + } + } +} diff --git a/src/test/rgw/test_rgw_period_history.cc b/src/test/rgw/test_rgw_period_history.cc new file mode 100644 index 000000000..25ea87d3a --- /dev/null +++ b/src/test/rgw/test_rgw_period_history.cc @@ -0,0 +1,336 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2015 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ +#include "rgw_period_history.h" +#include "rgw_rados.h" +#include "rgw_zone.h" +#include "global/global_init.h" +#include "common/ceph_argparse.h" +#include <boost/lexical_cast.hpp> +#include <gtest/gtest.h> + +using namespace std; +namespace { + +// construct a period with the given fields +RGWPeriod make_period(const std::string& id, epoch_t realm_epoch, + const std::string& predecessor) +{ + RGWPeriod period(id); + period.set_realm_epoch(realm_epoch); + period.set_predecessor(predecessor); + return period; +} + +const auto current_period = make_period("5", 5, "4"); + +// mock puller that throws an exception if it's called +struct ErrorPuller : public RGWPeriodHistory::Puller { + int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override { + throw std::runtime_error("unexpected call to pull"); + } +}; +ErrorPuller puller; // default puller + +// mock puller that records the period ids requested and returns an error +using Ids = std::vector<std::string>; +class RecordingPuller : public RGWPeriodHistory::Puller { + const int error; + public: + explicit RecordingPuller(int error) : error(error) {} + Ids ids; + int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override { + ids.push_back(id); + return error; + } +}; + +// mock puller that returns a fake period by parsing the period id +struct NumericPuller : public RGWPeriodHistory::Puller { + int pull(const DoutPrefixProvider *dpp, const std::string& id, RGWPeriod& period, optional_yield) override { + // relies on numeric period ids to divine the realm_epoch + auto realm_epoch = boost::lexical_cast<epoch_t>(id); + auto predecessor = boost::lexical_cast<std::string>(realm_epoch-1); + period = make_period(id, realm_epoch, predecessor); + return 0; + } +}; + +} // anonymous namespace + +// for ASSERT_EQ() +bool operator==(const RGWPeriod& lhs, const RGWPeriod& rhs) +{ + return lhs.get_id() == rhs.get_id() + && lhs.get_realm_epoch() == rhs.get_realm_epoch(); +} + +TEST(PeriodHistory, InsertBefore) +{ + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + + // inserting right before current_period 5 will attach to history + auto c = history.insert(make_period("4", 4, "3")); + ASSERT_TRUE(c); + ASSERT_FALSE(c.has_prev()); + ASSERT_TRUE(c.has_next()); + + // cursor can traverse forward to current_period + c.next(); + ASSERT_EQ(5u, c.get_epoch()); + ASSERT_EQ(current_period, c.get_period()); +} + +TEST(PeriodHistory, InsertAfter) +{ + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + + // inserting right after current_period 5 will attach to history + auto c = history.insert(make_period("6", 6, "5")); + ASSERT_TRUE(c); + ASSERT_TRUE(c.has_prev()); + ASSERT_FALSE(c.has_next()); + + // cursor can traverse back to current_period + c.prev(); + ASSERT_EQ(5u, c.get_epoch()); + ASSERT_EQ(current_period, c.get_period()); +} + +TEST(PeriodHistory, InsertWayBefore) +{ + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + + // inserting way before current_period 5 will not attach to history + auto c = history.insert(make_period("1", 1, "")); + ASSERT_FALSE(c); + ASSERT_EQ(0, c.get_error()); +} + +TEST(PeriodHistory, InsertWayAfter) +{ + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + + // inserting way after current_period 5 will not attach to history + auto c = history.insert(make_period("9", 9, "8")); + ASSERT_FALSE(c); + ASSERT_EQ(0, c.get_error()); +} + +TEST(PeriodHistory, 
PullPredecessorsBeforeCurrent) +{ + RecordingPuller puller{-EFAULT}; + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); + + // create a disjoint history at 1 and verify that periods are requested + // backwards from current_period + auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); + ASSERT_FALSE(c1); + ASSERT_EQ(-EFAULT, c1.get_error()); + ASSERT_EQ(Ids{"4"}, puller.ids); + + auto c4 = history.insert(make_period("4", 4, "3")); + ASSERT_TRUE(c4); + + c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); + ASSERT_FALSE(c1); + ASSERT_EQ(-EFAULT, c1.get_error()); + ASSERT_EQ(Ids({"4", "3"}), puller.ids); + + auto c3 = history.insert(make_period("3", 3, "2")); + ASSERT_TRUE(c3); + + c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); + ASSERT_FALSE(c1); + ASSERT_EQ(-EFAULT, c1.get_error()); + ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids); + + auto c2 = history.insert(make_period("2", 2, "1")); + ASSERT_TRUE(c2); + + c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); + ASSERT_TRUE(c1); + ASSERT_EQ(Ids({"4", "3", "2"}), puller.ids); +} + +TEST(PeriodHistory, PullPredecessorsAfterCurrent) +{ + RecordingPuller puller{-EFAULT}; + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); + + // create a disjoint history at 9 and verify that periods are requested + // backwards down to current_period + auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield); + ASSERT_FALSE(c9); + ASSERT_EQ(-EFAULT, c9.get_error()); + ASSERT_EQ(Ids{"8"}, puller.ids); + + auto c8 = history.attach(&dp, make_period("8", 8, "7"), null_yield); + ASSERT_FALSE(c8); + ASSERT_EQ(-EFAULT, c8.get_error()); + ASSERT_EQ(Ids({"8", "7"}), puller.ids); + + auto c7 = history.attach(&dp, make_period("7", 7, "6"), null_yield); + ASSERT_FALSE(c7); + ASSERT_EQ(-EFAULT, c7.get_error()); + ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids); + + auto c6 = history.attach(&dp, make_period("6", 6, "5"), null_yield); + ASSERT_TRUE(c6); + ASSERT_EQ(Ids({"8", "7", "6"}), puller.ids); +} + +TEST(PeriodHistory, MergeBeforeCurrent) +{ + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + + auto c = history.get_current(); + ASSERT_FALSE(c.has_prev()); + + // create a disjoint history at 3 + auto c3 = history.insert(make_period("3", 3, "2")); + ASSERT_FALSE(c3); + + // insert the missing period to merge 3 and 5 + auto c4 = history.insert(make_period("4", 4, "3")); + ASSERT_TRUE(c4); + ASSERT_TRUE(c4.has_prev()); + ASSERT_TRUE(c4.has_next()); + + // verify that the merge didn't destroy the original cursor's history + ASSERT_EQ(current_period, c.get_period()); + ASSERT_TRUE(c.has_prev()); +} + +TEST(PeriodHistory, MergeAfterCurrent) +{ + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + + auto c = history.get_current(); + ASSERT_FALSE(c.has_next()); + + // create a disjoint history at 7 + auto c7 = history.insert(make_period("7", 7, "6")); + ASSERT_FALSE(c7); + + // insert the missing period to merge 5 and 7 + auto c6 = history.insert(make_period("6", 6, "5")); + ASSERT_TRUE(c6); + ASSERT_TRUE(c6.has_prev()); + ASSERT_TRUE(c6.has_next()); + + // verify that the merge didn't destroy the original cursor's history + ASSERT_EQ(current_period, c.get_period()); + ASSERT_TRUE(c.has_next()); +} + +TEST(PeriodHistory, MergeWithoutCurrent) +{ + RGWPeriodHistory history(g_ceph_context, &puller, current_period); 
+ + // create a disjoint history at 7 + auto c7 = history.insert(make_period("7", 7, "6")); + ASSERT_FALSE(c7); + + // create a disjoint history at 9 + auto c9 = history.insert(make_period("9", 9, "8")); + ASSERT_FALSE(c9); + + // insert the missing period to merge 7 and 9 + auto c8 = history.insert(make_period("8", 8, "7")); + ASSERT_FALSE(c8); // not connected to current_period yet + + // insert the missing period to merge 5 and 7-9 + auto c = history.insert(make_period("6", 6, "5")); + ASSERT_TRUE(c); + ASSERT_TRUE(c.has_next()); + + // verify that we merged all periods from 5-9 + c.next(); + ASSERT_EQ(7u, c.get_epoch()); + ASSERT_TRUE(c.has_next()); + c.next(); + ASSERT_EQ(8u, c.get_epoch()); + ASSERT_TRUE(c.has_next()); + c.next(); + ASSERT_EQ(9u, c.get_epoch()); + ASSERT_FALSE(c.has_next()); +} + +TEST(PeriodHistory, AttachBefore) +{ + NumericPuller puller; + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); + + auto c1 = history.attach(&dp, make_period("1", 1, ""), null_yield); + ASSERT_TRUE(c1); + + // verify that we pulled and merged all periods from 1-5 + auto c = history.get_current(); + ASSERT_TRUE(c); + ASSERT_TRUE(c.has_prev()); + c.prev(); + ASSERT_EQ(4u, c.get_epoch()); + ASSERT_TRUE(c.has_prev()); + c.prev(); + ASSERT_EQ(3u, c.get_epoch()); + ASSERT_TRUE(c.has_prev()); + c.prev(); + ASSERT_EQ(2u, c.get_epoch()); + ASSERT_TRUE(c.has_prev()); + c.prev(); + ASSERT_EQ(1u, c.get_epoch()); + ASSERT_FALSE(c.has_prev()); +} + +TEST(PeriodHistory, AttachAfter) +{ + NumericPuller puller; + RGWPeriodHistory history(g_ceph_context, &puller, current_period); + const DoutPrefix dp(g_ceph_context, 1, "test rgw period history: "); + + auto c9 = history.attach(&dp, make_period("9", 9, "8"), null_yield); + ASSERT_TRUE(c9); + + // verify that we pulled and merged all periods from 5-9 + auto c = history.get_current(); + ASSERT_TRUE(c); + ASSERT_TRUE(c.has_next()); + c.next(); + ASSERT_EQ(6u, c.get_epoch()); + ASSERT_TRUE(c.has_next()); + c.next(); + ASSERT_EQ(7u, c.get_epoch()); + ASSERT_TRUE(c.has_next()); + c.next(); + ASSERT_EQ(8u, c.get_epoch()); + ASSERT_TRUE(c.has_next()); + c.next(); + ASSERT_EQ(9u, c.get_epoch()); + ASSERT_FALSE(c.has_next()); +} + +int main(int argc, char** argv) +{ + auto args = argv_to_vec(argc, argv); + auto cct = global_init(NULL, args, CEPH_ENTITY_TYPE_CLIENT, + CODE_ENVIRONMENT_UTILITY, + CINIT_FLAG_NO_DEFAULT_CONFIG_FILE); + common_init_finish(g_ceph_context); + + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/src/test/rgw/test_rgw_putobj.cc b/src/test/rgw/test_rgw_putobj.cc new file mode 100644 index 000000000..35abc3036 --- /dev/null +++ b/src/test/rgw/test_rgw_putobj.cc @@ -0,0 +1,196 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2018 Red Hat, Inc. + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#include "rgw_putobj.h" +#include <gtest/gtest.h> + +inline bufferlist string_buf(const char* buf) { + bufferlist bl; + bl.append(buffer::create_static(strlen(buf), (char*)buf)); + return bl; +} + +struct Op { + std::string data; + uint64_t offset; +}; +inline bool operator==(const Op& lhs, const Op& rhs) { + return lhs.data == rhs.data && lhs.offset == rhs.offset; +} +inline std::ostream& operator<<(std::ostream& out, const Op& op) { + return out << "{off=" << op.offset << " data='" << op.data << "'}"; +} + +struct MockProcessor : rgw::sal::DataProcessor { + std::vector<Op> ops; + + int process(bufferlist&& data, uint64_t offset) override { + ops.push_back({data.to_str(), offset}); + return {}; + } +}; + +TEST(PutObj_Chunk, FlushHalf) +{ + MockProcessor mock; + rgw::putobj::ChunkProcessor chunk(&mock, 4); + + ASSERT_EQ(0, chunk.process(string_buf("22"), 0)); + ASSERT_TRUE(mock.ops.empty()); // no writes + + ASSERT_EQ(0, chunk.process({}, 2)); // flush + ASSERT_EQ(2u, mock.ops.size()); + EXPECT_EQ(Op({"22", 0}), mock.ops[0]); + EXPECT_EQ(Op({"", 2}), mock.ops[1]); +} + +TEST(PutObj_Chunk, One) +{ + MockProcessor mock; + rgw::putobj::ChunkProcessor chunk(&mock, 4); + + ASSERT_EQ(0, chunk.process(string_buf("4444"), 0)); + ASSERT_EQ(1u, mock.ops.size()); + EXPECT_EQ(Op({"4444", 0}), mock.ops[0]); + + ASSERT_EQ(0, chunk.process({}, 4)); // flush + ASSERT_EQ(2u, mock.ops.size()); + EXPECT_EQ(Op({"", 4}), mock.ops[1]); +} + +TEST(PutObj_Chunk, OneAndFlushHalf) +{ + MockProcessor mock; + rgw::putobj::ChunkProcessor chunk(&mock, 4); + + ASSERT_EQ(0, chunk.process(string_buf("22"), 0)); + ASSERT_TRUE(mock.ops.empty()); + + ASSERT_EQ(0, chunk.process(string_buf("4444"), 2)); + ASSERT_EQ(1u, mock.ops.size()); + EXPECT_EQ(Op({"2244", 0}), mock.ops[0]); + + ASSERT_EQ(0, chunk.process({}, 6)); // flush + ASSERT_EQ(3u, mock.ops.size()); + EXPECT_EQ(Op({"44", 4}), mock.ops[1]); + EXPECT_EQ(Op({"", 6}), mock.ops[2]); +} + +TEST(PutObj_Chunk, Two) +{ + MockProcessor mock; + rgw::putobj::ChunkProcessor chunk(&mock, 4); + + ASSERT_EQ(0, chunk.process(string_buf("88888888"), 0)); + ASSERT_EQ(2u, mock.ops.size()); + EXPECT_EQ(Op({"8888", 0}), mock.ops[0]); + EXPECT_EQ(Op({"8888", 4}), mock.ops[1]); + + ASSERT_EQ(0, chunk.process({}, 8)); // flush + ASSERT_EQ(3u, mock.ops.size()); + EXPECT_EQ(Op({"", 8}), mock.ops[2]); +} + +TEST(PutObj_Chunk, TwoAndFlushHalf) +{ + MockProcessor mock; + rgw::putobj::ChunkProcessor chunk(&mock, 4); + + ASSERT_EQ(0, chunk.process(string_buf("22"), 0)); + ASSERT_TRUE(mock.ops.empty()); + + ASSERT_EQ(0, chunk.process(string_buf("88888888"), 2)); + ASSERT_EQ(2u, mock.ops.size()); + EXPECT_EQ(Op({"2288", 0}), mock.ops[0]); + EXPECT_EQ(Op({"8888", 4}), mock.ops[1]); + + ASSERT_EQ(0, chunk.process({}, 10)); // flush + ASSERT_EQ(4u, mock.ops.size()); + EXPECT_EQ(Op({"88", 8}), mock.ops[2]); + EXPECT_EQ(Op({"", 10}), mock.ops[3]); +} + + +using StripeMap = std::map<uint64_t, uint64_t>; // offset, stripe_size + +class StripeMapGen : public rgw::putobj::StripeGenerator { + const StripeMap& stripes; + public: + StripeMapGen(const StripeMap& stripes) : stripes(stripes) {} + + int next(uint64_t offset, uint64_t *stripe_size) override { + auto i = stripes.find(offset); + if (i == stripes.end()) { + return -ENOENT; + } + *stripe_size = i->second; + return 0; + } +}; + +TEST(PutObj_Stripe, DifferentStripeSize) +{ + MockProcessor mock; + StripeMap stripes{ + { 0, 4}, + { 4, 6}, + {10, 2} + }; + StripeMapGen gen(stripes); + rgw::putobj::StripeProcessor processor(&mock, &gen, 
stripes.begin()->second); + + ASSERT_EQ(0, processor.process(string_buf("22"), 0)); + ASSERT_EQ(1u, mock.ops.size()); + EXPECT_EQ(Op({"22", 0}), mock.ops[0]); + + ASSERT_EQ(0, processor.process(string_buf("4444"), 2)); + ASSERT_EQ(4u, mock.ops.size()); + EXPECT_EQ(Op({"44", 2}), mock.ops[1]); + EXPECT_EQ(Op({"", 4}), mock.ops[2]); // flush + EXPECT_EQ(Op({"44", 0}), mock.ops[3]); + + ASSERT_EQ(0, processor.process(string_buf("666666"), 6)); + ASSERT_EQ(7u, mock.ops.size()); + EXPECT_EQ(Op({"6666", 2}), mock.ops[4]); + EXPECT_EQ(Op({"", 6}), mock.ops[5]); // flush + EXPECT_EQ(Op({"66", 0}), mock.ops[6]); + + ASSERT_EQ(0, processor.process({}, 12)); + ASSERT_EQ(8u, mock.ops.size()); + EXPECT_EQ(Op({"", 2}), mock.ops[7]); // flush + + // gen returns an error past this + ASSERT_EQ(-ENOENT, processor.process(string_buf("1"), 12)); +} + +TEST(PutObj_Stripe, SkipFirstChunk) +{ + MockProcessor mock; + StripeMap stripes{ + {0, 4}, + {4, 4}, + }; + StripeMapGen gen(stripes); + rgw::putobj::StripeProcessor processor(&mock, &gen, stripes.begin()->second); + + ASSERT_EQ(0, processor.process(string_buf("666666"), 2)); + ASSERT_EQ(3u, mock.ops.size()); + EXPECT_EQ(Op({"66", 2}), mock.ops[0]); + EXPECT_EQ(Op({"", 4}), mock.ops[1]); // flush + EXPECT_EQ(Op({"6666", 0}), mock.ops[2]); + + ASSERT_EQ(0, processor.process({}, 8)); + ASSERT_EQ(4u, mock.ops.size()); + EXPECT_EQ(Op({"", 4}), mock.ops[3]); // flush +} diff --git a/src/test/rgw/test_rgw_ratelimit.cc b/src/test/rgw/test_rgw_ratelimit.cc new file mode 100644 index 000000000..01be4df48 --- /dev/null +++ b/src/test/rgw/test_rgw_ratelimit.cc @@ -0,0 +1,376 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab ft=cpp + +#include <gtest/gtest.h> +#include "rgw_ratelimit.h" + + +using namespace std::chrono_literals; + +TEST(RGWRateLimit, op_limit_not_enabled) +{ + // info.enabled = false, so no limit + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("PUT", key, time, &info); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimit, reject_op_over_limit) +{ + // check that request is being rejected because there are not enough tokens + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_ops = 1; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("GET", key, time, &info); + time = ceph::coarse_real_clock::now(); + success = ratelimit.should_rate_limit("GET", key, time, &info); + EXPECT_EQ(true, success); +} +TEST(RGWRateLimit, accept_op_after_giveback) +{ + // check that giveback is working fine + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_ops = 1; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("GET", key, time, &info); + ratelimit.giveback_tokens("GET", key); + time = ceph::coarse_real_clock::now(); + success = ratelimit.should_rate_limit("GET", key, time, &info); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimit, accept_op_after_refill) +{ + // check that tokens are being filled properly + std::atomic_bool replacing; + std::condition_variable cv; + 
RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_ops = 1; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("GET", key, time, &info); + time += 61s; + success = ratelimit.should_rate_limit("GET", key, time, &info); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimit, reject_bw_over_limit) +{ + // check that a newer request is rejected if there are not enough tokens (bw) + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_bytes = 1; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("GET", key, time, &info); + ratelimit.decrease_bytes("GET",key, 2, &info); + time = ceph::coarse_real_clock::now(); + success = ratelimit.should_rate_limit("GET", key, time, &info); + EXPECT_EQ(true, success); +} +TEST(RGWRateLimit, accept_bw) +{ + // check that when there are enough tokens (bw) the request is still being served + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_bytes = 2; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("GET", key, time, &info); + ratelimit.decrease_bytes("GET",key, 1, &info); + time = ceph::coarse_real_clock::now(); + success = ratelimit.should_rate_limit("GET", key, time, &info); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimit, check_bw_debt_at_max_120secs) +{ + // check that the bandwidth debt is not larger than 120 seconds + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_bytes = 2; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("GET", key, time, &info); + ratelimit.decrease_bytes("GET",key, 100, &info); + time += 121s; + success = ratelimit.should_rate_limit("GET", key, time, &info); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimit, check_that_bw_limit_not_affect_ops) +{ + // check that a high read bytes limit does not affect the ops limit + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_ops = 1; + info.max_read_bytes = 100000000; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("GET", key, time, &info); + ratelimit.decrease_bytes("GET",key, 10000, &info); + time = ceph::coarse_real_clock::now(); + success = ratelimit.should_rate_limit("GET", key, time, &info); + EXPECT_EQ(true, success); +} +TEST(RGWRateLimit, read_limit_does_not_affect_writes) +{ + // read limit does not affect writes + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_ops = 1; + info.max_read_bytes = 100000000; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("PUT", key, time, &info); + ratelimit.decrease_bytes("PUT",key, 10000, &info); + time = ceph::coarse_real_clock::now(); + success = ratelimit.should_rate_limit("PUT", key, time, &info); + EXPECT_EQ(false, 
success); +} +TEST(RGWRateLimit, write_limit_does_not_affect_reads) +{ + // write limit does not affect reads + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + info.max_write_ops = 1; + info.max_write_bytes = 100000000; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("GET", key, time, &info); + ratelimit.decrease_bytes("GET",key, 10000, &info); + time = ceph::coarse_real_clock::now(); + success = ratelimit.should_rate_limit("GET", key, time, &info); + EXPECT_EQ(false, success); +} + +TEST(RGWRateLimit, allow_unlimited_access) +{ + // 0 values in RGWRateLimitInfo should allow unlimited access + std::atomic_bool replacing; + std::condition_variable cv; + RateLimiter ratelimit(replacing, cv); + RGWRateLimitInfo info; + info.enabled = true; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + bool success = ratelimit.should_rate_limit("GET", key, time, &info); + EXPECT_EQ(false, success); +} + +TEST(RGWRateLimitGC, NO_GC_AHEAD_OF_TIME) +{ + // Test if GC is not starting the replace before getting to map_size * 0.9 + // Please make sure to change those values when you change the map_size in the code + + std::shared_ptr<ActiveRateLimiter> ratelimit(new ActiveRateLimiter(g_ceph_context)); + ratelimit->start(); + auto active = ratelimit->get_active(); + RGWRateLimitInfo info; + auto time = ceph::coarse_real_clock::now(); + std::string key = "uuser123"; + active->should_rate_limit("GET", key, time, &info); + auto activegc = ratelimit->get_active(); + EXPECT_EQ(activegc, active); +} +TEST(RGWRateLimiterGC, GC_IS_WORKING) +{ + // Test if GC is replacing the active RateLimiter + // Please make sure to change those values when you change the map_size in the code + + std::shared_ptr<ActiveRateLimiter> ratelimit(new ActiveRateLimiter(g_ceph_context)); + ratelimit->start(); + auto active = ratelimit->get_active(); + RGWRateLimitInfo info; + info.enabled = true; + auto time = ceph::coarse_real_clock::now(); + std::string key = "-1"; + for(int i = 0; i < 2000000; i++) + { + active->should_rate_limit("GET", key, time, &info); + key = std::to_string(i); + } + auto activegc = ratelimit->get_active(); + EXPECT_NE(activegc, active); +} + + +TEST(RGWRateLimitEntry, op_limit_not_enabled) +{ + // info.enabled = false, so no limit + RateLimiterEntry entry; + RGWRateLimitInfo info; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(false, &info, time); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimitEntry, reject_op_over_limit) +{ + // check that request is being rejected because there are not enough tokens + + RGWRateLimitInfo info; + RateLimiterEntry entry; + info.enabled = true; + info.max_read_ops = 1; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(true, &info, time); + time = ceph::coarse_real_clock::now().time_since_epoch(); + success = entry.should_rate_limit(true, &info, time); + EXPECT_EQ(true, success); +} +TEST(RGWRateLimitEntry, accept_op_after_giveback) +{ + // check that giveback is working fine + RGWRateLimitInfo info; + RateLimiterEntry entry; + info.enabled = true; + info.max_read_ops = 1; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(true, &info, time); + entry.giveback_tokens(true); + time = 
ceph::coarse_real_clock::now().time_since_epoch(); + success = entry.should_rate_limit(true, &info, time); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimitEntry, accept_op_after_refill) +{ + // check that tokens are being filled properly + RateLimiterEntry entry; + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_ops = 1; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(true, &info, time); + time += 61s; + success = entry.should_rate_limit(true, &info, time); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimitEntry, reject_bw_over_limit) +{ + // check that a newer request is rejected if there are not enough tokens (bw) + RateLimiterEntry entry; + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_bytes = 1; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(true, &info, time); + entry.decrease_bytes(true, 2, &info); + time = ceph::coarse_real_clock::now().time_since_epoch(); + success = entry.should_rate_limit(true, &info, time); + EXPECT_EQ(true, success); +} +TEST(RGWRateLimitEntry, accept_bw) +{ + // check that when there are enough tokens (bw) the request is still being served + RateLimiterEntry entry; + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_bytes = 2; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(true, &info, time); + entry.decrease_bytes(true, 1, &info); + time = ceph::coarse_real_clock::now().time_since_epoch(); + success = entry.should_rate_limit(true, &info, time); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimitEntry, check_bw_debt_at_max_120secs) +{ + // check that the bandwidth debt is not larger than 120 seconds + RateLimiterEntry entry; + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_bytes = 2; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(true, &info, time); + entry.decrease_bytes(true, 100, &info); + time += 121s; + success = entry.should_rate_limit(true, &info, time); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimitEntry, check_that_bw_limit_not_affect_ops) +{ + // check that a high read bytes limit does not affect the ops limit + RateLimiterEntry entry; + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_ops = 1; + info.max_read_bytes = 100000000; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(true, &info, time); + entry.decrease_bytes(true, 10000, &info); + time = ceph::coarse_real_clock::now().time_since_epoch(); + success = entry.should_rate_limit(true, &info, time); + EXPECT_EQ(true, success); +} +TEST(RGWRateLimitEntry, read_limit_does_not_affect_writes) +{ + // read limit does not affect writes + RateLimiterEntry entry; + RGWRateLimitInfo info; + info.enabled = true; + info.max_read_ops = 1; + info.max_read_bytes = 100000000; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(false, &info, time); + entry.decrease_bytes(false, 10000, &info); + time = ceph::coarse_real_clock::now().time_since_epoch(); + success = entry.should_rate_limit(false, &info, time); + EXPECT_EQ(false, success); +} +TEST(RGWRateLimitEntry, write_limit_does_not_affect_reads) +{ + // write limit does not affect reads + RateLimiterEntry entry; + RGWRateLimitInfo info; + info.enabled = true; + info.max_write_ops = 1; + info.max_write_bytes = 100000000; + auto time = 
ceph::coarse_real_clock::now().time_since_epoch(); + std::string key = "uuser123"; + bool success = entry.should_rate_limit(true, &info, time); + entry.decrease_bytes(true, 10000, &info); + time = ceph::coarse_real_clock::now().time_since_epoch(); + success = entry.should_rate_limit(true, &info, time); + EXPECT_EQ(false, success); +} + +TEST(RGWRateLimitEntry, allow_unlimited_access) +{ + // 0 values in RGWRateLimitInfo should allow unlimited access (default value) + RateLimiterEntry entry; + RGWRateLimitInfo info; + info.enabled = true; + auto time = ceph::coarse_real_clock::now().time_since_epoch(); + bool success = entry.should_rate_limit(true, &info, time); + EXPECT_EQ(false, success); +} diff --git a/src/test/rgw/test_rgw_reshard.cc b/src/test/rgw/test_rgw_reshard.cc new file mode 100644 index 000000000..da41b967f --- /dev/null +++ b/src/test/rgw/test_rgw_reshard.cc @@ -0,0 +1,68 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2019 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#include "rgw_reshard.h" +#include <gtest/gtest.h> + + +TEST(TestRGWReshard, dynamic_reshard_shard_count) +{ + // assuming we have prime numbers up to 1999 + ASSERT_EQ(1999u, RGWBucketReshard::get_max_prime_shards()) << + "initial list has primes up to 1999"; + + ASSERT_EQ(1u, RGWBucketReshard::get_prime_shards_greater_or_equal(1)) << + "we allow for 1 shard even though it's not prime"; + ASSERT_EQ(809u, RGWBucketReshard::get_prime_shards_greater_or_equal(808)) << + "809 is prime"; + ASSERT_EQ(809u, RGWBucketReshard::get_prime_shards_greater_or_equal(809)) << + "809 is prime"; + ASSERT_EQ(811u, RGWBucketReshard::get_prime_shards_greater_or_equal(810)) << + "811 is prime"; + ASSERT_EQ(811u, RGWBucketReshard::get_prime_shards_greater_or_equal(811)) << + "811 is prime"; + ASSERT_EQ(821u, RGWBucketReshard::get_prime_shards_greater_or_equal(812)) << + "821 is prime"; + + ASSERT_EQ(1u, RGWBucketReshard::get_prime_shards_less_or_equal(1)) << + "we allow for 1 shard even though it's not prime"; + ASSERT_EQ(797u, RGWBucketReshard::get_prime_shards_less_or_equal(808)) << + "809 is prime"; + ASSERT_EQ(809u, RGWBucketReshard::get_prime_shards_less_or_equal(809)) << + "809 is prime"; + ASSERT_EQ(809u, RGWBucketReshard::get_prime_shards_less_or_equal(810)) << + "811 is prime"; + ASSERT_EQ(811u, RGWBucketReshard::get_prime_shards_less_or_equal(811)) << + "811 is prime"; + ASSERT_EQ(811u, RGWBucketReshard::get_prime_shards_less_or_equal(812)) << + "821 is prime"; + + // tests when max dynamic shards is equal to end of prime list + ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1998, 1999)); + ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1999, 1999)); + ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(2000, 1999)); + ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(2001, 1999)); + + // tests when max dynamic shards is above end of prime list + ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1998, 3000)); + ASSERT_EQ(1999u, RGWBucketReshard::get_preferred_shards(1999, 3000)); + ASSERT_EQ(2000u, RGWBucketReshard::get_preferred_shards(2000, 3000)); + ASSERT_EQ(2001u, RGWBucketReshard::get_preferred_shards(2001, 3000)); + + // tests when max dynamic shards is below end of prime 
list + ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(1998, 500)); + ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(1999, 500)); + ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(2000, 500)); + ASSERT_EQ(499u, RGWBucketReshard::get_preferred_shards(2001, 500)); +} diff --git a/src/test/rgw/test_rgw_reshard_wait.cc b/src/test/rgw/test_rgw_reshard_wait.cc new file mode 100644 index 000000000..06caae34a --- /dev/null +++ b/src/test/rgw/test_rgw_reshard_wait.cc @@ -0,0 +1,164 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2018 Red Hat, Inc. + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. + * + */ + +#include "rgw_reshard.h" +#include <spawn/spawn.hpp> + +#include <gtest/gtest.h> + +using namespace std::chrono_literals; +using Clock = RGWReshardWait::Clock; + +TEST(ReshardWait, wait_block) +{ + constexpr ceph::timespan wait_duration = 10ms; + RGWReshardWait waiter(wait_duration); + + const auto start = Clock::now(); + EXPECT_EQ(0, waiter.wait(null_yield)); + const ceph::timespan elapsed = Clock::now() - start; + + EXPECT_LE(wait_duration, elapsed); // waited at least 10ms + waiter.stop(); +} + +TEST(ReshardWait, stop_block) +{ + constexpr ceph::timespan short_duration = 10ms; + constexpr ceph::timespan long_duration = 10s; + + RGWReshardWait long_waiter(long_duration); + RGWReshardWait short_waiter(short_duration); + + const auto start = Clock::now(); + std::thread thread([&long_waiter] { + EXPECT_EQ(-ECANCELED, long_waiter.wait(null_yield)); + }); + + EXPECT_EQ(0, short_waiter.wait(null_yield)); + + long_waiter.stop(); // cancel long waiter + + thread.join(); + const ceph::timespan elapsed = Clock::now() - start; + + EXPECT_LE(short_duration, elapsed); // waited at least 10ms + EXPECT_GT(long_duration, elapsed); // waited less than 10s + short_waiter.stop(); +} + +TEST(ReshardWait, wait_yield) +{ + constexpr ceph::timespan wait_duration = 50ms; + RGWReshardWait waiter(wait_duration); + + boost::asio::io_context context; + spawn::spawn(context, [&] (yield_context yield) { + EXPECT_EQ(0, waiter.wait(optional_yield{context, yield})); + }); + + const auto start = Clock::now(); + EXPECT_EQ(1u, context.poll()); // spawn + EXPECT_FALSE(context.stopped()); + + EXPECT_EQ(1u, context.run_one()); // timeout + EXPECT_TRUE(context.stopped()); + const ceph::timespan elapsed = Clock::now() - start; + + EXPECT_LE(wait_duration, elapsed); // waited at least 10ms + waiter.stop(); +} + +TEST(ReshardWait, stop_yield) +{ + constexpr ceph::timespan short_duration = 50ms; + constexpr ceph::timespan long_duration = 10s; + + RGWReshardWait long_waiter(long_duration); + RGWReshardWait short_waiter(short_duration); + + boost::asio::io_context context; + spawn::spawn(context, + [&] (yield_context yield) { + EXPECT_EQ(-ECANCELED, long_waiter.wait(optional_yield{context, yield})); + }); + + const auto start = Clock::now(); + EXPECT_EQ(1u, context.poll()); // spawn + EXPECT_FALSE(context.stopped()); + + EXPECT_EQ(0, short_waiter.wait(null_yield)); + + long_waiter.stop(); // cancel long waiter + + EXPECT_EQ(1u, context.run_one_for(short_duration)); // timeout + EXPECT_TRUE(context.stopped()); + const ceph::timespan elapsed = Clock::now() - start; + + EXPECT_LE(short_duration, elapsed); // waited at least 10ms + 
EXPECT_GT(long_duration, elapsed); // waited less than 10s + short_waiter.stop(); +} + +TEST(ReshardWait, stop_multiple) +{ + constexpr ceph::timespan short_duration = 50ms; + constexpr ceph::timespan long_duration = 10s; + + RGWReshardWait long_waiter(long_duration); + RGWReshardWait short_waiter(short_duration); + + // spawn 4 threads + std::vector<std::thread> threads; + { + auto sync_waiter([&long_waiter] { + EXPECT_EQ(-ECANCELED, long_waiter.wait(null_yield)); + }); + threads.emplace_back(sync_waiter); + threads.emplace_back(sync_waiter); + threads.emplace_back(sync_waiter); + threads.emplace_back(sync_waiter); + } + // spawn 4 coroutines + boost::asio::io_context context; + { + auto async_waiter = [&] (yield_context yield) { + EXPECT_EQ(-ECANCELED, long_waiter.wait(optional_yield{context, yield})); + }; + spawn::spawn(context, async_waiter); + spawn::spawn(context, async_waiter); + spawn::spawn(context, async_waiter); + spawn::spawn(context, async_waiter); + } + + const auto start = Clock::now(); + EXPECT_EQ(4u, context.poll()); // spawn + EXPECT_FALSE(context.stopped()); + + EXPECT_EQ(0, short_waiter.wait(null_yield)); + + long_waiter.stop(); // cancel long waiter + + EXPECT_EQ(4u, context.run_for(short_duration)); // timeout + EXPECT_TRUE(context.stopped()); + + for (auto& thread : threads) { + thread.join(); + } + const ceph::timespan elapsed = Clock::now() - start; + + EXPECT_LE(short_duration, elapsed); // waited at least 10ms + EXPECT_GT(long_duration, elapsed); // waited less than 10s + short_waiter.stop(); +} diff --git a/src/test/rgw/test_rgw_string.cc b/src/test/rgw/test_rgw_string.cc new file mode 100644 index 000000000..90a0b00c8 --- /dev/null +++ b/src/test/rgw/test_rgw_string.cc @@ -0,0 +1,76 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2017 Red Hat + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#include "rgw_string.h" +#include <gtest/gtest.h> + +const std::string abc{"abc"}; +const char *def{"def"}; // const char* +char ghi_arr[] = {'g', 'h', 'i', '\0'}; +char *ghi{ghi_arr}; // char* +constexpr std::string_view jkl{"jkl", 3}; +#define mno "mno" // string literal (char[4]) +char pqr[] = {'p', 'q', 'r', '\0'}; + +TEST(string_size, types) +{ + ASSERT_EQ(3u, string_size(abc)); + ASSERT_EQ(3u, string_size(def)); + ASSERT_EQ(3u, string_size(ghi)); + ASSERT_EQ(3u, string_size(jkl)); + ASSERT_EQ(3u, string_size(mno)); + ASSERT_EQ(3u, string_size(pqr)); + + constexpr auto compile_time_string_view_size = string_size(jkl); + ASSERT_EQ(3u, compile_time_string_view_size); + constexpr auto compile_time_string_literal_size = string_size(mno); + ASSERT_EQ(3u, compile_time_string_literal_size); + + char arr[] = {'a', 'b', 'c'}; // not null-terminated + ASSERT_THROW(string_size(arr), std::invalid_argument); +} + +TEST(string_cat_reserve, types) +{ + ASSERT_EQ("abcdefghijklmnopqr", + string_cat_reserve(abc, def, ghi, jkl, mno, pqr)); +} + +TEST(string_cat_reserve, count) +{ + ASSERT_EQ("", string_cat_reserve()); + ASSERT_EQ("abc", string_cat_reserve(abc)); + ASSERT_EQ("abcdef", string_cat_reserve(abc, def)); +} + +TEST(string_join_reserve, types) +{ + ASSERT_EQ("abc, def, ghi, jkl, mno, pqr", + string_join_reserve(", ", abc, def, ghi, jkl, mno, pqr)); +} + +TEST(string_join_reserve, count) +{ + ASSERT_EQ("", string_join_reserve(", ")); + ASSERT_EQ("abc", string_join_reserve(", ", abc)); + ASSERT_EQ("abc, def", string_join_reserve(", ", abc, def)); +} + +TEST(string_join_reserve, delim) +{ + ASSERT_EQ("abcdef", string_join_reserve("", abc, def)); + ASSERT_EQ("abc def", string_join_reserve(' ', abc, def)); + ASSERT_EQ("abc\ndef", string_join_reserve('\n', abc, def)); + ASSERT_EQ("abcfoodef", string_join_reserve(std::string{"foo"}, abc, def)); +} diff --git a/src/test/rgw/test_rgw_throttle.cc b/src/test/rgw/test_rgw_throttle.cc new file mode 100644 index 000000000..d67f2c6ce --- /dev/null +++ b/src/test/rgw/test_rgw_throttle.cc @@ -0,0 +1,221 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +/* + * Ceph - scalable distributed file system + * + * Copyright (C) 2018 Red Hat, Inc. + * + * This is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License version 2.1, as published by the Free Software + * Foundation. See file COPYING. 
+ * + */ + +#include "rgw_aio_throttle.h" + +#include <optional> +#include <thread> +#include "include/scope_guard.h" + +#include <spawn/spawn.hpp> +#include <gtest/gtest.h> + +struct RadosEnv : public ::testing::Environment { + public: + static constexpr auto poolname = "ceph_test_rgw_throttle"; + + static std::optional<RGWSI_RADOS> rados; + + void SetUp() override { + rados.emplace(g_ceph_context); + const NoDoutPrefix no_dpp(g_ceph_context, 1); + ASSERT_EQ(0, rados->start(null_yield, &no_dpp)); + int r = rados->pool({poolname}).create(&no_dpp); + if (r == -EEXIST) + r = 0; + ASSERT_EQ(0, r); + } + void TearDown() override { + ASSERT_EQ(0, rados->get_rados_handle()->pool_delete(poolname)); + rados->shutdown(); + rados.reset(); + } +}; +std::optional<RGWSI_RADOS> RadosEnv::rados; + +auto *const rados_env = ::testing::AddGlobalTestEnvironment(new RadosEnv); + +// test fixture for global setup/teardown +class RadosFixture : public ::testing::Test { + protected: + RGWSI_RADOS::Obj make_obj(const std::string& oid) { + auto obj = RadosEnv::rados->obj({{RadosEnv::poolname}, oid}); + const NoDoutPrefix no_dpp(g_ceph_context, 1); + ceph_assert_always(0 == obj.open(&no_dpp)); + return obj; + } +}; + +using Aio_Throttle = RadosFixture; + +namespace rgw { + +struct scoped_completion { + Aio* aio = nullptr; + AioResult* result = nullptr; + ~scoped_completion() { if (aio) { complete(-ECANCELED); } } + void complete(int r) { + result->result = r; + aio->put(*result); + aio = nullptr; + } +}; + +auto wait_on(scoped_completion& c) { + return [&c] (Aio* aio, AioResult& r) { c.aio = aio; c.result = &r; }; +} + +auto wait_for(boost::asio::io_context& context, ceph::timespan duration) { + return [&context, duration] (Aio* aio, AioResult& r) { + using Clock = ceph::coarse_mono_clock; + using Timer = boost::asio::basic_waitable_timer<Clock>; + auto t = std::make_unique<Timer>(context); + t->expires_after(duration); + t->async_wait([aio, &r, t=std::move(t)] (boost::system::error_code ec) { + if (ec != boost::asio::error::operation_aborted) { + aio->put(r); + } + }); + }; +} + +TEST_F(Aio_Throttle, NoThrottleUpToMax) +{ + BlockingAioThrottle throttle(4); + auto obj = make_obj(__PRETTY_FUNCTION__); + { + scoped_completion op1; + auto c1 = throttle.get(obj, wait_on(op1), 1, 0); + EXPECT_TRUE(c1.empty()); + scoped_completion op2; + auto c2 = throttle.get(obj, wait_on(op2), 1, 0); + EXPECT_TRUE(c2.empty()); + scoped_completion op3; + auto c3 = throttle.get(obj, wait_on(op3), 1, 0); + EXPECT_TRUE(c3.empty()); + scoped_completion op4; + auto c4 = throttle.get(obj, wait_on(op4), 1, 0); + EXPECT_TRUE(c4.empty()); + // no completions because no ops had to wait + auto c5 = throttle.poll(); + EXPECT_TRUE(c5.empty()); + } + auto completions = throttle.drain(); + ASSERT_EQ(4u, completions.size()); + for (auto& c : completions) { + EXPECT_EQ(-ECANCELED, c.result); + } +} + +TEST_F(Aio_Throttle, CostOverWindow) +{ + BlockingAioThrottle throttle(4); + auto obj = make_obj(__PRETTY_FUNCTION__); + + scoped_completion op; + auto c = throttle.get(obj, wait_on(op), 8, 0); + ASSERT_EQ(1u, c.size()); + EXPECT_EQ(-EDEADLK, c.front().result); +} + +TEST_F(Aio_Throttle, ThrottleOverMax) +{ + constexpr uint64_t window = 4; + BlockingAioThrottle throttle(window); + + auto obj = make_obj(__PRETTY_FUNCTION__); + + // issue 32 writes, and verify that max_outstanding <= window + constexpr uint64_t total = 32; + uint64_t max_outstanding = 0; + uint64_t outstanding = 0; + + // timer thread + boost::asio::io_context context; + using Executor = 
boost::asio::io_context::executor_type; + using Work = boost::asio::executor_work_guard<Executor>; + std::optional<Work> work(context.get_executor()); + std::thread worker([&context] { context.run(); }); + auto g = make_scope_guard([&work, &worker] { + work.reset(); + worker.join(); + }); + + for (uint64_t i = 0; i < total; i++) { + using namespace std::chrono_literals; + auto c = throttle.get(obj, wait_for(context, 10ms), 1, 0); + outstanding++; + outstanding -= c.size(); + if (max_outstanding < outstanding) { + max_outstanding = outstanding; + } + } + auto c = throttle.drain(); + outstanding -= c.size(); + EXPECT_EQ(0u, outstanding); + EXPECT_EQ(window, max_outstanding); +} + +TEST_F(Aio_Throttle, YieldCostOverWindow) +{ + auto obj = make_obj(__PRETTY_FUNCTION__); + + boost::asio::io_context context; + spawn::spawn(context, + [&] (yield_context yield) { + YieldingAioThrottle throttle(4, context, yield); + scoped_completion op; + auto c = throttle.get(obj, wait_on(op), 8, 0); + ASSERT_EQ(1u, c.size()); + EXPECT_EQ(-EDEADLK, c.front().result); + }); + context.run(); +} + +TEST_F(Aio_Throttle, YieldingThrottleOverMax) +{ + constexpr uint64_t window = 4; + + auto obj = make_obj(__PRETTY_FUNCTION__); + + // issue 32 writes, and verify that max_outstanding <= window + constexpr uint64_t total = 32; + uint64_t max_outstanding = 0; + uint64_t outstanding = 0; + + boost::asio::io_context context; + spawn::spawn(context, + [&] (yield_context yield) { + YieldingAioThrottle throttle(window, context, yield); + for (uint64_t i = 0; i < total; i++) { + using namespace std::chrono_literals; + auto c = throttle.get(obj, wait_for(context, 10ms), 1, 0); + outstanding++; + outstanding -= c.size(); + if (max_outstanding < outstanding) { + max_outstanding = outstanding; + } + } + auto c = throttle.drain(); + outstanding -= c.size(); + }); + context.poll(); // run until we block + EXPECT_EQ(window, outstanding); + + context.run(); + EXPECT_EQ(0u, outstanding); + EXPECT_EQ(window, max_outstanding); +} + +} // namespace rgw diff --git a/src/test/rgw/test_rgw_url.cc b/src/test/rgw/test_rgw_url.cc new file mode 100644 index 000000000..92731dfad --- /dev/null +++ b/src/test/rgw/test_rgw_url.cc @@ -0,0 +1,111 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "rgw_url.h" +#include <string> +#include <gtest/gtest.h> + +using namespace rgw; + +TEST(TestURL, SimpleAuthority) +{ + std::string host; + std::string user; + std::string password; + const std::string url = "http://example.com"; + ASSERT_TRUE(parse_url_authority(url, host, user, password)); + ASSERT_TRUE(user.empty()); + ASSERT_TRUE(password.empty()); + EXPECT_STREQ(host.c_str(), "example.com"); +} + +TEST(TestURL, SimpleAuthority_1) +{ + std::string host; + std::string user; + std::string password; + const std::string url = "http://example.com/"; + ASSERT_TRUE(parse_url_authority(url, host, user, password)); + ASSERT_TRUE(user.empty()); + ASSERT_TRUE(password.empty()); + EXPECT_STREQ(host.c_str(), "example.com"); +} + +TEST(TestURL, IPAuthority) +{ + std::string host; + std::string user; + std::string password; + const std::string url = "http://1.2.3.4"; + ASSERT_TRUE(parse_url_authority(url, host, user, password)); + ASSERT_TRUE(user.empty()); + ASSERT_TRUE(password.empty()); + EXPECT_STREQ(host.c_str(), "1.2.3.4"); +} + +TEST(TestURL, IPv6Authority) +{ + std::string host; + std::string user; + std::string password; + const std::string url = "http://FE80:CD00:0000:0CDE:1257:0000:211E:729C"; + 
ASSERT_TRUE(parse_url_authority(url, host, user, password)); + ASSERT_TRUE(user.empty()); + ASSERT_TRUE(password.empty()); + EXPECT_STREQ(host.c_str(), "FE80:CD00:0000:0CDE:1257:0000:211E:729C"); +} + +TEST(TestURL, AuthorityWithUserinfo) +{ + std::string host; + std::string user; + std::string password; + const std::string url = "https://user:password@example.com"; + ASSERT_TRUE(parse_url_authority(url, host, user, password)); + EXPECT_STREQ(host.c_str(), "example.com"); + EXPECT_STREQ(user.c_str(), "user"); + EXPECT_STREQ(password.c_str(), "password"); +} + +TEST(TestURL, AuthorityWithPort) +{ + std::string host; + std::string user; + std::string password; + const std::string url = "http://user:password@example.com:1234"; + ASSERT_TRUE(parse_url_authority(url, host, user, password)); + EXPECT_STREQ(host.c_str(), "example.com:1234"); + EXPECT_STREQ(user.c_str(), "user"); + EXPECT_STREQ(password.c_str(), "password"); +} + +TEST(TestURL, DifferentSchema) +{ + std::string host; + std::string user; + std::string password; + const std::string url = "kafka://example.com"; + ASSERT_TRUE(parse_url_authority(url, host, user, password)); + ASSERT_TRUE(user.empty()); + ASSERT_TRUE(password.empty()); + EXPECT_STREQ(host.c_str(), "example.com"); +} + +TEST(TestURL, InvalidHost) +{ + std::string host; + std::string user; + std::string password; + const std::string url = "http://exa_mple.com"; + ASSERT_FALSE(parse_url_authority(url, host, user, password)); +} + +TEST(TestURL, WithPath) +{ + std::string host; + std::string user; + std::string password; + const std::string url = "amqps://www.example.com:1234/vhost_name"; + ASSERT_TRUE(parse_url_authority(url, host, user, password)); +} + diff --git a/src/test/rgw/test_rgw_xml.cc b/src/test/rgw/test_rgw_xml.cc new file mode 100644 index 000000000..fa9f21157 --- /dev/null +++ b/src/test/rgw/test_rgw_xml.cc @@ -0,0 +1,463 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab + +#include "rgw_xml.h" +#include <gtest/gtest.h> +#include <list> +#include <stdexcept> + +struct NameAndStatus { + // these are sub-tags + std::string name; + bool status; + + // intrusive XML decoding API + bool decode_xml(XMLObj *obj) { + if (!RGWXMLDecoder::decode_xml("Name", name, obj, true)) { + // name is mandatory + return false; + } + if (!RGWXMLDecoder::decode_xml("Status", status, obj, false)) { + // status is optional and defaults to True + status = true; + } + return true; + } +}; + +struct Item { + // these are sub-tags + NameAndStatus name_and_status; + int value; + int extra_value; + + // these are attributes + std::string date; + std::string comment; + + // intrusive XML decoding API + bool decode_xml(XMLObj *obj) { + if (!RGWXMLDecoder::decode_xml("NameAndStatus", name_and_status, obj, true)) { + // name amd status are mandatory + return false; + } + if (!RGWXMLDecoder::decode_xml("Value", value, obj, true)) { + // value is mandatory + return false; + } + if (!RGWXMLDecoder::decode_xml("ExtraValue", extra_value, obj, false)) { + // extra value is optional and defaults to zero + extra_value = 0; + } + + // date attribute is optional + if (!obj->get_attr("Date", date)) { + date = "no date"; + } + // comment attribute is optional + if (!obj->get_attr("Comment", comment)) { + comment = "no comment"; + } + + return true; + } +}; + +struct Items { + // these are sub-tags + std::list<Item> item_list; + + // intrusive XML decoding API + bool decode_xml(XMLObj *obj) { + do_decode_xml_obj(item_list, "Item", obj); + return true; + 
} +}; + +// in case of non-intrusive decoding class +// hierarchy should reflect the XML hierarchy + +class NameXMLObj: public XMLObj { +protected: + void xml_handle_data(const char *s, int len) override { + // no need to set "data", setting "name" directly + value.append(s, len); + } + +public: + std::string value; + ~NameXMLObj() override = default; +}; + +class StatusXMLObj: public XMLObj { +protected: + void xml_handle_data(const char *s, int len) override { + std::istringstream is(std::string(s, len)); + is >> std::boolalpha >> value; + } + +public: + bool value; + ~StatusXMLObj() override = default; +}; + +class NameAndStatusXMLObj: public NameAndStatus, public XMLObj { +public: + ~NameAndStatusXMLObj() override = default; + + bool xml_end(const char *el) override { + XMLObjIter iter = find("Name"); + NameXMLObj* _name = static_cast<NameXMLObj*>(iter.get_next()); + if (!_name) { + // name is mandatory + return false; + } + name = _name->value; + iter = find("Status"); + StatusXMLObj* _status = static_cast<StatusXMLObj*>(iter.get_next()); + if (!_status) { + // status is optional and defaults to True + status = true; + } else { + status = _status->value; + } + return true; + } +}; + +class ItemXMLObj: public Item, public XMLObj { +public: + ~ItemXMLObj() override = default; + + bool xml_end(const char *el) override { + XMLObjIter iter = find("NameAndStatus"); + NameAndStatusXMLObj* _name_and_status = static_cast<NameAndStatusXMLObj*>(iter.get_next()); + if (!_name_and_status) { + // name and status are mandatory + return false; + } + name_and_status = *static_cast<NameAndStatus*>(_name_and_status); + iter = find("Value"); + XMLObj* _value = iter.get_next(); + if (!_value) { + // value is mandatory + return false; + } + try { + value = std::stoi(_value->get_data()); + } catch (const std::exception& e) { + return false; + } + iter = find("ExtraValue"); + XMLObj* _extra_value = iter.get_next(); + if (_extra_value) { + // extra value is optional but cannot contain garbage + try { + extra_value = std::stoi(_extra_value->get_data()); + } catch (const std::exception& e) { + return false; + } + } else { + // if not set, it defaults to zero + extra_value = 0; + } + + // date attribute is optional + if (!get_attr("Date", date)) { + date = "no date"; + } + // comment attribute is optional + if (!get_attr("Comment", comment)) { + comment = "no comment"; + } + + return true; + } +}; + +class ItemsXMLObj: public Items, public XMLObj { +public: + ~ItemsXMLObj() override = default; + + bool xml_end(const char *el) override { + XMLObjIter iter = find("Item"); + ItemXMLObj* item_ptr = static_cast<ItemXMLObj*>(iter.get_next()); + // mandatory to have at least one item + bool item_found = false; + while (item_ptr) { + item_list.push_back(*static_cast<Item*>(item_ptr)); + item_ptr = static_cast<ItemXMLObj*>(iter.get_next()); + item_found = true; + } + return item_found; + } +}; + +class ItemsXMLParser: public RGWXMLParser { + static const int MAX_NAME_LEN = 16; +public: + XMLObj *alloc_obj(const char *el) override { + if (strncmp(el, "Items", MAX_NAME_LEN) == 0) { + items = new ItemsXMLObj; + return items; + } else if (strncmp(el, "Item", MAX_NAME_LEN) == 0) { + return new ItemXMLObj; + } else if (strncmp(el, "NameAndStatus", MAX_NAME_LEN) == 0) { + return new NameAndStatusXMLObj; + } else if (strncmp(el, "Name", MAX_NAME_LEN) == 0) { + return new NameXMLObj; + } else if (strncmp(el, "Status", MAX_NAME_LEN) == 0) { + return new StatusXMLObj; + } + return nullptr; + } + // this is a pointer to the parsed 
results + ItemsXMLObj* items; +}; + +static const char* good_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "<Items>" + "<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>" + "<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>" + "<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>" + "<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>" + "</Items>"; + +static const char* expected_output = "((hello,1),1,0),((world,1),2,99),((foo,1),3,0),((bar,0),4,42),"; + +std::string to_string(const Items& items) { + std::stringstream ss; + for (const auto& item : items.item_list) { + ss << "((" << item.name_and_status.name << "," << item.name_and_status.status << ")," << item.value << "," << item.extra_value << ")" << ","; + } + return ss.str(); +} + +std::string to_string_with_attributes(const Items& items) { + std::stringstream ss; + for (const auto& item : items.item_list) { + ss << "(" << item.date << "," << item.comment << ",(" << item.name_and_status.name << "," << item.name_and_status.status << ")," + << item.value << "," << item.extra_value << ")" << ","; + } + return ss.str(); +} + +TEST(TestParser, BasicParsing) +{ + ItemsXMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(good_input, strlen(good_input), 1)); + ASSERT_EQ(parser.items->item_list.size(), 4U); + ASSERT_STREQ(to_string(*parser.items).c_str(), expected_output); +} + +static const char* malformed_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "<Items>" + "<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value><Item>" + "<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>" + "<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>" + "<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>" + "</Items>"; + +TEST(TestParser, MalformedInput) +{ + ItemsXMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_FALSE(parser.parse(good_input, strlen(malformed_input), 1)); +} + +static const char* missing_value_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "<Items>" + "<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>" + "<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>" + "<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>" + "<Item><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>" + "</Items>"; + +TEST(TestParser, MissingMandatoryTag) +{ + ItemsXMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_FALSE(parser.parse(missing_value_input, strlen(missing_value_input), 1)); +} + +static const char* unknown_tag_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "<Items>" + "<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>" + "<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>" + "<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus><Kaboom>0</Kaboom></Item>" + "<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>" + "<Kaboom>0</Kaboom>" + "</Items>"; + +TEST(TestParser, UnknownTag) +{ + ItemsXMLParser parser; + 
ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(unknown_tag_input, strlen(unknown_tag_input), 1)); + ASSERT_EQ(parser.items->item_list.size(), 4U); + ASSERT_STREQ(to_string(*parser.items).c_str(), expected_output); +} + +static const char* invalid_value_input = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "<Items>" + "<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>" + "<Item><ExtraValue>kaboom</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value></Item>" + "<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>" + "<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>" + "</Items>"; + +TEST(TestParser, InvalidValue) +{ + ItemsXMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_FALSE(parser.parse(invalid_value_input, strlen(invalid_value_input), 1)); +} + +static const char* good_input1 = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "<Items>" + "<Item><NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value></Item>" + "<Item><ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name>"; + +static const char* good_input2 = "</NameAndStatus><Value>2</Value></Item>" + "<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>" + "<Item><Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus></Item>" + "</Items>"; + +TEST(TestParser, MultipleChunks) +{ + ItemsXMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(good_input1, strlen(good_input1), 0)); + ASSERT_TRUE(parser.parse(good_input2, strlen(good_input2), 1)); + ASSERT_EQ(parser.items->item_list.size(), 4U); + ASSERT_STREQ(to_string(*parser.items).c_str(), expected_output); +} + +static const char* input_with_attributes = "<?xml version=\"1.0\" encoding=\"UTF-8\"?>" + "<Items>" + "<Item Date=\"Tue Dec 27 17:21:29 2011\" Kaboom=\"just ignore\">" + "<NameAndStatus><Name>hello</Name></NameAndStatus><Value>1</Value>" + "</Item>" + "<Item Comment=\"hello world\">" + "<ExtraValue>99</ExtraValue><NameAndStatus><Name>world</Name></NameAndStatus><Value>2</Value>" + "</Item>" + "<Item><Value>3</Value><NameAndStatus><Name>foo</Name></NameAndStatus></Item>" + "<Item Comment=\"goodbye\" Date=\"Thu Feb 28 10:00:18 UTC 2019 \">" + "<Value>4</Value><ExtraValue>42</ExtraValue><NameAndStatus><Name>bar</Name><Status>False</Status></NameAndStatus>" + "</Item>" + "</Items>"; + +static const char* expected_output_with_attributes = "(Tue Dec 27 17:21:29 2011,no comment,(hello,1),1,0)," + "(no date,hello world,(world,1),2,99)," + "(no date,no comment,(foo,1),3,0)," + "(Thu Feb 28 10:00:18 UTC 2019 ,goodbye,(bar,0),4,42),"; + +TEST(TestParser, Attributes) +{ + ItemsXMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(input_with_attributes, strlen(input_with_attributes), 1)); + ASSERT_EQ(parser.items->item_list.size(), 4U); + ASSERT_STREQ(to_string_with_attributes(*parser.items).c_str(), + expected_output_with_attributes); +} + +TEST(TestDecoder, BasicParsing) +{ + RGWXMLDecoder::XMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(good_input, strlen(good_input), 1)); + Items result; + ASSERT_NO_THROW({ + ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true)); + }); + ASSERT_EQ(result.item_list.size(), 4U); + ASSERT_STREQ(to_string(result).c_str(), expected_output); +} + +TEST(TestDecoder, MalfomedInput) +{ + RGWXMLDecoder::XMLParser parser; 
+ ASSERT_TRUE(parser.init()); + ASSERT_FALSE(parser.parse(good_input, strlen(malformed_input), 1)); +} + +TEST(TestDecoder, MissingMandatoryTag) +{ + RGWXMLDecoder::XMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(missing_value_input, strlen(missing_value_input), 1)); + Items result; + ASSERT_ANY_THROW({ + ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true)); + }); +} + +TEST(TestDecoder, InvalidValue) +{ + RGWXMLDecoder::XMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(invalid_value_input, strlen(invalid_value_input), 1)); + Items result; + ASSERT_ANY_THROW({ + ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true)); + }); +} + +TEST(TestDecoder, MultipleChunks) +{ + RGWXMLDecoder::XMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(good_input1, strlen(good_input1), 0)); + ASSERT_TRUE(parser.parse(good_input2, strlen(good_input2), 1)); + Items result; + ASSERT_NO_THROW({ + ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true)); + }); + ASSERT_EQ(result.item_list.size(), 4U); + ASSERT_STREQ(to_string(result).c_str(), expected_output); +} + +TEST(TestDecoder, Attributes) +{ + RGWXMLDecoder::XMLParser parser; + ASSERT_TRUE(parser.init()); + ASSERT_TRUE(parser.parse(input_with_attributes, strlen(input_with_attributes), 1)); + Items result; + ASSERT_NO_THROW({ + ASSERT_TRUE(RGWXMLDecoder::decode_xml("Items", result, &parser, true)); + }); + ASSERT_EQ(result.item_list.size(), 4U); + ASSERT_STREQ(to_string_with_attributes(result).c_str(), + expected_output_with_attributes); +} + +static const char* expected_xml_output = "<Items xmlns=\"https://www.ceph.com/doc/\">" + "<Item Order=\"0\"><NameAndStatus><Name>hello</Name><Status>True</Status></NameAndStatus><Value>0</Value></Item>" + "<Item Order=\"1\"><NameAndStatus><Name>hello</Name><Status>False</Status></NameAndStatus><Value>1</Value></Item>" + "<Item Order=\"2\"><NameAndStatus><Name>hello</Name><Status>True</Status></NameAndStatus><Value>2</Value></Item>" + "<Item Order=\"3\"><NameAndStatus><Name>hello</Name><Status>False</Status></NameAndStatus><Value>3</Value></Item>" + "<Item Order=\"4\"><NameAndStatus><Name>hello</Name><Status>True</Status></NameAndStatus><Value>4</Value></Item>" + "</Items>"; +TEST(TestEncoder, ListWithAttrsAndNS) +{ + XMLFormatter f; + const auto array_size = 5; + f.open_array_section_in_ns("Items", "https://www.ceph.com/doc/"); + for (auto i = 0; i < array_size; ++i) { + FormatterAttrs item_attrs("Order", std::to_string(i).c_str(), NULL); + f.open_object_section_with_attrs("Item", item_attrs); + f.open_object_section("NameAndStatus"); + encode_xml("Name", "hello", &f); + encode_xml("Status", (i%2 == 0), &f); + f.close_section(); + encode_xml("Value", i, &f); + f.close_section(); + } + f.close_section(); + std::stringstream ss; + f.flush(ss); + ASSERT_STREQ(ss.str().c_str(), expected_xml_output); +} + |
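As a usage sketch of the intrusive decoding path exercised by test_rgw_xml.cc above (assuming only the rgw_xml.h calls that appear in the tests: XMLParser::init()/parse() and RGWXMLDecoder::decode_xml(); the Owner/ID/DisplayName tag names and the main() driver are illustrative, not taken from the diff):

#include "rgw_xml.h"
#include <cassert>
#include <cstring>
#include <string>

// Illustrative type: any struct with a decode_xml(XMLObj*) member can be
// handed to RGWXMLDecoder::decode_xml(), as NameAndStatus/Item/Items do above.
struct Owner {
  std::string id;
  std::string display_name;

  bool decode_xml(XMLObj *obj) {
    if (!RGWXMLDecoder::decode_xml("ID", id, obj, true)) {
      return false;                     // mandatory sub-tag
    }
    if (!RGWXMLDecoder::decode_xml("DisplayName", display_name, obj, false)) {
      display_name.clear();             // optional sub-tag, defaults to empty
    }
    return true;
  }
};

int main() {
  const char *xml =
    "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
    "<Owner><ID>12345</ID><DisplayName>alice</DisplayName></Owner>";

  RGWXMLDecoder::XMLParser parser;
  if (!parser.init() || !parser.parse(xml, strlen(xml), 1)) {
    return 1;                           // malformed XML is rejected here
  }

  Owner owner;
  // With the last argument (mandatory) set to true, decode_xml() throws when
  // the tag or one of its mandatory sub-tags cannot be decoded, matching the
  // ASSERT_ANY_THROW cases in TestDecoder above.
  if (!RGWXMLDecoder::decode_xml("Owner", owner, &parser, true)) {
    return 1;
  }
  assert(owner.id == "12345");
  assert(owner.display_name == "alice");
  return 0;
}

The final argument of parse() marks the last chunk of input; TestDecoder::MultipleChunks above feeds the same parser two parse() calls, passing 0 for the first chunk and 1 for the last.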