Diffstat (limited to 'reftable/stack_test.c')
-rw-r--r--    reftable/stack_test.c    293
1 file changed, 213 insertions(+), 80 deletions(-)
diff --git a/reftable/stack_test.c b/reftable/stack_test.c
index d0b7175..1df3ffc 100644
--- a/reftable/stack_test.c
+++ b/reftable/stack_test.c
@@ -13,7 +13,6 @@ https://developers.google.com/open-source/licenses/bsd
#include "reftable-reader.h"
#include "merged.h"
#include "basics.h"
-#include "constants.h"
#include "record.h"
#include "test_framework.h"
#include "reftable-tests.h"
@@ -39,7 +38,17 @@ static int count_dir_entries(const char *dirname)
return 0;
while ((d = readdir(dir))) {
- if (!strcmp(d->d_name, "..") || !strcmp(d->d_name, "."))
+ /*
+ * Besides skipping over "." and "..", we also need to
+ * skip over other files that have a leading ".". This
+ * is due to the behaviour of NFS, which will rename files
+ * to ".nfs*" to emulate delete-on-last-close.
+ *
+ * In any case this should be fine as the reftable
+ * library will never write files with leading dots
+ * anyway.
+ */
+ if (starts_with(d->d_name, "."))
continue;
len++;
}
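
The new filter is easy to lift out of the test harness. Below is a minimal standalone sketch of the same loop, using a plain character check in place of git's starts_with() helper; count_visible_entries is a hypothetical name and not part of the patch.

#include <dirent.h>

/*
 * Standalone variant of the counting loop above: any entry whose name
 * begins with '.' is ignored, which covers ".", ".." and NFS's
 * ".nfs*" silly-rename files alike.
 */
static int count_visible_entries(const char *dirname)
{
	DIR *dir = opendir(dirname);
	struct dirent *d;
	int len = 0;

	if (!dir)
		return 0;
	while ((d = readdir(dir)))
		if (d->d_name[0] != '.')
			len++;
	closedir(dir);
	return len;
}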
@@ -78,7 +87,7 @@ static void test_read_file(void)
int i = 0;
EXPECT(fd > 0);
- n = write(fd, out, strlen(out));
+ n = write_in_full(fd, out, strlen(out));
EXPECT(n == strlen(out));
err = close(fd);
EXPECT(err >= 0);
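
Using write_in_full() here removes a latent flake: a bare write(2) may legitimately return a short count, which would then trip the EXPECT(n == strlen(out)) check even though nothing went wrong. A sketch of the looping behaviour such a wrapper provides; write_all is a hypothetical stand-in, not git's actual helper.

#include <errno.h>
#include <unistd.h>

/* Keep writing until the whole buffer is flushed or a real error occurs. */
static ssize_t write_all(int fd, const void *buf, size_t count)
{
	const char *p = buf;
	size_t remaining = count;

	while (remaining > 0) {
		ssize_t n = write(fd, p, remaining);
		if (n < 0) {
			if (errno == EINTR)
				continue; /* interrupted before writing, retry */
			return -1;
		}
		p += n;
		remaining -= n;
	}
	return count;
}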
@@ -233,7 +242,7 @@ static void test_reftable_stack_uptodate(void)
EXPECT_ERR(err);
err = reftable_stack_add(st2, &write_test_ref, &ref2);
- EXPECT(err == REFTABLE_LOCK_ERROR);
+ EXPECT(err == REFTABLE_OUTDATED_ERROR);
err = reftable_stack_reload(st2);
EXPECT_ERR(err);
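
Distinguishing REFTABLE_OUTDATED_ERROR from REFTABLE_LOCK_ERROR matters because only the former is resolved by reloading, which is exactly what the test does next by hand. A hedged sketch of the retry pattern this enables, reusing the reftable_stack_add() and reftable_stack_reload() calls seen elsewhere in this file; add_with_reload is a hypothetical helper, not part of the library.

static int add_with_reload(struct reftable_stack *st,
			   int (*write_table)(struct reftable_writer *wr, void *arg),
			   void *arg)
{
	int err = reftable_stack_add(st, write_table, arg);
	if (err == REFTABLE_OUTDATED_ERROR) {
		/* Another writer got there first: pick up its tables and retry. */
		err = reftable_stack_reload(st);
		if (err < 0)
			return err;
		err = reftable_stack_add(st, write_table, arg);
	}
	return err;
}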
@@ -289,6 +298,104 @@ static void test_reftable_stack_transaction_api(void)
clear_dir(dir);
}
+static void test_reftable_stack_transaction_api_performs_auto_compaction(void)
+{
+ char *dir = get_tmp_dir(__LINE__);
+ struct reftable_write_options cfg = {0};
+ struct reftable_addition *add = NULL;
+ struct reftable_stack *st = NULL;
+ int i, n = 20, err;
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ for (i = 0; i <= n; i++) {
+ struct reftable_ref_record ref = {
+ .update_index = reftable_stack_next_update_index(st),
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+ char name[100];
+
+ snprintf(name, sizeof(name), "branch%04d", i);
+ ref.refname = name;
+
+ /*
+ * Disable auto-compaction for all but the last run. This ensures
+ * that we indeed honor the setting and gives us better control
+ * over when exactly auto-compaction runs.
+ */
+ st->config.disable_auto_compact = i != n;
+
+ err = reftable_stack_new_addition(&add, st);
+ EXPECT_ERR(err);
+
+ err = reftable_addition_add(add, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+
+ err = reftable_addition_commit(add);
+ EXPECT_ERR(err);
+
+ reftable_addition_destroy(add);
+
+ /*
+ * The stack length should grow continuously for all runs where
+ * auto compaction is disabled. When enabled, we should merge
+ * all tables in the stack.
+ */
+ if (i != n)
+ EXPECT(st->merged->stack_len == i + 1);
+ else
+ EXPECT(st->merged->stack_len == 1);
+ }
+
+ reftable_stack_destroy(st);
+ clear_dir(dir);
+}
+
+static void test_reftable_stack_auto_compaction_fails_gracefully(void)
+{
+ struct reftable_ref_record ref = {
+ .refname = "refs/heads/master",
+ .update_index = 1,
+ .value_type = REFTABLE_REF_VAL1,
+ .value.val1 = {0x01},
+ };
+ struct reftable_write_options cfg = {0};
+ struct reftable_stack *st;
+ struct strbuf table_path = STRBUF_INIT;
+ char *dir = get_tmp_dir(__LINE__);
+ int err;
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ err = reftable_stack_add(st, write_test_ref, &ref);
+ EXPECT_ERR(err);
+ EXPECT(st->merged->stack_len == 1);
+ EXPECT(st->stats.attempts == 0);
+ EXPECT(st->stats.failures == 0);
+
+ /*
+ * Lock the newly written table such that it cannot be compacted.
+ * Adding a new table to the stack should not be impacted by this, even
+ * though auto-compaction will now fail.
+ */
+ strbuf_addf(&table_path, "%s/%s.lock", dir, st->readers[0]->name);
+ write_file_buf(table_path.buf, "", 0);
+
+ ref.update_index = 2;
+ err = reftable_stack_add(st, write_test_ref, &ref);
+ EXPECT_ERR(err);
+ EXPECT(st->merged->stack_len == 2);
+ EXPECT(st->stats.attempts == 1);
+ EXPECT(st->stats.failures == 1);
+
+ reftable_stack_destroy(st);
+ strbuf_release(&table_path);
+ clear_dir(dir);
+}
+
static void test_reftable_stack_validate_refname(void)
{
struct reftable_write_options cfg = { 0 };
@@ -389,18 +496,19 @@ static void test_reftable_stack_add(void)
int err = 0;
struct reftable_write_options cfg = {
.exact_log_message = 1,
+ .default_permissions = 0660,
+ .disable_auto_compact = 1,
};
struct reftable_stack *st = NULL;
char *dir = get_tmp_dir(__LINE__);
-
struct reftable_ref_record refs[2] = { { NULL } };
struct reftable_log_record logs[2] = { { NULL } };
+ struct strbuf path = STRBUF_INIT;
+ struct stat stat_result;
int N = ARRAY_SIZE(refs);
-
err = reftable_new_stack(&st, dir, cfg);
EXPECT_ERR(err);
- st->disable_auto_compact = 1;
for (i = 0; i < N; i++) {
char buf[256];
@@ -408,14 +516,11 @@ static void test_reftable_stack_add(void)
refs[i].refname = xstrdup(buf);
refs[i].update_index = i + 1;
refs[i].value_type = REFTABLE_REF_VAL1;
- refs[i].value.val1 = reftable_malloc(GIT_SHA1_RAWSZ);
set_test_hash(refs[i].value.val1, i);
logs[i].refname = xstrdup(buf);
logs[i].update_index = N + i + 1;
logs[i].value_type = REFTABLE_LOG_UPDATE;
-
- logs[i].value.update.new_hash = reftable_malloc(GIT_SHA1_RAWSZ);
logs[i].value.update.email = xstrdup("identity@invalid");
set_test_hash(logs[i].value.update.new_hash, i);
}
@@ -456,12 +561,32 @@ static void test_reftable_stack_add(void)
reftable_log_record_release(&dest);
}
+#ifndef GIT_WINDOWS_NATIVE
+ strbuf_addstr(&path, dir);
+ strbuf_addstr(&path, "/tables.list");
+ err = stat(path.buf, &stat_result);
+ EXPECT(!err);
+ EXPECT((stat_result.st_mode & 0777) == cfg.default_permissions);
+
+ strbuf_reset(&path);
+ strbuf_addstr(&path, dir);
+ strbuf_addstr(&path, "/");
+ /* do not try at home; not an external API for reftable. */
+ strbuf_addstr(&path, st->readers[0]->name);
+ err = stat(path.buf, &stat_result);
+ EXPECT(!err);
+ EXPECT((stat_result.st_mode & 0777) == cfg.default_permissions);
+#else
+ (void) stat_result;
+#endif
+
/* cleanup */
reftable_stack_destroy(st);
for (i = 0; i < N; i++) {
reftable_ref_record_release(&refs[i]);
reftable_log_record_release(&logs[i]);
}
+ strbuf_release(&path);
clear_dir(dir);
}
@@ -473,16 +598,17 @@ static void test_reftable_stack_log_normalize(void)
};
struct reftable_stack *st = NULL;
char *dir = get_tmp_dir(__LINE__);
-
- uint8_t h1[GIT_SHA1_RAWSZ] = { 0x01 }, h2[GIT_SHA1_RAWSZ] = { 0x02 };
-
- struct reftable_log_record input = { .refname = "branch",
- .update_index = 1,
- .value_type = REFTABLE_LOG_UPDATE,
- .value = { .update = {
- .new_hash = h1,
- .old_hash = h2,
- } } };
+ struct reftable_log_record input = {
+ .refname = "branch",
+ .update_index = 1,
+ .value_type = REFTABLE_LOG_UPDATE,
+ .value = {
+ .update = {
+ .new_hash = { 1 },
+ .old_hash = { 2 },
+ },
+ },
+ };
struct reftable_log_record dest = {
.update_index = 0,
};
@@ -545,7 +671,6 @@ static void test_reftable_stack_tombstone(void)
refs[i].update_index = i + 1;
if (i % 2 == 0) {
refs[i].value_type = REFTABLE_REF_VAL1;
- refs[i].value.val1 = reftable_malloc(GIT_SHA1_RAWSZ);
set_test_hash(refs[i].value.val1, i);
}
@@ -554,8 +679,6 @@ static void test_reftable_stack_tombstone(void)
logs[i].update_index = 42;
if (i % 2 == 0) {
logs[i].value_type = REFTABLE_LOG_UPDATE;
- logs[i].value.update.new_hash =
- reftable_malloc(GIT_SHA1_RAWSZ);
set_test_hash(logs[i].value.update.new_hash, i);
logs[i].value.update.email =
xstrdup("identity@invalid");
@@ -647,60 +770,13 @@ static void test_reftable_stack_hash_id(void)
clear_dir(dir);
}
-static void test_log2(void)
-{
- EXPECT(1 == fastlog2(3));
- EXPECT(2 == fastlog2(4));
- EXPECT(2 == fastlog2(5));
-}
-
-static void test_sizes_to_segments(void)
-{
- uint64_t sizes[] = { 2, 3, 4, 5, 7, 9 };
- /* .................0 1 2 3 4 5 */
-
- int seglen = 0;
- struct segment *segs =
- sizes_to_segments(&seglen, sizes, ARRAY_SIZE(sizes));
- EXPECT(segs[2].log == 3);
- EXPECT(segs[2].start == 5);
- EXPECT(segs[2].end == 6);
-
- EXPECT(segs[1].log == 2);
- EXPECT(segs[1].start == 2);
- EXPECT(segs[1].end == 5);
- reftable_free(segs);
-}
-
-static void test_sizes_to_segments_empty(void)
-{
- int seglen = 0;
- struct segment *segs = sizes_to_segments(&seglen, NULL, 0);
- EXPECT(seglen == 0);
- reftable_free(segs);
-}
-
-static void test_sizes_to_segments_all_equal(void)
-{
- uint64_t sizes[] = { 5, 5 };
-
- int seglen = 0;
- struct segment *segs =
- sizes_to_segments(&seglen, sizes, ARRAY_SIZE(sizes));
- EXPECT(seglen == 1);
- EXPECT(segs[0].start == 0);
- EXPECT(segs[0].end == 2);
- reftable_free(segs);
-}
-
static void test_suggest_compaction_segment(void)
{
- uint64_t sizes[] = { 128, 64, 17, 16, 9, 9, 9, 16, 16 };
- /* .................0 1 2 3 4 5 6 */
+ uint64_t sizes[] = { 512, 64, 17, 16, 9, 9, 9, 16, 2, 16 };
struct segment min =
suggest_compaction_segment(sizes, ARRAY_SIZE(sizes));
- EXPECT(min.start == 2);
- EXPECT(min.end == 7);
+ EXPECT(min.start == 1);
+ EXPECT(min.end == 10);
}
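
The updated sizes and expectations reflect the new sizing heuristic. As a hedged illustration only, assuming a factor-of-2 geometric rule (a table is considered large enough if it is at least twice the combined size of everything newer), the suggested segment can be re-derived as follows; sketch_geometric_segment is illustrative and is not the library's suggest_compaction_segment().

/*
 * Walk from the newest table backwards: once a table is found that is
 * too small relative to its newer neighbour, keep extending the
 * segment towards older tables while they remain smaller than twice
 * the bytes accumulated so far.
 */
static struct segment sketch_geometric_segment(const uint64_t *sizes, size_t n)
{
	struct segment seg = { 0 };
	uint64_t bytes = 0;
	size_t i;

	if (n <= 1)
		return seg;

	/* find the newest table that breaks the geometric ordering */
	for (i = n - 1; i > 0; i--) {
		if (sizes[i - 1] < sizes[i] * 2) {
			seg.end = i + 1;
			bytes = sizes[i];
			break;
		}
	}
	if (!seg.end)
		return seg;

	/* grow the segment towards older tables while they stay too small */
	for (i = seg.end - 1; i > 0; i--) {
		if (sizes[i - 1] >= bytes * 2)
			break;
		seg.start = i - 1;
		bytes += sizes[i - 1];
	}
	return seg;
}

On the sizes above, the 512-byte table is the only one that satisfies the rule (the nine newer tables sum to 158 bytes, and 512 >= 2 * 158), so the sketch arrives at the same answer as the test: compact indices 1 through 9, i.e. start == 1 and end == 10.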
static void test_suggest_compaction_segment_nothing(void)
@@ -738,7 +814,6 @@ static void test_reflog_expire(void)
logs[i].update_index = i;
logs[i].value_type = REFTABLE_LOG_UPDATE;
logs[i].value.update.time = i;
- logs[i].value.update.new_hash = reftable_malloc(GIT_SHA1_RAWSZ);
logs[i].value.update.email = xstrdup("identity@invalid");
set_test_hash(logs[i].value.update.new_hash, i);
}
@@ -812,9 +887,21 @@ static void test_empty_add(void)
reftable_stack_destroy(st2);
}
+static int fastlog2(uint64_t sz)
+{
+ int l = 0;
+ if (sz == 0)
+ return 0;
+ for (; sz; sz /= 2)
+ l++;
+ return l - 1;
+}
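
The helper computes floor(log2(sz)), with fastlog2(0) defined as 0, and is re-added here as a test-local function after the removal of test_log2() above. A tiny sanity check mirroring those removed assertions could look like this; check_fastlog2 is a hypothetical addition, not part of the patch.

static void check_fastlog2(void)
{
	/* Values mirror the removed test_log2() assertions. */
	EXPECT(fastlog2(1) == 0);
	EXPECT(fastlog2(3) == 1);
	EXPECT(fastlog2(4) == 2);
	EXPECT(fastlog2(5) == 2);
}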
+
static void test_reftable_stack_auto_compaction(void)
{
- struct reftable_write_options cfg = { 0 };
+ struct reftable_write_options cfg = {
+ .disable_auto_compact = 1,
+ };
struct reftable_stack *st = NULL;
char *dir = get_tmp_dir(__LINE__);
@@ -824,7 +911,6 @@ static void test_reftable_stack_auto_compaction(void)
err = reftable_new_stack(&st, dir, cfg);
EXPECT_ERR(err);
- st->disable_auto_compact = 1; /* call manually below for coverage. */
for (i = 0; i < N; i++) {
char name[100];
struct reftable_ref_record ref = {
@@ -850,6 +936,54 @@ static void test_reftable_stack_auto_compaction(void)
clear_dir(dir);
}
+static void test_reftable_stack_add_performs_auto_compaction(void)
+{
+ struct reftable_write_options cfg = { 0 };
+ struct reftable_stack *st = NULL;
+ struct strbuf refname = STRBUF_INIT;
+ char *dir = get_tmp_dir(__LINE__);
+ int err, i, n = 20;
+
+ err = reftable_new_stack(&st, dir, cfg);
+ EXPECT_ERR(err);
+
+ for (i = 0; i <= n; i++) {
+ struct reftable_ref_record ref = {
+ .update_index = reftable_stack_next_update_index(st),
+ .value_type = REFTABLE_REF_SYMREF,
+ .value.symref = "master",
+ };
+
+ /*
+ * Disable auto-compaction for all but the last run. This ensures
+ * that we indeed honor the setting and gives us better control
+ * over when exactly auto-compaction runs.
+ */
+ st->config.disable_auto_compact = i != n;
+
+ strbuf_reset(&refname);
+ strbuf_addf(&refname, "branch-%04d", i);
+ ref.refname = refname.buf;
+
+ err = reftable_stack_add(st, &write_test_ref, &ref);
+ EXPECT_ERR(err);
+
+ /*
+ * The stack length should grow continuously for all runs where
+ * auto compaction is disabled. When enabled, we should merge
+ * all tables in the stack.
+ */
+ if (i != n)
+ EXPECT(st->merged->stack_len == i + 1);
+ else
+ EXPECT(st->merged->stack_len == 1);
+ }
+
+ reftable_stack_destroy(st);
+ strbuf_release(&refname);
+ clear_dir(dir);
+}
+
static void test_reftable_stack_compaction_concurrent(void)
{
struct reftable_write_options cfg = { 0 };
@@ -952,7 +1086,6 @@ static void test_reftable_stack_compaction_concurrent_clean(void)
int stack_test_main(int argc, const char *argv[])
{
RUN_TEST(test_empty_add);
- RUN_TEST(test_log2);
RUN_TEST(test_names_equal);
RUN_TEST(test_parse_names);
RUN_TEST(test_read_file);
@@ -960,6 +1093,7 @@ int stack_test_main(int argc, const char *argv[])
RUN_TEST(test_reftable_stack_add);
RUN_TEST(test_reftable_stack_add_one);
RUN_TEST(test_reftable_stack_auto_compaction);
+ RUN_TEST(test_reftable_stack_add_performs_auto_compaction);
RUN_TEST(test_reftable_stack_compaction_concurrent);
RUN_TEST(test_reftable_stack_compaction_concurrent_clean);
RUN_TEST(test_reftable_stack_hash_id);
@@ -967,12 +1101,11 @@ int stack_test_main(int argc, const char *argv[])
RUN_TEST(test_reftable_stack_log_normalize);
RUN_TEST(test_reftable_stack_tombstone);
RUN_TEST(test_reftable_stack_transaction_api);
+ RUN_TEST(test_reftable_stack_transaction_api_performs_auto_compaction);
+ RUN_TEST(test_reftable_stack_auto_compaction_fails_gracefully);
RUN_TEST(test_reftable_stack_update_index_check);
RUN_TEST(test_reftable_stack_uptodate);
RUN_TEST(test_reftable_stack_validate_refname);
- RUN_TEST(test_sizes_to_segments);
- RUN_TEST(test_sizes_to_segments_all_equal);
- RUN_TEST(test_sizes_to_segments_empty);
RUN_TEST(test_suggest_compaction_segment);
RUN_TEST(test_suggest_compaction_segment_nothing);
return 0;