Diffstat (limited to 'src/libsystemd/sd-journal')
-rw-r--r--  src/libsystemd/sd-journal/audit-type.c                 |    6
-rw-r--r--  src/libsystemd/sd-journal/audit-type.h                 |   22
-rw-r--r--  src/libsystemd/sd-journal/audit_type-to-name.awk       |   14
-rw-r--r--  src/libsystemd/sd-journal/catalog.c                    |  743
-rw-r--r--  src/libsystemd/sd-journal/catalog.h                    |   19
-rw-r--r--  src/libsystemd/sd-journal/fsprg.c                      |  381
-rw-r--r--  src/libsystemd/sd-journal/fsprg.h                      |   61
-rwxr-xr-x  src/libsystemd/sd-journal/generate-audit_type-list.sh  |   17
-rw-r--r--  src/libsystemd/sd-journal/journal-authenticate.c       |  525
-rw-r--r--  src/libsystemd/sd-journal/journal-authenticate.h       |   23
-rw-r--r--  src/libsystemd/sd-journal/journal-def.h                |  269
-rw-r--r--  src/libsystemd/sd-journal/journal-file.c               | 4696
-rw-r--r--  src/libsystemd/sd-journal/journal-file.h               |  393
-rw-r--r--  src/libsystemd/sd-journal/journal-internal.h           |  142
-rw-r--r--  src/libsystemd/sd-journal/journal-send.c               |  576
-rw-r--r--  src/libsystemd/sd-journal/journal-send.h               |    7
-rw-r--r--  src/libsystemd/sd-journal/journal-vacuum.c             |  330
-rw-r--r--  src/libsystemd/sd-journal/journal-vacuum.h             |    9
-rw-r--r--  src/libsystemd/sd-journal/journal-verify.c             | 1436
-rw-r--r--  src/libsystemd/sd-journal/journal-verify.h             |    6
-rw-r--r--  src/libsystemd/sd-journal/lookup3.c                    | 1002
-rw-r--r--  src/libsystemd/sd-journal/lookup3.h                    |   23
-rw-r--r--  src/libsystemd/sd-journal/mmap-cache.c                 |  562
-rw-r--r--  src/libsystemd/sd-journal/mmap-cache.h                 |   60
-rw-r--r--  src/libsystemd/sd-journal/sd-journal.c                 | 3528
-rw-r--r--  src/libsystemd/sd-journal/test-audit-type.c            |   24
-rw-r--r--  src/libsystemd/sd-journal/test-catalog.c               |  235
-rw-r--r--  src/libsystemd/sd-journal/test-journal-append.c        |  269
-rw-r--r--  src/libsystemd/sd-journal/test-journal-enum.c          |   37
-rw-r--r--  src/libsystemd/sd-journal/test-journal-file.c          |   45
-rw-r--r--  src/libsystemd/sd-journal/test-journal-flush.c         |  118
-rw-r--r--  src/libsystemd/sd-journal/test-journal-init.c          |   68
-rw-r--r--  src/libsystemd/sd-journal/test-journal-interleaving.c  |  737
-rw-r--r--  src/libsystemd/sd-journal/test-journal-match.c         |   61
-rw-r--r--  src/libsystemd/sd-journal/test-journal-send.c          |  111
-rw-r--r--  src/libsystemd/sd-journal/test-journal-stream.c        |  201
-rw-r--r--  src/libsystemd/sd-journal/test-journal-verify.c        |  210
-rw-r--r--  src/libsystemd/sd-journal/test-journal.c               |  280
-rw-r--r--  src/libsystemd/sd-journal/test-mmap-cache.c            |   68
39 files changed, 17314 insertions, 0 deletions
diff --git a/src/libsystemd/sd-journal/audit-type.c b/src/libsystemd/sd-journal/audit-type.c
new file mode 100644
index 0000000..122cdf5
--- /dev/null
+++ b/src/libsystemd/sd-journal/audit-type.c
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include "audit-type.h"
+#include "missing_audit.h"
+
+#include "audit_type-to-name.h"
diff --git a/src/libsystemd/sd-journal/audit-type.h b/src/libsystemd/sd-journal/audit-type.h
new file mode 100644
index 0000000..f2c4898
--- /dev/null
+++ b/src/libsystemd/sd-journal/audit-type.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include <stdio.h>
+
+#include "alloc-util.h"
+#include "macro.h"
+
+const char *audit_type_to_string(int type);
+int audit_type_from_string(const char *s);
+
+/* This is inspired by DNS TYPEnnn formatting */
+#define audit_type_name_alloca(type) \
+ ({ \
+ const char *_s_; \
+ _s_ = audit_type_to_string(type); \
+ if (!_s_) { \
+ _s_ = newa(char, STRLEN("AUDIT") + DECIMAL_STR_MAX(int)); \
+ sprintf((char*) _s_, "AUDIT%04i", type); \
+ } \
+ _s_; \
+ })
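
Note: the audit_type_name_alloca() macro above falls back to a synthetic "AUDITnnnn" name (borrowed from DNS TYPEnnn formatting) when a type has no symbolic mapping. A minimal self-contained sketch of that fallback, using plain snprintf() into a local buffer instead of newa(); the lookup stub is hypothetical and stands in for the generated audit_type_to_string():

    #include <stdio.h>

    /* Hypothetical stub standing in for the generated audit_type_to_string(). */
    static const char *demo_audit_type_to_string(int type) {
            return type == 1100 ? "USER_AUTH" : NULL;
    }

    int main(void) {
            const int types[] = { 1100, 1337 };

            for (size_t i = 0; i < sizeof(types) / sizeof(types[0]); i++) {
                    char fallback[5 + 11 + 1];  /* "AUDIT" + decimal int + NUL */
                    const char *s = demo_audit_type_to_string(types[i]);

                    if (!s) {
                            /* Unknown types get the DNS-TYPEnnn-style fallback name. */
                            snprintf(fallback, sizeof(fallback), "AUDIT%04i", types[i]);
                            s = fallback;
                    }

                    printf("%d -> %s\n", types[i], s);  /* 1100 -> USER_AUTH, 1337 -> AUDIT1337 */
            }
            return 0;
    }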
diff --git a/src/libsystemd/sd-journal/audit_type-to-name.awk b/src/libsystemd/sd-journal/audit_type-to-name.awk
new file mode 100644
index 0000000..a859c44
--- /dev/null
+++ b/src/libsystemd/sd-journal/audit_type-to-name.awk
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: LGPL-2.1-or-later
+
+BEGIN{
+ print "const char *audit_type_to_string(int type) {"
+ print " switch (type) {"
+}
+{
+ printf " case AUDIT_%s: return \"%s\";\n", $1, $1
+}
+END{
+ print " default: return NULL;"
+ print " }"
+ print "}"
+}
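
Note: fed an input list of the shape produced by generate-audit_type-list.sh further below (one "NAME<TAB>number" pair per line, e.g. "USER_AUTH 1100"), these awk rules emit a generated header roughly like the following; the two cases shown are illustrative, not the complete table:

    const char *audit_type_to_string(int type) {
            switch (type) {
            case AUDIT_USER_AUTH: return "USER_AUTH";
            case AUDIT_USER_LOGIN: return "USER_LOGIN";
            default: return NULL;
            }
    }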
diff --git a/src/libsystemd/sd-journal/catalog.c b/src/libsystemd/sd-journal/catalog.c
new file mode 100644
index 0000000..ae91534
--- /dev/null
+++ b/src/libsystemd/sd-journal/catalog.c
@@ -0,0 +1,743 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <locale.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#include "sd-id128.h"
+
+#include "alloc-util.h"
+#include "catalog.h"
+#include "conf-files.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "hashmap.h"
+#include "log.h"
+#include "memory-util.h"
+#include "mkdir.h"
+#include "path-util.h"
+#include "siphash24.h"
+#include "sort-util.h"
+#include "sparse-endian.h"
+#include "strbuf.h"
+#include "string-util.h"
+#include "strv.h"
+#include "tmpfile-util.h"
+
+const char * const catalog_file_dirs[] = {
+ "/usr/local/lib/systemd/catalog/",
+ "/usr/lib/systemd/catalog/",
+ NULL
+};
+
+#define CATALOG_SIGNATURE { 'R', 'H', 'H', 'H', 'K', 'S', 'L', 'P' }
+
+typedef struct CatalogHeader {
+ uint8_t signature[8]; /* "RHHHKSLP" */
+ le32_t compatible_flags;
+ le32_t incompatible_flags;
+ le64_t header_size;
+ le64_t n_items;
+ le64_t catalog_item_size;
+} CatalogHeader;
+
+typedef struct CatalogItem {
+ sd_id128_t id;
+ char language[32]; /* One byte is used for termination, so the maximum allowed
+ * length of the string is actually 31 bytes. */
+ le64_t offset;
+} CatalogItem;
+
+static void catalog_hash_func(const CatalogItem *i, struct siphash *state) {
+ siphash24_compress(&i->id, sizeof(i->id), state);
+ siphash24_compress_string(i->language, state);
+}
+
+static int catalog_compare_func(const CatalogItem *a, const CatalogItem *b) {
+ unsigned k;
+ int r;
+
+ for (k = 0; k < ELEMENTSOF(b->id.bytes); k++) {
+ r = CMP(a->id.bytes[k], b->id.bytes[k]);
+ if (r != 0)
+ return r;
+ }
+
+ return strcmp(a->language, b->language);
+}
+
+DEFINE_HASH_OPS(catalog_hash_ops, CatalogItem, catalog_hash_func, catalog_compare_func);
+
+static bool next_header(const char **s) {
+ const char *e;
+
+ e = strchr(*s, '\n');
+
+ /* Unexpected end */
+ if (!e)
+ return false;
+
+ /* End of headers */
+ if (e == *s)
+ return false;
+
+ *s = e + 1;
+ return true;
+}
+
+static const char *skip_header(const char *s) {
+ while (next_header(&s))
+ ;
+ return s;
+}
+
+static char *combine_entries(const char *one, const char *two) {
+ const char *b1, *b2;
+ size_t l1, l2, n;
+ char *dest, *p;
+
+ /* Find split point of headers to body */
+ b1 = skip_header(one);
+ b2 = skip_header(two);
+
+ l1 = strlen(one);
+ l2 = strlen(two);
+ dest = new(char, l1 + l2 + 1);
+ if (!dest) {
+ log_oom();
+ return NULL;
+ }
+
+ p = dest;
+
+ /* Headers from @one */
+ n = b1 - one;
+ p = mempcpy(p, one, n);
+
+ /* Headers from @two, these will only be found if not present above */
+ n = b2 - two;
+ p = mempcpy(p, two, n);
+
+ /* Body from @one */
+ n = l1 - (b1 - one);
+ if (n > 0)
+ p = mempcpy(p, b1, n);
+ /* Body from @two */
+ else {
+ n = l2 - (b2 - two);
+ p = mempcpy(p, b2, n);
+ }
+
+ assert(p - dest <= (ptrdiff_t)(l1 + l2));
+ p[0] = '\0';
+ return dest;
+}
+
+static int finish_item(
+ OrderedHashmap *h,
+ sd_id128_t id,
+ const char *language,
+ char *payload, size_t payload_size) {
+
+ _cleanup_free_ CatalogItem *i = NULL;
+ _cleanup_free_ char *combined = NULL;
+ char *prev;
+ int r;
+
+ assert(h);
+ assert(payload);
+ assert(payload_size > 0);
+
+ i = new0(CatalogItem, 1);
+ if (!i)
+ return log_oom();
+
+ i->id = id;
+ if (language) {
+ assert(strlen(language) > 1 && strlen(language) < 32);
+ strcpy(i->language, language);
+ }
+
+ prev = ordered_hashmap_get(h, i);
+ if (prev) {
+ /* Already have such an item, combine them */
+ combined = combine_entries(payload, prev);
+ if (!combined)
+ return log_oom();
+
+ r = ordered_hashmap_update(h, i, combined);
+ if (r < 0)
+ return log_error_errno(r, "Failed to update catalog item: %m");
+
+ TAKE_PTR(combined);
+ free(prev);
+ } else {
+ /* A new item */
+ combined = memdup(payload, payload_size + 1);
+ if (!combined)
+ return log_oom();
+
+ r = ordered_hashmap_put(h, i, combined);
+ if (r < 0)
+ return log_error_errno(r, "Failed to insert catalog item: %m");
+
+ TAKE_PTR(i);
+ TAKE_PTR(combined);
+ }
+
+ return 0;
+}
+
+int catalog_file_lang(const char* filename, char **lang) {
+ char *beg, *end, *_lang;
+
+ end = endswith(filename, ".catalog");
+ if (!end)
+ return 0;
+
+ beg = end - 1;
+ while (beg > filename && !IN_SET(*beg, '.', '/') && end - beg < 32)
+ beg--;
+
+ if (*beg != '.' || end <= beg + 1)
+ return 0;
+
+ _lang = strndup(beg + 1, end - beg - 1);
+ if (!_lang)
+ return -ENOMEM;
+
+ *lang = _lang;
+ return 1;
+}
+
+static int catalog_entry_lang(
+ const char* filename,
+ unsigned line,
+ const char* t,
+ const char* deflang,
+ char **ret) {
+
+ size_t c;
+ char *z;
+
+ c = strlen(t);
+ if (c < 2)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] Language too short.", filename, line);
+ if (c > 31)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] language too long.", filename, line);
+
+ if (deflang) {
+ if (streq(t, deflang)) {
+ log_warning("[%s:%u] language specified unnecessarily", filename, line);
+ return 0;
+ }
+
+ log_warning("[%s:%u] language differs from default for file", filename, line);
+ }
+
+ z = strdup(t);
+ if (!z)
+ return -ENOMEM;
+
+ *ret = z;
+ return 0;
+}
+
+int catalog_import_file(OrderedHashmap *h, const char *path) {
+ _cleanup_fclose_ FILE *f = NULL;
+ _cleanup_free_ char *payload = NULL;
+ size_t payload_size = 0;
+ unsigned n = 0;
+ sd_id128_t id;
+ _cleanup_free_ char *deflang = NULL, *lang = NULL;
+ bool got_id = false, empty_line = true;
+ int r;
+
+ assert(h);
+ assert(path);
+
+ f = fopen(path, "re");
+ if (!f)
+ return log_error_errno(errno, "Failed to open file %s: %m", path);
+
+ r = catalog_file_lang(path, &deflang);
+ if (r < 0)
+ log_error_errno(r, "Failed to determine language for file %s: %m", path);
+ if (r == 1)
+ log_debug("File %s has language %s.", path, deflang);
+
+ for (;;) {
+ _cleanup_free_ char *line = NULL;
+ size_t line_len;
+
+ r = read_line(f, LONG_LINE_MAX, &line);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read file %s: %m", path);
+ if (r == 0)
+ break;
+
+ n++;
+
+ if (isempty(line)) {
+ empty_line = true;
+ continue;
+ }
+
+ if (strchr(COMMENTS, line[0]))
+ continue;
+
+ if (empty_line &&
+ strlen(line) >= 2+1+32 &&
+ line[0] == '-' &&
+ line[1] == '-' &&
+ line[2] == ' ' &&
+ IN_SET(line[2+1+32], ' ', '\0')) {
+
+ bool with_language;
+ sd_id128_t jd;
+
+ /* New entry */
+
+ with_language = line[2+1+32] != '\0';
+ line[2+1+32] = '\0';
+
+ if (sd_id128_from_string(line + 2 + 1, &jd) >= 0) {
+
+ if (got_id) {
+ if (payload_size == 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] No payload text.",
+ path,
+ n);
+
+ r = finish_item(h, id, lang ?: deflang, payload, payload_size);
+ if (r < 0)
+ return r;
+
+ lang = mfree(lang);
+ payload_size = 0;
+ }
+
+ if (with_language) {
+ char *t;
+
+ t = strstrip(line + 2 + 1 + 32 + 1);
+ r = catalog_entry_lang(path, n, t, deflang, &lang);
+ if (r < 0)
+ return r;
+ }
+
+ got_id = true;
+ empty_line = false;
+ id = jd;
+
+ continue;
+ }
+ }
+
+ /* Payload */
+ if (!got_id)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] Got payload before ID.",
+ path, n);
+
+ line_len = strlen(line);
+ if (!GREEDY_REALLOC(payload, payload_size + (empty_line ? 1 : 0) + line_len + 1 + 1))
+ return log_oom();
+
+ if (empty_line)
+ payload[payload_size++] = '\n';
+ memcpy(payload + payload_size, line, line_len);
+ payload_size += line_len;
+ payload[payload_size++] = '\n';
+ payload[payload_size] = '\0';
+
+ empty_line = false;
+ }
+
+ if (got_id) {
+ if (payload_size == 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] No payload text.",
+ path, n);
+
+ r = finish_item(h, id, lang ?: deflang, payload, payload_size);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int64_t write_catalog(
+ const char *database,
+ struct strbuf *sb,
+ CatalogItem *items,
+ size_t n) {
+
+ _cleanup_fclose_ FILE *w = NULL;
+ _cleanup_free_ char *p = NULL;
+ CatalogHeader header;
+ size_t k;
+ int r;
+
+ r = mkdir_parents(database, 0755);
+ if (r < 0)
+ return log_error_errno(r, "Failed to create parent directories of %s: %m", database);
+
+ r = fopen_temporary(database, &w, &p);
+ if (r < 0)
+ return log_error_errno(r, "Failed to open database for writing: %s: %m",
+ database);
+
+ header = (CatalogHeader) {
+ .signature = CATALOG_SIGNATURE,
+ .header_size = htole64(CONST_ALIGN_TO(sizeof(CatalogHeader), 8)),
+ .catalog_item_size = htole64(sizeof(CatalogItem)),
+ .n_items = htole64(n),
+ };
+
+ r = -EIO;
+
+ k = fwrite(&header, 1, sizeof(header), w);
+ if (k != sizeof(header)) {
+ log_error("%s: failed to write header.", p);
+ goto error;
+ }
+
+ k = fwrite(items, 1, n * sizeof(CatalogItem), w);
+ if (k != n * sizeof(CatalogItem)) {
+ log_error("%s: failed to write database.", p);
+ goto error;
+ }
+
+ k = fwrite(sb->buf, 1, sb->len, w);
+ if (k != sb->len) {
+ log_error("%s: failed to write strings.", p);
+ goto error;
+ }
+
+ r = fflush_and_check(w);
+ if (r < 0) {
+ log_error_errno(r, "%s: failed to write database: %m", p);
+ goto error;
+ }
+
+ (void) fchmod(fileno(w), 0644);
+
+ if (rename(p, database) < 0) {
+ r = log_error_errno(errno, "rename (%s -> %s) failed: %m", p, database);
+ goto error;
+ }
+
+ return ftello(w);
+
+error:
+ (void) unlink(p);
+ return r;
+}
+
+int catalog_update(const char* database, const char* root, const char* const* dirs) {
+ _cleanup_strv_free_ char **files = NULL;
+ _cleanup_(strbuf_freep) struct strbuf *sb = NULL;
+ _cleanup_ordered_hashmap_free_free_free_ OrderedHashmap *h = NULL;
+ _cleanup_free_ CatalogItem *items = NULL;
+ ssize_t offset;
+ char *payload;
+ CatalogItem *i;
+ unsigned n;
+ int r;
+ int64_t sz;
+
+ h = ordered_hashmap_new(&catalog_hash_ops);
+ sb = strbuf_new();
+ if (!h || !sb)
+ return log_oom();
+
+ r = conf_files_list_strv(&files, ".catalog", root, 0, dirs);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get catalog files: %m");
+
+ STRV_FOREACH(f, files) {
+ log_debug("Reading file '%s'", *f);
+ r = catalog_import_file(h, *f);
+ if (r < 0)
+ return log_error_errno(r, "Failed to import file '%s': %m", *f);
+ }
+
+ if (ordered_hashmap_size(h) <= 0) {
+ log_info("No items in catalog.");
+ return 0;
+ } else
+ log_debug("Found %u items in catalog.", ordered_hashmap_size(h));
+
+ items = new(CatalogItem, ordered_hashmap_size(h));
+ if (!items)
+ return log_oom();
+
+ n = 0;
+ ORDERED_HASHMAP_FOREACH_KEY(payload, i, h) {
+ log_trace("Found " SD_ID128_FORMAT_STR ", language %s",
+ SD_ID128_FORMAT_VAL(i->id),
+ isempty(i->language) ? "C" : i->language);
+
+ offset = strbuf_add_string(sb, payload, strlen(payload));
+ if (offset < 0)
+ return log_oom();
+
+ i->offset = htole64((uint64_t) offset);
+ items[n++] = *i;
+ }
+
+ assert(n == ordered_hashmap_size(h));
+ typesafe_qsort(items, n, catalog_compare_func);
+
+ strbuf_complete(sb);
+
+ sz = write_catalog(database, sb, items, n);
+ if (sz < 0)
+ return log_error_errno(sz, "Failed to write %s: %m", database);
+
+ log_debug("%s: wrote %u items, with %zu bytes of strings, %"PRIi64" total size.",
+ database, n, sb->len, sz);
+ return 0;
+}
+
+static int open_mmap(const char *database, int *_fd, struct stat *_st, void **_p) {
+ _cleanup_close_ int fd = -EBADF;
+ const CatalogHeader *h;
+ struct stat st;
+ void *p;
+
+ assert(_fd);
+ assert(_st);
+ assert(_p);
+
+ fd = open(database, O_RDONLY|O_CLOEXEC);
+ if (fd < 0)
+ return -errno;
+
+ if (fstat(fd, &st) < 0)
+ return -errno;
+
+ if (st.st_size < (off_t) sizeof(CatalogHeader) || file_offset_beyond_memory_size(st.st_size))
+ return -EINVAL;
+
+ p = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
+ if (p == MAP_FAILED)
+ return -errno;
+
+ h = p;
+ if (memcmp(h->signature, (const uint8_t[]) CATALOG_SIGNATURE, sizeof(h->signature)) != 0 ||
+ le64toh(h->header_size) < sizeof(CatalogHeader) ||
+ le64toh(h->catalog_item_size) < sizeof(CatalogItem) ||
+ h->incompatible_flags != 0 ||
+ le64toh(h->n_items) <= 0 ||
+ st.st_size < (off_t) (le64toh(h->header_size) + le64toh(h->catalog_item_size) * le64toh(h->n_items))) {
+ munmap(p, st.st_size);
+ return -EBADMSG;
+ }
+
+ *_fd = TAKE_FD(fd);
+ *_st = st;
+ *_p = p;
+
+ return 0;
+}
+
+static const char *find_id(void *p, sd_id128_t id) {
+ CatalogItem *f = NULL, key = { .id = id };
+ const CatalogHeader *h = p;
+ const char *loc;
+
+ loc = setlocale(LC_MESSAGES, NULL);
+ if (!isempty(loc) && !STR_IN_SET(loc, "C", "POSIX")) {
+ size_t len;
+
+ len = strcspn(loc, ".@");
+ if (len > sizeof(key.language) - 1)
+ log_debug("LC_MESSAGES value too long, ignoring: \"%.*s\"", (int) len, loc);
+ else {
+ strncpy(key.language, loc, len);
+ key.language[len] = '\0';
+
+ f = bsearch(&key,
+ (const uint8_t*) p + le64toh(h->header_size),
+ le64toh(h->n_items),
+ le64toh(h->catalog_item_size),
+ (comparison_fn_t) catalog_compare_func);
+ if (!f) {
+ char *e;
+
+ e = strchr(key.language, '_');
+ if (e) {
+ *e = 0;
+ f = bsearch(&key,
+ (const uint8_t*) p + le64toh(h->header_size),
+ le64toh(h->n_items),
+ le64toh(h->catalog_item_size),
+ (comparison_fn_t) catalog_compare_func);
+ }
+ }
+ }
+ }
+
+ if (!f) {
+ zero(key.language);
+ f = bsearch(&key,
+ (const uint8_t*) p + le64toh(h->header_size),
+ le64toh(h->n_items),
+ le64toh(h->catalog_item_size),
+ (comparison_fn_t) catalog_compare_func);
+ }
+
+ if (!f)
+ return NULL;
+
+ return (const char*) p +
+ le64toh(h->header_size) +
+ le64toh(h->n_items) * le64toh(h->catalog_item_size) +
+ le64toh(f->offset);
+}
+
+int catalog_get(const char* database, sd_id128_t id, char **_text) {
+ _cleanup_close_ int fd = -EBADF;
+ void *p = NULL;
+ struct stat st = {};
+ char *text = NULL;
+ int r;
+ const char *s;
+
+ assert(_text);
+
+ r = open_mmap(database, &fd, &st, &p);
+ if (r < 0)
+ return r;
+
+ s = find_id(p, id);
+ if (!s) {
+ r = -ENOENT;
+ goto finish;
+ }
+
+ text = strdup(s);
+ if (!text) {
+ r = -ENOMEM;
+ goto finish;
+ }
+
+ *_text = text;
+ r = 0;
+
+finish:
+ if (p)
+ munmap(p, st.st_size);
+
+ return r;
+}
+
+static char *find_header(const char *s, const char *header) {
+
+ for (;;) {
+ const char *v;
+
+ v = startswith(s, header);
+ if (v) {
+ v += strspn(v, WHITESPACE);
+ return strndup(v, strcspn(v, NEWLINE));
+ }
+
+ if (!next_header(&s))
+ return NULL;
+ }
+}
+
+static void dump_catalog_entry(FILE *f, sd_id128_t id, const char *s, bool oneline) {
+ if (oneline) {
+ _cleanup_free_ char *subject = NULL, *defined_by = NULL;
+
+ subject = find_header(s, "Subject:");
+ defined_by = find_header(s, "Defined-By:");
+
+ fprintf(f, SD_ID128_FORMAT_STR " %s: %s\n",
+ SD_ID128_FORMAT_VAL(id),
+ strna(defined_by), strna(subject));
+ } else
+ fprintf(f, "-- " SD_ID128_FORMAT_STR "\n%s\n",
+ SD_ID128_FORMAT_VAL(id), s);
+}
+
+int catalog_list(FILE *f, const char *database, bool oneline) {
+ _cleanup_close_ int fd = -EBADF;
+ void *p = NULL;
+ struct stat st;
+ const CatalogHeader *h;
+ const CatalogItem *items;
+ int r;
+ unsigned n;
+ sd_id128_t last_id;
+ bool last_id_set = false;
+
+ r = open_mmap(database, &fd, &st, &p);
+ if (r < 0)
+ return r;
+
+ h = p;
+ items = (const CatalogItem*) ((const uint8_t*) p + le64toh(h->header_size));
+
+ for (n = 0; n < le64toh(h->n_items); n++) {
+ const char *s;
+
+ if (last_id_set && sd_id128_equal(last_id, items[n].id))
+ continue;
+
+ assert_se(s = find_id(p, items[n].id));
+
+ dump_catalog_entry(f, items[n].id, s, oneline);
+
+ last_id_set = true;
+ last_id = items[n].id;
+ }
+
+ munmap(p, st.st_size);
+
+ return 0;
+}
+
+int catalog_list_items(FILE *f, const char *database, bool oneline, char **items) {
+ int r = 0;
+
+ STRV_FOREACH(item, items) {
+ sd_id128_t id;
+ int k;
+ _cleanup_free_ char *msg = NULL;
+
+ k = sd_id128_from_string(*item, &id);
+ if (k < 0) {
+ log_error_errno(k, "Failed to parse id128 '%s': %m", *item);
+ if (r == 0)
+ r = k;
+ continue;
+ }
+
+ k = catalog_get(database, id, &msg);
+ if (k < 0) {
+ log_full_errno(k == -ENOENT ? LOG_NOTICE : LOG_ERR, k,
+ "Failed to retrieve catalog entry for '%s': %m", *item);
+ if (r == 0)
+ r = k;
+ continue;
+ }
+
+ dump_catalog_entry(f, id, msg, oneline);
+ }
+
+ return r;
+}
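
Note: catalog_import_file() above parses .catalog source files shaped like the illustrative entry below: a line starting with "-- " carrying the 128-bit message ID (a placeholder here) and optionally a language tag of up to 31 characters, then header lines such as Subject: and Defined-By:, a blank line, and the free-form message body. A translated variant repeats the same ID followed by its language tag, and entries sharing an ID are merged by finish_item() via combine_entries().

    -- 0123456789abcdef0123456789abcdef
    Subject: Example message used for illustration
    Defined-By: example
    Support: https://example.com/support

    The message body starts after the blank line and may span
    several lines, up to the next "-- " header or the end of file.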
diff --git a/src/libsystemd/sd-journal/catalog.h b/src/libsystemd/sd-journal/catalog.h
new file mode 100644
index 0000000..df27869
--- /dev/null
+++ b/src/libsystemd/sd-journal/catalog.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include <stdbool.h>
+#include <stdio.h>
+
+#include "sd-id128.h"
+
+#include "hashmap.h"
+#include "strbuf.h"
+
+int catalog_import_file(OrderedHashmap *h, const char *path);
+int catalog_update(const char* database, const char* root, const char* const* dirs);
+int catalog_get(const char* database, sd_id128_t id, char **data);
+int catalog_list(FILE *f, const char* database, bool oneline);
+int catalog_list_items(FILE *f, const char* database, bool oneline, char **items);
+int catalog_file_lang(const char *filename, char **lang);
+extern const char * const catalog_file_dirs[];
+extern const struct hash_ops catalog_hash_ops;
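
Note: a hedged usage sketch of the API declared above — rebuild the binary database from the default catalog directories, then resolve one message ID to its text. It only builds inside the systemd tree; the database path matches the location journalctl uses by default, and the hex ID is a placeholder rather than a real catalog entry.

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #include "sd-id128.h"

    #include "catalog.h"

    int main(void) {
            const char *database = "/var/lib/systemd/catalog/database";
            sd_id128_t id;
            char *text = NULL;
            int r;

            /* Compile all *.catalog files found in catalog_file_dirs into the binary database. */
            r = catalog_update(database, /* root= */ NULL, catalog_file_dirs);
            if (r < 0)
                    return EXIT_FAILURE;

            /* Placeholder ID; substitute a MESSAGE_ID= value seen in your own logs. */
            if (sd_id128_from_string("0123456789abcdef0123456789abcdef", &id) < 0)
                    return EXIT_FAILURE;

            r = catalog_get(database, id, &text);
            if (r == -ENOENT) {
                    puts("No catalog entry for this ID.");
                    return EXIT_SUCCESS;
            }
            if (r < 0)
                    return EXIT_FAILURE;

            puts(text);
            free(text);
            return EXIT_SUCCESS;
    }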
diff --git a/src/libsystemd/sd-journal/fsprg.c b/src/libsystemd/sd-journal/fsprg.c
new file mode 100644
index 0000000..e86be6a
--- /dev/null
+++ b/src/libsystemd/sd-journal/fsprg.c
@@ -0,0 +1,381 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later
+ *
+ * fsprg v0.1 - (seekable) forward-secure pseudorandom generator
+ * Copyright © 2012 B. Poettering
+ * Contact: fsprg@point-at-infinity.org
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+/*
+ * See "Practical Secure Logging: Seekable Sequential Key Generators"
+ * by G. A. Marson, B. Poettering for details:
+ *
+ * http://eprint.iacr.org/2013/397
+ */
+
+#include <string.h>
+
+#include "fsprg.h"
+#include "gcrypt-util.h"
+#include "memory-util.h"
+
+#define ISVALID_SECPAR(secpar) (((secpar) % 16 == 0) && ((secpar) >= 16) && ((secpar) <= 16384))
+#define VALIDATE_SECPAR(secpar) assert(ISVALID_SECPAR(secpar));
+
+#define RND_HASH GCRY_MD_SHA256
+#define RND_GEN_P 0x01
+#define RND_GEN_Q 0x02
+#define RND_GEN_X 0x03
+
+#pragma GCC diagnostic ignored "-Wpointer-arith"
+/* TODO: remove void* arithmetic and this work-around */
+
+/******************************************************************************/
+
+static void mpi_export(void *buf, size_t buflen, const gcry_mpi_t x) {
+ unsigned len;
+ size_t nwritten;
+
+ assert(gcry_mpi_cmp_ui(x, 0) >= 0);
+ len = (gcry_mpi_get_nbits(x) + 7) / 8;
+ assert(len <= buflen);
+ memzero(buf, buflen);
+ gcry_mpi_print(GCRYMPI_FMT_USG, buf + (buflen - len), len, &nwritten, x);
+ assert(nwritten == len);
+}
+
+static gcry_mpi_t mpi_import(const void *buf, size_t buflen) {
+ gcry_mpi_t h;
+ _unused_ unsigned len;
+
+ assert_se(gcry_mpi_scan(&h, GCRYMPI_FMT_USG, buf, buflen, NULL) == 0);
+ len = (gcry_mpi_get_nbits(h) + 7) / 8;
+ assert(len <= buflen);
+ assert(gcry_mpi_cmp_ui(h, 0) >= 0);
+
+ return h;
+}
+
+static void uint64_export(void *buf, size_t buflen, uint64_t x) {
+ assert(buflen == 8);
+ ((uint8_t*) buf)[0] = (x >> 56) & 0xff;
+ ((uint8_t*) buf)[1] = (x >> 48) & 0xff;
+ ((uint8_t*) buf)[2] = (x >> 40) & 0xff;
+ ((uint8_t*) buf)[3] = (x >> 32) & 0xff;
+ ((uint8_t*) buf)[4] = (x >> 24) & 0xff;
+ ((uint8_t*) buf)[5] = (x >> 16) & 0xff;
+ ((uint8_t*) buf)[6] = (x >> 8) & 0xff;
+ ((uint8_t*) buf)[7] = (x >> 0) & 0xff;
+}
+
+static uint64_t uint64_import(const void *buf, size_t buflen) {
+ assert(buflen == 8);
+ return
+ (uint64_t)(((uint8_t*) buf)[0]) << 56 |
+ (uint64_t)(((uint8_t*) buf)[1]) << 48 |
+ (uint64_t)(((uint8_t*) buf)[2]) << 40 |
+ (uint64_t)(((uint8_t*) buf)[3]) << 32 |
+ (uint64_t)(((uint8_t*) buf)[4]) << 24 |
+ (uint64_t)(((uint8_t*) buf)[5]) << 16 |
+ (uint64_t)(((uint8_t*) buf)[6]) << 8 |
+ (uint64_t)(((uint8_t*) buf)[7]) << 0;
+}
+
+/* deterministically generate from seed/idx a string of buflen pseudorandom bytes */
+static void det_randomize(void *buf, size_t buflen, const void *seed, size_t seedlen, uint32_t idx) {
+ gcry_md_hd_t hd, hd2;
+ size_t olen, cpylen;
+ gcry_error_t err;
+ uint32_t ctr;
+
+ olen = gcry_md_get_algo_dlen(RND_HASH);
+ err = gcry_md_open(&hd, RND_HASH, 0);
+ assert_se(gcry_err_code(err) == GPG_ERR_NO_ERROR); /* This shouldn't happen */
+ gcry_md_write(hd, seed, seedlen);
+ gcry_md_putc(hd, (idx >> 24) & 0xff);
+ gcry_md_putc(hd, (idx >> 16) & 0xff);
+ gcry_md_putc(hd, (idx >> 8) & 0xff);
+ gcry_md_putc(hd, (idx >> 0) & 0xff);
+
+ for (ctr = 0; buflen; ctr++) {
+ err = gcry_md_copy(&hd2, hd);
+ assert_se(gcry_err_code(err) == GPG_ERR_NO_ERROR); /* This shouldn't happen */
+ gcry_md_putc(hd2, (ctr >> 24) & 0xff);
+ gcry_md_putc(hd2, (ctr >> 16) & 0xff);
+ gcry_md_putc(hd2, (ctr >> 8) & 0xff);
+ gcry_md_putc(hd2, (ctr >> 0) & 0xff);
+ gcry_md_final(hd2);
+ cpylen = (buflen < olen) ? buflen : olen;
+ memcpy(buf, gcry_md_read(hd2, RND_HASH), cpylen);
+ gcry_md_close(hd2);
+ buf += cpylen;
+ buflen -= cpylen;
+ }
+ gcry_md_close(hd);
+}
+
+/* deterministically generate from seed/idx a prime of length `bits' that is 3 (mod 4) */
+static gcry_mpi_t genprime3mod4(int bits, const void *seed, size_t seedlen, uint32_t idx) {
+ size_t buflen = bits / 8;
+ uint8_t buf[buflen];
+ gcry_mpi_t p;
+
+ assert(bits % 8 == 0);
+ assert(buflen > 0);
+
+ det_randomize(buf, buflen, seed, seedlen, idx);
+ buf[0] |= 0xc0; /* set upper two bits, so that n=pq has maximum size */
+ buf[buflen - 1] |= 0x03; /* set lower two bits, to have result 3 (mod 4) */
+
+ p = mpi_import(buf, buflen);
+ while (gcry_prime_check(p, 0))
+ gcry_mpi_add_ui(p, p, 4);
+
+ return p;
+}
+
+/* deterministically generate from seed/idx a quadratic residue (mod n) */
+static gcry_mpi_t gensquare(const gcry_mpi_t n, const void *seed, size_t seedlen, uint32_t idx, unsigned secpar) {
+ size_t buflen = secpar / 8;
+ uint8_t buf[buflen];
+ gcry_mpi_t x;
+
+ det_randomize(buf, buflen, seed, seedlen, idx);
+ buf[0] &= 0x7f; /* clear upper bit, so that we have x < n */
+ x = mpi_import(buf, buflen);
+ assert(gcry_mpi_cmp(x, n) < 0);
+ gcry_mpi_mulm(x, x, x, n);
+ return x;
+}
+
+/* compute 2^m (mod phi(p)), for a prime p */
+static gcry_mpi_t twopowmodphi(uint64_t m, const gcry_mpi_t p) {
+ gcry_mpi_t phi, r;
+ int n;
+
+ phi = gcry_mpi_new(0);
+ gcry_mpi_sub_ui(phi, p, 1);
+
+ /* count number of used bits in m */
+ for (n = 0; (1ULL << n) <= m; n++)
+ ;
+
+ r = gcry_mpi_new(0);
+ gcry_mpi_set_ui(r, 1);
+ while (n) { /* square and multiply algorithm for fast exponentiation */
+ n--;
+ gcry_mpi_mulm(r, r, r, phi);
+ if (m & ((uint64_t)1 << n)) {
+ gcry_mpi_add(r, r, r);
+ if (gcry_mpi_cmp(r, phi) >= 0)
+ gcry_mpi_sub(r, r, phi);
+ }
+ }
+
+ gcry_mpi_release(phi);
+ return r;
+}
+
+/* Decompose $x \in Z_n$ into $(xp,xq) \in Z_p \times Z_q$ using Chinese Remainder Theorem */
+static void CRT_decompose(gcry_mpi_t *xp, gcry_mpi_t *xq, const gcry_mpi_t x, const gcry_mpi_t p, const gcry_mpi_t q) {
+ *xp = gcry_mpi_new(0);
+ *xq = gcry_mpi_new(0);
+ gcry_mpi_mod(*xp, x, p);
+ gcry_mpi_mod(*xq, x, q);
+}
+
+/* Compose $(xp,xq) \in Z_p \times Z_q$ into $x \in Z_n$ using Chinese Remainder Theorem */
+static void CRT_compose(gcry_mpi_t *x, const gcry_mpi_t xp, const gcry_mpi_t xq, const gcry_mpi_t p, const gcry_mpi_t q) {
+ gcry_mpi_t a, u;
+
+ a = gcry_mpi_new(0);
+ u = gcry_mpi_new(0);
+ *x = gcry_mpi_new(0);
+ gcry_mpi_subm(a, xq, xp, q);
+ gcry_mpi_invm(u, p, q);
+ gcry_mpi_mulm(a, a, u, q); /* a = (xq - xp) / p (mod q) */
+ gcry_mpi_mul(*x, p, a);
+ gcry_mpi_add(*x, *x, xp); /* x = p * ((xq - xp) / p mod q) + xp */
+ gcry_mpi_release(a);
+ gcry_mpi_release(u);
+}
+
+/******************************************************************************/
+
+size_t FSPRG_mskinbytes(unsigned _secpar) {
+ VALIDATE_SECPAR(_secpar);
+ return 2 + 2 * (_secpar / 2) / 8; /* to store header,p,q */
+}
+
+size_t FSPRG_mpkinbytes(unsigned _secpar) {
+ VALIDATE_SECPAR(_secpar);
+ return 2 + _secpar / 8; /* to store header,n */
+}
+
+size_t FSPRG_stateinbytes(unsigned _secpar) {
+ VALIDATE_SECPAR(_secpar);
+ return 2 + 2 * _secpar / 8 + 8; /* to store header,n,x,epoch */
+}
+
+static void store_secpar(void *buf, uint16_t secpar) {
+ secpar = secpar / 16 - 1;
+ ((uint8_t*) buf)[0] = (secpar >> 8) & 0xff;
+ ((uint8_t*) buf)[1] = (secpar >> 0) & 0xff;
+}
+
+static uint16_t read_secpar(const void *buf) {
+ uint16_t secpar;
+ secpar =
+ (uint16_t)(((uint8_t*) buf)[0]) << 8 |
+ (uint16_t)(((uint8_t*) buf)[1]) << 0;
+ return 16 * (secpar + 1);
+}
+
+void FSPRG_GenMK(void *msk, void *mpk, const void *seed, size_t seedlen, unsigned _secpar) {
+ uint8_t iseed[FSPRG_RECOMMENDED_SEEDLEN];
+ gcry_mpi_t n, p, q;
+ uint16_t secpar;
+
+ VALIDATE_SECPAR(_secpar);
+ secpar = _secpar;
+
+ initialize_libgcrypt(false);
+
+ if (!seed) {
+ gcry_randomize(iseed, FSPRG_RECOMMENDED_SEEDLEN, GCRY_STRONG_RANDOM);
+ seed = iseed;
+ seedlen = FSPRG_RECOMMENDED_SEEDLEN;
+ }
+
+ p = genprime3mod4(secpar / 2, seed, seedlen, RND_GEN_P);
+ q = genprime3mod4(secpar / 2, seed, seedlen, RND_GEN_Q);
+
+ if (msk) {
+ store_secpar(msk + 0, secpar);
+ mpi_export(msk + 2 + 0 * (secpar / 2) / 8, (secpar / 2) / 8, p);
+ mpi_export(msk + 2 + 1 * (secpar / 2) / 8, (secpar / 2) / 8, q);
+ }
+
+ if (mpk) {
+ n = gcry_mpi_new(0);
+ gcry_mpi_mul(n, p, q);
+ assert(gcry_mpi_get_nbits(n) == secpar);
+
+ store_secpar(mpk + 0, secpar);
+ mpi_export(mpk + 2, secpar / 8, n);
+
+ gcry_mpi_release(n);
+ }
+
+ gcry_mpi_release(p);
+ gcry_mpi_release(q);
+}
+
+void FSPRG_GenState0(void *state, const void *mpk, const void *seed, size_t seedlen) {
+ gcry_mpi_t n, x;
+ uint16_t secpar;
+
+ initialize_libgcrypt(false);
+
+ secpar = read_secpar(mpk + 0);
+ n = mpi_import(mpk + 2, secpar / 8);
+ x = gensquare(n, seed, seedlen, RND_GEN_X, secpar);
+
+ memcpy(state, mpk, 2 + secpar / 8);
+ mpi_export(state + 2 + 1 * secpar / 8, secpar / 8, x);
+ memzero(state + 2 + 2 * secpar / 8, 8);
+
+ gcry_mpi_release(n);
+ gcry_mpi_release(x);
+}
+
+void FSPRG_Evolve(void *state) {
+ gcry_mpi_t n, x;
+ uint16_t secpar;
+ uint64_t epoch;
+
+ initialize_libgcrypt(false);
+
+ secpar = read_secpar(state + 0);
+ n = mpi_import(state + 2 + 0 * secpar / 8, secpar / 8);
+ x = mpi_import(state + 2 + 1 * secpar / 8, secpar / 8);
+ epoch = uint64_import(state + 2 + 2 * secpar / 8, 8);
+
+ gcry_mpi_mulm(x, x, x, n);
+ epoch++;
+
+ mpi_export(state + 2 + 1 * secpar / 8, secpar / 8, x);
+ uint64_export(state + 2 + 2 * secpar / 8, 8, epoch);
+
+ gcry_mpi_release(n);
+ gcry_mpi_release(x);
+}
+
+uint64_t FSPRG_GetEpoch(const void *state) {
+ uint16_t secpar;
+ secpar = read_secpar(state + 0);
+ return uint64_import(state + 2 + 2 * secpar / 8, 8);
+}
+
+void FSPRG_Seek(void *state, uint64_t epoch, const void *msk, const void *seed, size_t seedlen) {
+ gcry_mpi_t p, q, n, x, xp, xq, kp, kq, xm;
+ uint16_t secpar;
+
+ initialize_libgcrypt(false);
+
+ secpar = read_secpar(msk + 0);
+ p = mpi_import(msk + 2 + 0 * (secpar / 2) / 8, (secpar / 2) / 8);
+ q = mpi_import(msk + 2 + 1 * (secpar / 2) / 8, (secpar / 2) / 8);
+
+ n = gcry_mpi_new(0);
+ gcry_mpi_mul(n, p, q);
+
+ x = gensquare(n, seed, seedlen, RND_GEN_X, secpar);
+ CRT_decompose(&xp, &xq, x, p, q); /* split (mod n) into (mod p) and (mod q) using CRT */
+
+ kp = twopowmodphi(epoch, p); /* compute 2^epoch (mod phi(p)) */
+ kq = twopowmodphi(epoch, q); /* compute 2^epoch (mod phi(q)) */
+
+ gcry_mpi_powm(xp, xp, kp, p); /* compute x^(2^epoch) (mod p) */
+ gcry_mpi_powm(xq, xq, kq, q); /* compute x^(2^epoch) (mod q) */
+
+ CRT_compose(&xm, xp, xq, p, q); /* combine (mod p) and (mod q) to (mod n) using CRT */
+
+ store_secpar(state + 0, secpar);
+ mpi_export(state + 2 + 0 * secpar / 8, secpar / 8, n);
+ mpi_export(state + 2 + 1 * secpar / 8, secpar / 8, xm);
+ uint64_export(state + 2 + 2 * secpar / 8, 8, epoch);
+
+ gcry_mpi_release(p);
+ gcry_mpi_release(q);
+ gcry_mpi_release(n);
+ gcry_mpi_release(x);
+ gcry_mpi_release(xp);
+ gcry_mpi_release(xq);
+ gcry_mpi_release(kp);
+ gcry_mpi_release(kq);
+ gcry_mpi_release(xm);
+}
+
+void FSPRG_GetKey(const void *state, void *key, size_t keylen, uint32_t idx) {
+ uint16_t secpar;
+
+ initialize_libgcrypt(false);
+
+ secpar = read_secpar(state + 0);
+ det_randomize(key, keylen, state + 2, 2 * secpar / 8 + 8, idx);
+}
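
Note: the seek logic above leans on a standard number-theoretic fact — evolving the state e times squares x modulo n = p*q once per epoch, so the result is x^(2^e) mod n, and whoever knows the factorization can jump straight there by reducing the exponent 2^e modulo phi(p) and phi(q) and recombining with the CRT, which is what FSPRG_Seek() does via twopowmodphi() and CRT_compose(). A toy-sized, non-cryptographic sketch of that equivalence; all numbers are made-up demo values:

    #include <stdio.h>

    static unsigned long long modpow(unsigned long long b, unsigned long long e, unsigned long long m) {
            unsigned long long r = 1;
            b %= m;
            while (e > 0) {
                    if (e & 1)
                            r = r * b % m;
                    b = b * b % m;
                    e >>= 1;
            }
            return r;
    }

    static unsigned long long modinv(unsigned long long a, unsigned long long m) {
            /* Brute force is fine for toy moduli. */
            for (unsigned long long i = 1; i < m; i++)
                    if (a * i % m == 1)
                            return i;
            return 0;
    }

    int main(void) {
            const unsigned long long p = 7, q = 11, n = p * q;
            const unsigned long long x0 = 4;    /* a quadratic residue mod n */
            const unsigned long long epoch = 5;

            /* "Evolve" path: square the state once per epoch. */
            unsigned long long x = x0;
            for (unsigned long long i = 0; i < epoch; i++)
                    x = x * x % n;

            /* "Seek" path: exponent 2^epoch reduced mod phi(p) and phi(q), then CRT. */
            unsigned long long kp = modpow(2, epoch, p - 1);
            unsigned long long kq = modpow(2, epoch, q - 1);
            unsigned long long xp = modpow(x0 % p, kp, p);
            unsigned long long xq = modpow(x0 % q, kq, q);
            unsigned long long a = (xq + q - xp % q) % q * modinv(p % q, q) % q;
            unsigned long long seeked = p * a + xp;

            printf("evolve: %llu, seek: %llu\n", x, seeked);  /* both print 16 */
            return 0;
    }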
diff --git a/src/libsystemd/sd-journal/fsprg.h b/src/libsystemd/sd-journal/fsprg.h
new file mode 100644
index 0000000..d3d88aa
--- /dev/null
+++ b/src/libsystemd/sd-journal/fsprg.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+/*
+ * fsprg v0.1 - (seekable) forward-secure pseudorandom generator
+ * Copyright © 2012 B. Poettering
+ * Contact: fsprg@point-at-infinity.org
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <sys/types.h>
+
+#include "macro.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FSPRG_RECOMMENDED_SECPAR 1536
+#define FSPRG_RECOMMENDED_SEEDLEN (96/8)
+
+size_t FSPRG_mskinbytes(unsigned secpar) _const_;
+size_t FSPRG_mpkinbytes(unsigned secpar) _const_;
+size_t FSPRG_stateinbytes(unsigned secpar) _const_;
+
+/* Setup msk and mpk. Providing seed != NULL makes this algorithm deterministic. */
+void FSPRG_GenMK(void *msk, void *mpk, const void *seed, size_t seedlen, unsigned secpar);
+
+/* Initialize state deterministically in dependence on seed. */
+/* Note: in case one wants to run only one GenState0 per GenMK it is safe to use
+ the same seed for both GenMK and GenState0.
+*/
+void FSPRG_GenState0(void *state, const void *mpk, const void *seed, size_t seedlen);
+
+void FSPRG_Evolve(void *state);
+
+uint64_t FSPRG_GetEpoch(const void *state) _pure_;
+
+/* Seek to any arbitrary state (by providing msk together with seed from GenState0). */
+void FSPRG_Seek(void *state, uint64_t epoch, const void *msk, const void *seed, size_t seedlen);
+
+void FSPRG_GetKey(const void *state, void *key, size_t keylen, uint32_t idx);
+
+#ifdef __cplusplus
+}
+#endif
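
Note: a hedged sketch of the intended call sequence for this API — generate the master key pair, derive the initial state from the public part, evolve it one epoch forward and pull a key out of it. It only builds inside the systemd tree with gcrypt available; the buffer sizes come from the accessor functions above, while the seed string and the 256-bit key length are merely illustrative choices.

    #include <stdlib.h>

    #include "fsprg.h"

    int main(void) {
            unsigned secpar = FSPRG_RECOMMENDED_SECPAR;
            uint8_t key[256 / 8];

            void *msk = malloc(FSPRG_mskinbytes(secpar));
            void *mpk = malloc(FSPRG_mpkinbytes(secpar));
            void *state = malloc(FSPRG_stateinbytes(secpar));
            if (!msk || !mpk || !state)
                    return EXIT_FAILURE;

            FSPRG_GenMK(msk, mpk, NULL, 0, secpar);          /* NULL seed: pick one at random */
            FSPRG_GenState0(state, mpk, "demo-seed", 9);     /* deterministic in the given seed */

            FSPRG_Evolve(state);                             /* advance to epoch 1 */
            FSPRG_GetKey(state, key, sizeof(key), 0);        /* derive a key for this epoch */

            free(msk);
            free(mpk);
            free(state);
            return EXIT_SUCCESS;
    }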
diff --git a/src/libsystemd/sd-journal/generate-audit_type-list.sh b/src/libsystemd/sd-journal/generate-audit_type-list.sh
new file mode 100755
index 0000000..3851ea1
--- /dev/null
+++ b/src/libsystemd/sd-journal/generate-audit_type-list.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+# SPDX-License-Identifier: LGPL-2.1-or-later
+set -eu
+set -o pipefail
+
+cpp="${1:?}"
+shift
+
+includes=()
+for i in "$@"; do
+ includes+=(-include "$i")
+done
+
+$cpp -dM "${includes[@]}" - </dev/null | \
+ grep -vE 'AUDIT_.*(FIRST|LAST)_' | \
+ sed -r -n 's/^#define\s+AUDIT_(\w+)\s+([0-9]{4})\s*$$/\1\t\2/p' | \
+ sort -k2
diff --git a/src/libsystemd/sd-journal/journal-authenticate.c b/src/libsystemd/sd-journal/journal-authenticate.c
new file mode 100644
index 0000000..8e7533e
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-authenticate.c
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#include "fd-util.h"
+#include "fsprg.h"
+#include "gcrypt-util.h"
+#include "hexdecoct.h"
+#include "journal-authenticate.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "memory-util.h"
+#include "time-util.h"
+
+static void* fssheader_free(FSSHeader *p) {
+ /* mmap() returns MAP_FAILED on error and sets the errno */
+ if (!p || p == MAP_FAILED)
+ return NULL;
+
+ assert_se(munmap(p, PAGE_ALIGN(sizeof(FSSHeader))) >= 0);
+ return NULL;
+}
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(FSSHeader*, fssheader_free);
+
+static uint64_t journal_file_tag_seqnum(JournalFile *f) {
+ uint64_t r;
+
+ assert(f);
+
+ r = le64toh(f->header->n_tags) + 1;
+ f->header->n_tags = htole64(r);
+
+ return r;
+}
+
+int journal_file_append_tag(JournalFile *f) {
+ Object *o;
+ uint64_t p;
+ int r;
+
+ assert(f);
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ if (!f->hmac_running) {
+ r = journal_file_hmac_start(f);
+ if (r < 0)
+ return r;
+ }
+
+ assert(f->hmac);
+
+ r = journal_file_append_object(f, OBJECT_TAG, sizeof(struct TagObject), &o, &p);
+ if (r < 0)
+ return r;
+
+ o->tag.seqnum = htole64(journal_file_tag_seqnum(f));
+ o->tag.epoch = htole64(FSPRG_GetEpoch(f->fsprg_state));
+
+ log_debug("Writing tag %"PRIu64" for epoch %"PRIu64"",
+ le64toh(o->tag.seqnum),
+ FSPRG_GetEpoch(f->fsprg_state));
+
+ /* Add the tag object itself, so that we can protect its
+ * header. This will exclude the actual hash value in it */
+ r = journal_file_hmac_put_object(f, OBJECT_TAG, o, p);
+ if (r < 0)
+ return r;
+
+ /* Get the HMAC tag and store it in the object */
+ memcpy(o->tag.tag, gcry_md_read(f->hmac, 0), TAG_LENGTH);
+ f->hmac_running = false;
+
+ return 0;
+}
+
+int journal_file_hmac_start(JournalFile *f) {
+ uint8_t key[256 / 8]; /* Let's pass 256 bit from FSPRG to HMAC */
+ gcry_error_t err;
+
+ assert(f);
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ if (f->hmac_running)
+ return 0;
+
+ /* Prepare HMAC for next cycle */
+ gcry_md_reset(f->hmac);
+ FSPRG_GetKey(f->fsprg_state, key, sizeof(key), 0);
+ err = gcry_md_setkey(f->hmac, key, sizeof(key));
+ if (gcry_err_code(err) != GPG_ERR_NO_ERROR)
+ return log_debug_errno(SYNTHETIC_ERRNO(EIO),
+ "gcry_md_setkey() failed with error code: %s",
+ gcry_strerror(err));
+
+ f->hmac_running = true;
+
+ return 0;
+}
+
+static int journal_file_get_epoch(JournalFile *f, uint64_t realtime, uint64_t *epoch) {
+ uint64_t t;
+
+ assert(f);
+ assert(epoch);
+ assert(JOURNAL_HEADER_SEALED(f->header));
+
+ if (f->fss_start_usec == 0 || f->fss_interval_usec == 0)
+ return -EOPNOTSUPP;
+
+ if (realtime < f->fss_start_usec)
+ return -ESTALE;
+
+ t = realtime - f->fss_start_usec;
+ t = t / f->fss_interval_usec;
+
+ *epoch = t;
+
+ return 0;
+}
+
+static int journal_file_fsprg_need_evolve(JournalFile *f, uint64_t realtime) {
+ uint64_t goal, epoch;
+ int r;
+
+ assert(f);
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ r = journal_file_get_epoch(f, realtime, &goal);
+ if (r < 0)
+ return r;
+
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+ if (epoch > goal)
+ return -ESTALE;
+
+ return epoch != goal;
+}
+
+int journal_file_fsprg_evolve(JournalFile *f, uint64_t realtime) {
+ uint64_t goal, epoch;
+ int r;
+
+ assert(f);
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ r = journal_file_get_epoch(f, realtime, &goal);
+ if (r < 0)
+ return r;
+
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+ if (epoch < goal)
+ log_debug("Evolving FSPRG key from epoch %"PRIu64" to %"PRIu64".", epoch, goal);
+
+ for (;;) {
+ if (epoch > goal)
+ return -ESTALE;
+ if (epoch == goal)
+ return 0;
+
+ FSPRG_Evolve(f->fsprg_state);
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+ if (epoch < goal) {
+ r = journal_file_append_tag(f);
+ if (r < 0)
+ return r;
+ }
+ }
+}
+
+int journal_file_fsprg_seek(JournalFile *f, uint64_t goal) {
+ void *msk;
+ uint64_t epoch;
+
+ assert(f);
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ assert(f->fsprg_seed);
+
+ if (f->fsprg_state) {
+ /* Cheaper... */
+
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+ if (goal == epoch)
+ return 0;
+
+ if (goal == epoch + 1) {
+ FSPRG_Evolve(f->fsprg_state);
+ return 0;
+ }
+ } else {
+ f->fsprg_state_size = FSPRG_stateinbytes(FSPRG_RECOMMENDED_SECPAR);
+ f->fsprg_state = malloc(f->fsprg_state_size);
+ if (!f->fsprg_state)
+ return -ENOMEM;
+ }
+
+ log_debug("Seeking FSPRG key to %"PRIu64".", goal);
+
+ msk = alloca_safe(FSPRG_mskinbytes(FSPRG_RECOMMENDED_SECPAR));
+ FSPRG_GenMK(msk, NULL, f->fsprg_seed, f->fsprg_seed_size, FSPRG_RECOMMENDED_SECPAR);
+ FSPRG_Seek(f->fsprg_state, goal, msk, f->fsprg_seed, f->fsprg_seed_size);
+
+ return 0;
+}
+
+int journal_file_maybe_append_tag(JournalFile *f, uint64_t realtime) {
+ int r;
+
+ assert(f);
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ if (realtime <= 0)
+ realtime = now(CLOCK_REALTIME);
+
+ r = journal_file_fsprg_need_evolve(f, realtime);
+ if (r <= 0)
+ return 0;
+
+ r = journal_file_append_tag(f);
+ if (r < 0)
+ return r;
+
+ r = journal_file_fsprg_evolve(f, realtime);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int journal_file_hmac_put_object(JournalFile *f, ObjectType type, Object *o, uint64_t p) {
+ int r;
+
+ assert(f);
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ r = journal_file_hmac_start(f);
+ if (r < 0)
+ return r;
+
+ if (!o) {
+ r = journal_file_move_to_object(f, type, p, &o);
+ if (r < 0)
+ return r;
+ } else if (type > OBJECT_UNUSED && o->object.type != type)
+ return -EBADMSG;
+
+ gcry_md_write(f->hmac, o, offsetof(ObjectHeader, payload));
+
+ switch (o->object.type) {
+
+ case OBJECT_DATA:
+ /* All but hash and payload are mutable */
+ gcry_md_write(f->hmac, &o->data.hash, sizeof(o->data.hash));
+ gcry_md_write(f->hmac, journal_file_data_payload_field(f, o), le64toh(o->object.size) - journal_file_data_payload_offset(f));
+ break;
+
+ case OBJECT_FIELD:
+ /* Same here */
+ gcry_md_write(f->hmac, &o->field.hash, sizeof(o->field.hash));
+ gcry_md_write(f->hmac, o->field.payload, le64toh(o->object.size) - offsetof(Object, field.payload));
+ break;
+
+ case OBJECT_ENTRY:
+ /* All */
+ gcry_md_write(f->hmac, &o->entry.seqnum, le64toh(o->object.size) - offsetof(Object, entry.seqnum));
+ break;
+
+ case OBJECT_FIELD_HASH_TABLE:
+ case OBJECT_DATA_HASH_TABLE:
+ case OBJECT_ENTRY_ARRAY:
+ /* Nothing: everything is mutable */
+ break;
+
+ case OBJECT_TAG:
+ /* All but the tag itself */
+ gcry_md_write(f->hmac, &o->tag.seqnum, sizeof(o->tag.seqnum));
+ gcry_md_write(f->hmac, &o->tag.epoch, sizeof(o->tag.epoch));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int journal_file_hmac_put_header(JournalFile *f) {
+ int r;
+
+ assert(f);
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ r = journal_file_hmac_start(f);
+ if (r < 0)
+ return r;
+
+ /* All but state+reserved, boot_id, arena_size,
+ * tail_object_offset, n_objects, n_entries,
+ * tail_entry_seqnum, head_entry_seqnum, entry_array_offset,
+ * head_entry_realtime, tail_entry_realtime,
+ * tail_entry_monotonic, n_data, n_fields, n_tags,
+ * n_entry_arrays. */
+
+ gcry_md_write(f->hmac, f->header->signature, offsetof(Header, state) - offsetof(Header, signature));
+ gcry_md_write(f->hmac, &f->header->file_id, offsetof(Header, tail_entry_boot_id) - offsetof(Header, file_id));
+ gcry_md_write(f->hmac, &f->header->seqnum_id, offsetof(Header, arena_size) - offsetof(Header, seqnum_id));
+ gcry_md_write(f->hmac, &f->header->data_hash_table_offset, offsetof(Header, tail_object_offset) - offsetof(Header, data_hash_table_offset));
+
+ return 0;
+}
+
+int journal_file_fss_load(JournalFile *f) {
+ _cleanup_close_ int fd = -EBADF;
+ _cleanup_free_ char *path = NULL;
+ _cleanup_(fssheader_freep) FSSHeader *header = NULL;
+ struct stat st;
+ sd_id128_t machine;
+ int r;
+
+ assert(f);
+
+ /* This function is used to determine whether sealing should be enabled in the journal header, so
+ * we cannot rely on the header itself here to tell whether sealing is enabled. */
+
+ r = sd_id128_get_machine(&machine);
+ if (r < 0)
+ return r;
+
+ if (asprintf(&path, "/var/log/journal/" SD_ID128_FORMAT_STR "/fss",
+ SD_ID128_FORMAT_VAL(machine)) < 0)
+ return -ENOMEM;
+
+ fd = open(path, O_RDWR|O_CLOEXEC|O_NOCTTY, 0600);
+ if (fd < 0) {
+ if (errno != ENOENT)
+ log_error_errno(errno, "Failed to open %s: %m", path);
+
+ return -errno;
+ }
+
+ if (fstat(fd, &st) < 0)
+ return -errno;
+
+ if (st.st_size < (off_t) sizeof(FSSHeader))
+ return -ENODATA;
+
+ header = mmap(NULL, PAGE_ALIGN(sizeof(FSSHeader)), PROT_READ, MAP_SHARED, fd, 0);
+ if (header == MAP_FAILED)
+ return -errno;
+
+ if (memcmp(header->signature, FSS_HEADER_SIGNATURE, 8) != 0)
+ return -EBADMSG;
+
+ if (header->incompatible_flags != 0)
+ return -EPROTONOSUPPORT;
+
+ if (le64toh(header->header_size) < sizeof(FSSHeader))
+ return -EBADMSG;
+
+ if (le64toh(header->fsprg_state_size) != FSPRG_stateinbytes(le16toh(header->fsprg_secpar)))
+ return -EBADMSG;
+
+ f->fss_file_size = le64toh(header->header_size) + le64toh(header->fsprg_state_size);
+ if ((uint64_t) st.st_size < f->fss_file_size)
+ return -ENODATA;
+
+ if (!sd_id128_equal(machine, header->machine_id))
+ return -EHOSTDOWN;
+
+ if (le64toh(header->start_usec) <= 0 || le64toh(header->interval_usec) <= 0)
+ return -EBADMSG;
+
+ size_t sz = PAGE_ALIGN(f->fss_file_size);
+ assert(sz < SIZE_MAX);
+ f->fss_file = mmap(NULL, sz, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if (f->fss_file == MAP_FAILED) {
+ f->fss_file = NULL;
+ return -errno;
+ }
+
+ f->fss_start_usec = le64toh(f->fss_file->start_usec);
+ f->fss_interval_usec = le64toh(f->fss_file->interval_usec);
+
+ f->fsprg_state = (uint8_t*) f->fss_file + le64toh(f->fss_file->header_size);
+ f->fsprg_state_size = le64toh(f->fss_file->fsprg_state_size);
+
+ return 0;
+}
+
+int journal_file_hmac_setup(JournalFile *f) {
+ gcry_error_t e;
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ initialize_libgcrypt(true);
+
+ e = gcry_md_open(&f->hmac, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC);
+ if (e != 0)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int journal_file_append_first_tag(JournalFile *f) {
+ uint64_t p;
+ int r;
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return 0;
+
+ log_debug("Calculating first tag...");
+
+ r = journal_file_hmac_put_header(f);
+ if (r < 0)
+ return r;
+
+ p = le64toh(f->header->field_hash_table_offset);
+ if (p < offsetof(Object, hash_table.items))
+ return -EINVAL;
+ p -= offsetof(Object, hash_table.items);
+
+ r = journal_file_hmac_put_object(f, OBJECT_FIELD_HASH_TABLE, NULL, p);
+ if (r < 0)
+ return r;
+
+ p = le64toh(f->header->data_hash_table_offset);
+ if (p < offsetof(Object, hash_table.items))
+ return -EINVAL;
+ p -= offsetof(Object, hash_table.items);
+
+ r = journal_file_hmac_put_object(f, OBJECT_DATA_HASH_TABLE, NULL, p);
+ if (r < 0)
+ return r;
+
+ r = journal_file_append_tag(f);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int journal_file_parse_verification_key(JournalFile *f, const char *key) {
+ _cleanup_free_ uint8_t *seed = NULL;
+ size_t seed_size;
+ const char *k;
+ unsigned long long start, interval;
+ int r;
+
+ assert(f);
+ assert(key);
+
+ seed_size = FSPRG_RECOMMENDED_SEEDLEN;
+ seed = malloc(seed_size);
+ if (!seed)
+ return -ENOMEM;
+
+ k = key;
+ for (size_t c = 0; c < seed_size; c++) {
+ int x, y;
+
+ k = skip_leading_chars(k, "-");
+
+ x = unhexchar(*k);
+ if (x < 0)
+ return -EINVAL;
+ k++;
+
+ y = unhexchar(*k);
+ if (y < 0)
+ return -EINVAL;
+ k++;
+
+ seed[c] = (uint8_t) (x * 16 + y);
+ }
+
+ if (*k != '/')
+ return -EINVAL;
+ k++;
+
+ r = sscanf(k, "%llx-%llx", &start, &interval);
+ if (r != 2)
+ return -EINVAL;
+
+ f->fsprg_seed = TAKE_PTR(seed);
+ f->fsprg_seed_size = seed_size;
+
+ f->fss_start_usec = start * interval;
+ f->fss_interval_usec = interval;
+
+ return 0;
+}
+
+bool journal_file_next_evolve_usec(JournalFile *f, usec_t *u) {
+ uint64_t epoch;
+
+ assert(f);
+ assert(u);
+
+ if (!JOURNAL_HEADER_SEALED(f->header))
+ return false;
+
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+
+ *u = (usec_t) (f->fss_start_usec + f->fss_interval_usec * epoch + f->fss_interval_usec);
+
+ return true;
+}
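
Note: the epoch bookkeeping in journal_file_get_epoch() and journal_file_next_evolve_usec() above boils down to integer interval arithmetic — the sealing epoch is the number of whole fss_interval_usec periods elapsed since fss_start_usec, and the next key evolution is due one interval after the start of the current epoch. A standalone sketch with made-up timestamps:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void) {
            uint64_t start_usec    = 1700000000000000ULL;  /* plays the role of fss_start_usec */
            uint64_t interval_usec =       900000000ULL;   /* 15 min, plays fss_interval_usec  */
            uint64_t now_usec      = 1700004321000000ULL;  /* current CLOCK_REALTIME           */

            uint64_t epoch = (now_usec - start_usec) / interval_usec;
            uint64_t next_evolve = start_usec + (epoch + 1) * interval_usec;

            printf("epoch=%" PRIu64 ", next evolve at %" PRIu64 " us\n", epoch, next_evolve);
            return 0;
    }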
diff --git a/src/libsystemd/sd-journal/journal-authenticate.h b/src/libsystemd/sd-journal/journal-authenticate.h
new file mode 100644
index 0000000..e895722
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-authenticate.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include <stdbool.h>
+
+#include "journal-file.h"
+
+int journal_file_append_tag(JournalFile *f);
+int journal_file_maybe_append_tag(JournalFile *f, uint64_t realtime);
+int journal_file_append_first_tag(JournalFile *f);
+
+int journal_file_hmac_setup(JournalFile *f);
+int journal_file_hmac_start(JournalFile *f);
+int journal_file_hmac_put_header(JournalFile *f);
+int journal_file_hmac_put_object(JournalFile *f, ObjectType type, Object *o, uint64_t p);
+
+int journal_file_fss_load(JournalFile *f);
+int journal_file_parse_verification_key(JournalFile *f, const char *key);
+
+int journal_file_fsprg_evolve(JournalFile *f, uint64_t realtime);
+int journal_file_fsprg_seek(JournalFile *f, uint64_t epoch);
+
+bool journal_file_next_evolve_usec(JournalFile *f, usec_t *u);
diff --git a/src/libsystemd/sd-journal/journal-def.h b/src/libsystemd/sd-journal/journal-def.h
new file mode 100644
index 0000000..1b10f24
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-def.h
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include "sd-id128.h"
+
+#include "macro.h"
+#include "sparse-endian.h"
+
+/*
+ * If you change this file you probably should also change its documentation:
+ *
+ * https://systemd.io/JOURNAL_FILE_FORMAT
+ */
+
+typedef struct Header Header;
+
+typedef struct ObjectHeader ObjectHeader;
+typedef union Object Object;
+
+typedef struct DataObject DataObject;
+typedef struct FieldObject FieldObject;
+typedef struct EntryObject EntryObject;
+typedef struct HashTableObject HashTableObject;
+typedef struct EntryArrayObject EntryArrayObject;
+typedef struct TagObject TagObject;
+
+typedef struct HashItem HashItem;
+
+typedef struct FSSHeader FSSHeader;
+
+/* Object types */
+typedef enum ObjectType {
+ OBJECT_UNUSED, /* also serves as "any type" or "additional category" */
+ OBJECT_DATA,
+ OBJECT_FIELD,
+ OBJECT_ENTRY,
+ OBJECT_DATA_HASH_TABLE,
+ OBJECT_FIELD_HASH_TABLE,
+ OBJECT_ENTRY_ARRAY,
+ OBJECT_TAG,
+ _OBJECT_TYPE_MAX,
+ _OBJECT_TYPE_INVALID = -EINVAL,
+} ObjectType;
+
+/* Object flags (note that src/basic/compress.h uses the same values for the compression types) */
+enum {
+ OBJECT_COMPRESSED_XZ = 1 << 0,
+ OBJECT_COMPRESSED_LZ4 = 1 << 1,
+ OBJECT_COMPRESSED_ZSTD = 1 << 2,
+ _OBJECT_COMPRESSED_MASK = OBJECT_COMPRESSED_XZ | OBJECT_COMPRESSED_LZ4 | OBJECT_COMPRESSED_ZSTD,
+};
+
+struct ObjectHeader {
+ uint8_t type;
+ uint8_t flags;
+ uint8_t reserved[6];
+ le64_t size;
+ uint8_t payload[];
+} _packed_;
+
+#define DataObject__contents { \
+ ObjectHeader object; \
+ le64_t hash; \
+ le64_t next_hash_offset; \
+ le64_t next_field_offset; \
+ le64_t entry_offset; /* the first array entry we store inline */ \
+ le64_t entry_array_offset; \
+ le64_t n_entries; \
+ union { \
+ struct { \
+ uint8_t payload[0]; \
+ } regular; \
+ struct { \
+ le32_t tail_entry_array_offset; \
+ le32_t tail_entry_array_n_entries; \
+ uint8_t payload[0]; \
+ } compact; \
+ }; \
+}
+
+struct DataObject DataObject__contents;
+struct DataObject__packed DataObject__contents _packed_;
+assert_cc(sizeof(struct DataObject) == sizeof(struct DataObject__packed));
+
+#define FieldObject__contents { \
+ ObjectHeader object; \
+ le64_t hash; \
+ le64_t next_hash_offset; \
+ le64_t head_data_offset; \
+ uint8_t payload[]; \
+}
+
+struct FieldObject FieldObject__contents;
+struct FieldObject__packed FieldObject__contents _packed_;
+assert_cc(sizeof(struct FieldObject) == sizeof(struct FieldObject__packed));
+
+#define EntryObject__contents { \
+ ObjectHeader object; \
+ le64_t seqnum; \
+ le64_t realtime; \
+ le64_t monotonic; \
+ sd_id128_t boot_id; \
+ le64_t xor_hash; \
+ union { \
+ struct { \
+ dummy_t __empty__regular; \
+ struct { \
+ le64_t object_offset; \
+ le64_t hash; \
+ } regular[]; \
+ }; \
+ struct { \
+ dummy_t __empty_compact; \
+ struct { \
+ le32_t object_offset; \
+ } compact[]; \
+ }; \
+ } items; \
+}
+
+struct EntryObject EntryObject__contents;
+struct EntryObject__packed EntryObject__contents _packed_;
+assert_cc(sizeof(struct EntryObject) == sizeof(struct EntryObject__packed));
+
+struct HashItem {
+ le64_t head_hash_offset;
+ le64_t tail_hash_offset;
+} _packed_;
+
+struct HashTableObject {
+ ObjectHeader object;
+ HashItem items[];
+} _packed_;
+
+struct EntryArrayObject {
+ ObjectHeader object;
+ le64_t next_entry_array_offset;
+ union {
+ DECLARE_FLEX_ARRAY(le64_t, regular);
+ DECLARE_FLEX_ARRAY(le32_t, compact);
+ } items;
+} _packed_;
+
+#define TAG_LENGTH (256/8)
+
+struct TagObject {
+ ObjectHeader object;
+ le64_t seqnum;
+ le64_t epoch;
+ uint8_t tag[TAG_LENGTH]; /* SHA-256 HMAC */
+} _packed_;
+
+union Object {
+ ObjectHeader object;
+ DataObject data;
+ FieldObject field;
+ EntryObject entry;
+ HashTableObject hash_table;
+ EntryArrayObject entry_array;
+ TagObject tag;
+};
+
+enum {
+ STATE_OFFLINE = 0,
+ STATE_ONLINE = 1,
+ STATE_ARCHIVED = 2,
+ _STATE_MAX
+};
+
+/* Header flags */
+enum {
+ HEADER_INCOMPATIBLE_COMPRESSED_XZ = 1 << 0,
+ HEADER_INCOMPATIBLE_COMPRESSED_LZ4 = 1 << 1,
+ HEADER_INCOMPATIBLE_KEYED_HASH = 1 << 2,
+ HEADER_INCOMPATIBLE_COMPRESSED_ZSTD = 1 << 3,
+ HEADER_INCOMPATIBLE_COMPACT = 1 << 4,
+
+ HEADER_INCOMPATIBLE_ANY = HEADER_INCOMPATIBLE_COMPRESSED_XZ |
+ HEADER_INCOMPATIBLE_COMPRESSED_LZ4 |
+ HEADER_INCOMPATIBLE_KEYED_HASH |
+ HEADER_INCOMPATIBLE_COMPRESSED_ZSTD |
+ HEADER_INCOMPATIBLE_COMPACT,
+
+ HEADER_INCOMPATIBLE_SUPPORTED = (HAVE_XZ ? HEADER_INCOMPATIBLE_COMPRESSED_XZ : 0) |
+ (HAVE_LZ4 ? HEADER_INCOMPATIBLE_COMPRESSED_LZ4 : 0) |
+ (HAVE_ZSTD ? HEADER_INCOMPATIBLE_COMPRESSED_ZSTD : 0) |
+ HEADER_INCOMPATIBLE_KEYED_HASH |
+ HEADER_INCOMPATIBLE_COMPACT,
+};
+
+
+enum {
+ HEADER_COMPATIBLE_SEALED = 1 << 0,
+ HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID = 1 << 1, /* if set, the last_entry_boot_id field in the header is exclusively refreshed when an entry is appended */
+ HEADER_COMPATIBLE_SEALED_CONTINUOUS = 1 << 2,
+ HEADER_COMPATIBLE_ANY = HEADER_COMPATIBLE_SEALED |
+ HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID |
+ HEADER_COMPATIBLE_SEALED_CONTINUOUS,
+
+ HEADER_COMPATIBLE_SUPPORTED = (HAVE_GCRYPT ? HEADER_COMPATIBLE_SEALED | HEADER_COMPATIBLE_SEALED_CONTINUOUS : 0) |
+ HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID,
+};
+
+
+#define HEADER_SIGNATURE \
+ ((const uint8_t[]) { 'L', 'P', 'K', 'S', 'H', 'H', 'R', 'H' })
+
+#define struct_Header__contents { \
+ uint8_t signature[8]; /* "LPKSHHRH" */ \
+ le32_t compatible_flags; \
+ le32_t incompatible_flags; \
+ uint8_t state; \
+ uint8_t reserved[7]; \
+ sd_id128_t file_id; \
+ sd_id128_t machine_id; \
+ sd_id128_t tail_entry_boot_id; \
+ sd_id128_t seqnum_id; \
+ le64_t header_size; \
+ le64_t arena_size; \
+ le64_t data_hash_table_offset; \
+ le64_t data_hash_table_size; \
+ le64_t field_hash_table_offset; \
+ le64_t field_hash_table_size; \
+ le64_t tail_object_offset; \
+ le64_t n_objects; \
+ le64_t n_entries; \
+ le64_t tail_entry_seqnum; \
+ le64_t head_entry_seqnum; \
+ le64_t entry_array_offset; \
+ le64_t head_entry_realtime; \
+ le64_t tail_entry_realtime; \
+ le64_t tail_entry_monotonic; \
+ /* Added in 187 */ \
+ le64_t n_data; \
+ le64_t n_fields; \
+ /* Added in 189 */ \
+ le64_t n_tags; \
+ le64_t n_entry_arrays; \
+ /* Added in 246 */ \
+ le64_t data_hash_chain_depth; \
+ le64_t field_hash_chain_depth; \
+ /* Added in 252 */ \
+ le32_t tail_entry_array_offset; \
+ le32_t tail_entry_array_n_entries; \
+ /* Added in 254 */ \
+ le64_t tail_entry_offset; \
+ }
+
+struct Header struct_Header__contents;
+struct Header__packed struct_Header__contents _packed_;
+assert_cc(sizeof(struct Header) == sizeof(struct Header__packed));
+assert_cc(sizeof(struct Header) == 272);
+
+#define FSS_HEADER_SIGNATURE \
+ ((const char[]) { 'K', 'S', 'H', 'H', 'R', 'H', 'L', 'P' })
+
+struct FSSHeader {
+ uint8_t signature[8]; /* "KSHHRHLP" */
+ le32_t compatible_flags;
+ le32_t incompatible_flags;
+ sd_id128_t machine_id;
+ sd_id128_t boot_id; /* last writer */
+ le64_t header_size;
+ le64_t start_usec;
+ le64_t interval_usec;
+ le16_t fsprg_secpar;
+ le16_t reserved[3];
+ le64_t fsprg_state_size;
+} _packed_;
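
Note: for orientation, a minimal standalone reader for the first 16 bytes of the header defined above — the 8 signature bytes followed by the little-endian compatible and incompatible flag words. Real consumers go through journal-file.c and the mmap cache instead; the file path here is only an example.

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void) {
            uint8_t buf[16];
            FILE *f = fopen("/var/log/journal/example/system.journal", "rb");

            if (!f || fread(buf, 1, sizeof(buf), f) != sizeof(buf)) {
                    perror("read header");
                    return 1;
            }
            fclose(f);

            if (memcmp(buf, "LPKSHHRH", 8) != 0) {
                    fprintf(stderr, "not a journal file\n");
                    return 1;
            }

            uint32_t compatible   = buf[8]  | buf[9]  << 8 | (uint32_t) buf[10] << 16 | (uint32_t) buf[11] << 24;
            uint32_t incompatible = buf[12] | buf[13] << 8 | (uint32_t) buf[14] << 16 | (uint32_t) buf[15] << 24;

            printf("compatible=%#x incompatible=%#x sealed=%s\n",
                   compatible, incompatible,
                   compatible & (1 << 0) ? "yes" : "no");  /* HEADER_COMPATIBLE_SEALED */
            return 0;
    }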
diff --git a/src/libsystemd/sd-journal/journal-file.c b/src/libsystemd/sd-journal/journal-file.c
new file mode 100644
index 0000000..d2493a0
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-file.c
@@ -0,0 +1,4696 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/fs.h>
+#include <linux/magic.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/statvfs.h>
+#include <sys/uio.h>
+#include <unistd.h>
+
+#include "sd-event.h"
+
+#include "alloc-util.h"
+#include "chattr-util.h"
+#include "compress.h"
+#include "env-util.h"
+#include "fd-util.h"
+#include "format-util.h"
+#include "fs-util.h"
+#include "id128-util.h"
+#include "journal-authenticate.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "journal-internal.h"
+#include "lookup3.h"
+#include "memory-util.h"
+#include "missing_threads.h"
+#include "path-util.h"
+#include "prioq.h"
+#include "random-util.h"
+#include "set.h"
+#include "sort-util.h"
+#include "stat-util.h"
+#include "string-table.h"
+#include "string-util.h"
+#include "strv.h"
+#include "sync-util.h"
+#include "user-util.h"
+#include "xattr-util.h"
+
+#define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
+#define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
+
+#define DEFAULT_COMPRESS_THRESHOLD (512ULL)
+#define MIN_COMPRESS_THRESHOLD (8ULL)
+
+#define U64_KB UINT64_C(1024)
+#define U64_MB (UINT64_C(1024) * U64_KB)
+#define U64_GB (UINT64_C(1024) * U64_MB)
+
+/* This is the minimum journal file size */
+#define JOURNAL_FILE_SIZE_MIN (512 * U64_KB) /* 512 KiB */
+#define JOURNAL_COMPACT_SIZE_MAX ((uint64_t) UINT32_MAX) /* 4 GiB */
+
+/* These are the lower and upper bounds if we deduce the max_use value from the file system size */
+#define MAX_USE_LOWER (1 * U64_MB) /* 1 MiB */
+#define MAX_USE_UPPER (4 * U64_GB) /* 4 GiB */
+
+/* Those are the lower and upper bounds for the minimal use limit,
+ * i.e. how much we'll use even if keep_free suggests otherwise. */
+#define MIN_USE_LOW (1 * U64_MB) /* 1 MiB */
+#define MIN_USE_HIGH (16 * U64_MB) /* 16 MiB */
+
+/* This is the upper bound if we deduce max_size from max_use */
+#define MAX_SIZE_UPPER (128 * U64_MB) /* 128 MiB */
+
+/* This is the upper bound if we deduce the keep_free value from the file system size */
+#define KEEP_FREE_UPPER (4 * U64_GB) /* 4 GiB */
+
+/* This is the keep_free value when we can't determine the system size */
+#define DEFAULT_KEEP_FREE (1 * U64_MB) /* 1 MiB */
+
+/* This is the default maximum number of journal files to keep around. */
+#define DEFAULT_N_MAX_FILES 100
+
+/* n_data was the first entry we added after the initial file format design */
+#define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
+
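
HEADER_SIZE_MIN pins the minimum acceptable header to everything that existed before n_data was added in systemd 187. Since journal-def.h asserts that struct Header has no padding, that offset can be recomputed by hand; the grouping in this sketch is mine, not from the headers.

    #include <stdio.h>

    int main(void) {
            /* Cumulative field sizes of struct Header up to (but excluding) n_data. */
            unsigned fixed = 8 + 4 + 4 + 1 + 7;   /* signature, flags, state, reserved   = 24  */
            unsigned ids   = 4 * 16;              /* file/machine/tail-boot/seqnum IDs   = 64  */
            unsigned le64s = 15 * 8;              /* header_size .. tail_entry_monotonic = 120 */

            printf("offsetof(Header, n_data) = %u\n", fixed + ids + le64s);   /* -> 208, already 64-bit aligned */
            return 0;
    }
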
+/* How many entries to keep in the entry array chain cache at max */
+#define CHAIN_CACHE_MAX 20
+
+/* How much to increase the journal file size at once each time we allocate something new. */
+#define FILE_SIZE_INCREASE (8 * U64_MB) /* 8MB */
+
+/* Reread fstat() of the file for detecting deletions at least this often */
+#define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
+
+/* Longest hash chain to rotate after */
+#define HASH_CHAIN_DEPTH_MAX 100
+
+#ifdef __clang__
+# pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+#endif
+
+static int mmap_prot_from_open_flags(int flags) {
+ switch (flags & O_ACCMODE) {
+ case O_RDONLY:
+ return PROT_READ;
+ case O_WRONLY:
+ return PROT_WRITE;
+ case O_RDWR:
+ return PROT_READ|PROT_WRITE;
+ default:
+ assert_not_reached();
+ }
+}
+
+int journal_file_tail_end_by_pread(JournalFile *f, uint64_t *ret_offset) {
+ uint64_t p;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(ret_offset);
+
+ /* Same as journal_file_tail_end_by_mmap() below, but operates with pread() to avoid the mmap cache
+ * (and thus is thread safe) */
+
+ p = le64toh(f->header->tail_object_offset);
+ if (p == 0)
+ p = le64toh(f->header->header_size);
+ else {
+ Object tail;
+ uint64_t sz;
+
+ r = journal_file_read_object_header(f, OBJECT_UNUSED, p, &tail);
+ if (r < 0)
+ return r;
+
+ sz = le64toh(tail.object.size);
+ if (sz > UINT64_MAX - sizeof(uint64_t) + 1)
+ return -EBADMSG;
+
+ sz = ALIGN64(sz);
+ if (p > UINT64_MAX - sz)
+ return -EBADMSG;
+
+ p += sz;
+ }
+
+ *ret_offset = p;
+
+ return 0;
+}
+
+int journal_file_tail_end_by_mmap(JournalFile *f, uint64_t *ret_offset) {
+ uint64_t p;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(ret_offset);
+
+ /* Same as journal_file_tail_end_by_pread() above, but operates with the usual mmap logic */
+
+ p = le64toh(f->header->tail_object_offset);
+ if (p == 0)
+ p = le64toh(f->header->header_size);
+ else {
+ Object *tail;
+ uint64_t sz;
+
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
+ if (r < 0)
+ return r;
+
+ sz = le64toh(READ_NOW(tail->object.size));
+ if (sz > UINT64_MAX - sizeof(uint64_t) + 1)
+ return -EBADMSG;
+
+ sz = ALIGN64(sz);
+ if (p > UINT64_MAX - sz)
+ return -EBADMSG;
+
+ p += sz;
+ }
+
+ *ret_offset = p;
+
+ return 0;
+}
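
Both helpers above compute the same thing: the next append position is the tail object's offset plus its size rounded up to the next multiple of 8 bytes, or header_size for an empty file. A minimal sketch of that arithmetic with a hypothetical tail object; ALIGN64 here is a local stand-in mirroring the effect of systemd's macro.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Local stand-in for ALIGN64(): round up to the next multiple of 8 bytes. */
    #define ALIGN64(x) ((((uint64_t) (x)) + 7) & ~UINT64_C(7))

    int main(void) {
            uint64_t tail_object_offset = 4096, tail_object_size = 70;   /* hypothetical values */

            printf("next object goes at %" PRIu64 "\n",
                   tail_object_offset + ALIGN64(tail_object_size));      /* -> 4168 */
            return 0;
    }
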
+
+int journal_file_set_offline_thread_join(JournalFile *f) {
+ int r;
+
+ assert(f);
+
+ if (f->offline_state == OFFLINE_JOINED)
+ return 0;
+
+ r = pthread_join(f->offline_thread, NULL);
+ if (r)
+ return -r;
+
+ f->offline_state = OFFLINE_JOINED;
+
+ if (mmap_cache_fd_got_sigbus(f->cache_fd))
+ return -EIO;
+
+ return 0;
+}
+
+static int journal_file_set_online(JournalFile *f) {
+ bool wait = true;
+
+ assert(f);
+
+ if (!journal_file_writable(f))
+ return -EPERM;
+
+ if (f->fd < 0 || !f->header)
+ return -EINVAL;
+
+ while (wait) {
+ switch (f->offline_state) {
+ case OFFLINE_JOINED:
+ /* No offline thread, no need to wait. */
+ wait = false;
+ break;
+
+ case OFFLINE_SYNCING: {
+ OfflineState tmp_state = OFFLINE_SYNCING;
+ if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
+ false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ continue;
+ }
+ /* Canceled syncing prior to offlining, no need to wait. */
+ wait = false;
+ break;
+
+ case OFFLINE_AGAIN_FROM_SYNCING: {
+ OfflineState tmp_state = OFFLINE_AGAIN_FROM_SYNCING;
+ if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
+ false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ continue;
+ }
+ /* Canceled restart from syncing, no need to wait. */
+ wait = false;
+ break;
+
+ case OFFLINE_AGAIN_FROM_OFFLINING: {
+ OfflineState tmp_state = OFFLINE_AGAIN_FROM_OFFLINING;
+ if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
+ false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
+ continue;
+ }
+ /* Canceled restart from offlining, must wait for offlining to complete however. */
+ _fallthrough_;
+ default: {
+ int r;
+
+ r = journal_file_set_offline_thread_join(f);
+ if (r < 0)
+ return r;
+
+ wait = false;
+ break;
+ }
+ }
+ }
+
+ if (mmap_cache_fd_got_sigbus(f->cache_fd))
+ return -EIO;
+
+ switch (f->header->state) {
+ case STATE_ONLINE:
+ return 0;
+
+ case STATE_OFFLINE:
+ f->header->state = STATE_ONLINE;
+ (void) fsync(f->fd);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
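
The loop above relies on compare-and-exchange so the writer only cancels a sync/offline pass if the offline thread has not already moved the state on; when the exchange fails, the loop simply re-reads the state and decides again. A minimal standalone sketch of that call shape using the same GCC/clang __atomic builtin, single-threaded here purely for illustration:

    #include <stdbool.h>
    #include <stdio.h>

    enum { SYNCING, CANCEL, DONE };

    int main(void) {
            int state = SYNCING;
            int expected = SYNCING;

            /* Flip SYNCING -> CANCEL only if nobody changed the state in the meantime;
             * on failure 'expected' is updated to the value actually observed. */
            if (__atomic_compare_exchange_n(&state, &expected, CANCEL,
                                            false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                    printf("cancelled while still syncing\n");
            else
                    printf("state had already advanced to %d\n", expected);

            return 0;
    }
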
+
+JournalFile* journal_file_close(JournalFile *f) {
+ if (!f)
+ return NULL;
+
+ assert(f->newest_boot_id_prioq_idx == PRIOQ_IDX_NULL);
+
+ if (f->cache_fd)
+ mmap_cache_fd_free(f->cache_fd);
+
+ if (f->close_fd)
+ safe_close(f->fd);
+ free(f->path);
+
+ ordered_hashmap_free_free(f->chain_cache);
+
+#if HAVE_COMPRESSION
+ free(f->compress_buffer);
+#endif
+
+#if HAVE_GCRYPT
+ if (f->fss_file) {
+ size_t sz = PAGE_ALIGN(f->fss_file_size);
+ assert(sz < SIZE_MAX);
+ munmap(f->fss_file, sz);
+ } else
+ free(f->fsprg_state);
+
+ free(f->fsprg_seed);
+
+ if (f->hmac)
+ gcry_md_close(f->hmac);
+#endif
+
+ return mfree(f);
+}
+
+static bool keyed_hash_requested(void) {
+ static thread_local int cached = -1;
+ int r;
+
+ if (cached < 0) {
+ r = getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
+ if (r < 0) {
+ if (r != -ENXIO)
+ log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring: %m");
+ cached = true;
+ } else
+ cached = r;
+ }
+
+ return cached;
+}
+
+static bool compact_mode_requested(void) {
+ static thread_local int cached = -1;
+ int r;
+
+ if (cached < 0) {
+ r = getenv_bool("SYSTEMD_JOURNAL_COMPACT");
+ if (r < 0) {
+ if (r != -ENXIO)
+ log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_COMPACT environment variable, ignoring: %m");
+ cached = true;
+ } else
+ cached = r;
+ }
+
+ return cached;
+}
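
keyed_hash_requested() and compact_mode_requested() share one pattern: parse an environment variable once per thread, cache the result, and fall back to enabled when the variable is unset or unparsable. A rough standalone restatement with plain getenv(); the variable name is made up, and systemd's getenv_bool() accepts more spellings than this sketch does.

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <strings.h>

    static bool example_feature_requested(void) {
            static _Thread_local int cached = -1;

            if (cached < 0) {
                    const char *e = getenv("EXAMPLE_JOURNAL_FEATURE");   /* hypothetical variable */

                    if (!e)
                            cached = true;   /* unset: keep the default */
                    else if (strcasecmp(e, "0") == 0 || strcasecmp(e, "no") == 0 || strcasecmp(e, "false") == 0)
                            cached = false;
                    else
                            cached = true;   /* unparsable values also fall back to the default */
            }

            return cached;
    }

    int main(void) {
            printf("feature %s\n", example_feature_requested() ? "enabled" : "disabled");
            return 0;
    }
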
+
+#if HAVE_COMPRESSION
+static Compression getenv_compression(void) {
+ Compression c;
+ const char *e;
+ int r;
+
+ e = getenv("SYSTEMD_JOURNAL_COMPRESS");
+ if (!e)
+ return DEFAULT_COMPRESSION;
+
+ r = parse_boolean(e);
+ if (r >= 0)
+ return r ? DEFAULT_COMPRESSION : COMPRESSION_NONE;
+
+ c = compression_from_string(e);
+ if (c < 0) {
+ log_debug_errno(c, "Failed to parse SYSTEMD_JOURNAL_COMPRESS value, ignoring: %s", e);
+ return DEFAULT_COMPRESSION;
+ }
+
+ if (!compression_supported(c)) {
+ log_debug("Unsupported compression algorithm specified, ignoring: %s", e);
+ return DEFAULT_COMPRESSION;
+ }
+
+ return c;
+}
+#endif
+
+static Compression compression_requested(void) {
+#if HAVE_COMPRESSION
+ static thread_local Compression cached = _COMPRESSION_INVALID;
+
+ if (cached < 0)
+ cached = getenv_compression();
+
+ return cached;
+#else
+ return COMPRESSION_NONE;
+#endif
+}
+
+static int journal_file_init_header(
+ JournalFile *f,
+ JournalFileFlags file_flags,
+ JournalFile *template) {
+
+ bool seal = false;
+ ssize_t k;
+ int r;
+
+ assert(f);
+
+#if HAVE_GCRYPT
+ /* Try to load the FSPRG state, and if we can't, then just don't do sealing */
+ seal = FLAGS_SET(file_flags, JOURNAL_SEAL) && journal_file_fss_load(f) >= 0;
+#endif
+
+ Header h = {
+ .header_size = htole64(ALIGN64(sizeof(h))),
+ .incompatible_flags = htole32(
+ FLAGS_SET(file_flags, JOURNAL_COMPRESS) * COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(compression_requested()) |
+ keyed_hash_requested() * HEADER_INCOMPATIBLE_KEYED_HASH |
+ compact_mode_requested() * HEADER_INCOMPATIBLE_COMPACT),
+ .compatible_flags = htole32(
+ (seal * (HEADER_COMPATIBLE_SEALED | HEADER_COMPATIBLE_SEALED_CONTINUOUS) ) |
+ HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID),
+ };
+
+ assert_cc(sizeof(h.signature) == sizeof(HEADER_SIGNATURE));
+ memcpy(h.signature, HEADER_SIGNATURE, sizeof(HEADER_SIGNATURE));
+
+ r = sd_id128_randomize(&h.file_id);
+ if (r < 0)
+ return r;
+
+ r = sd_id128_get_machine(&h.machine_id);
+ if (r < 0 && !ERRNO_IS_MACHINE_ID_UNSET(r))
+ return r; /* If we have no valid machine ID (test environment?), let's simply leave the
+ * machine ID field all zeroes. */
+
+ if (template) {
+ h.seqnum_id = template->header->seqnum_id;
+ h.tail_entry_seqnum = template->header->tail_entry_seqnum;
+ } else
+ h.seqnum_id = h.file_id;
+
+ k = pwrite(f->fd, &h, sizeof(h), 0);
+ if (k < 0)
+ return -errno;
+ if (k != sizeof(h))
+ return -EIO;
+
+ return 0;
+}
+
+static int journal_file_refresh_header(JournalFile *f) {
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ /* We used to update the header's boot ID field here, but we don't do that anymore, as per
+ * HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID */
+
+ r = journal_file_set_online(f);
+
+ /* Sync the online state to disk; likely just created a new file, also sync the directory this file
+ * is located in. */
+ (void) fsync_full(f->fd);
+
+ return r;
+}
+
+static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
+ const uint32_t any = compatible ? HEADER_COMPATIBLE_ANY : HEADER_INCOMPATIBLE_ANY,
+ supported = compatible ? HEADER_COMPATIBLE_SUPPORTED : HEADER_INCOMPATIBLE_SUPPORTED;
+ const char *type = compatible ? "compatible" : "incompatible";
+ uint32_t flags;
+
+ assert(f);
+ assert(f->header);
+
+ flags = le32toh(compatible ? f->header->compatible_flags : f->header->incompatible_flags);
+
+ if (flags & ~supported) {
+ if (flags & ~any)
+ log_debug("Journal file %s has unknown %s flags 0x%"PRIx32,
+ f->path, type, flags & ~any);
+ flags = (flags & any) & ~supported;
+ if (flags) {
+ const char* strv[6];
+ size_t n = 0;
+ _cleanup_free_ char *t = NULL;
+
+ if (compatible) {
+ if (flags & HEADER_COMPATIBLE_SEALED)
+ strv[n++] = "sealed";
+ if (flags & HEADER_COMPATIBLE_SEALED_CONTINUOUS)
+ strv[n++] = "sealed-continuous";
+ } else {
+ if (flags & HEADER_INCOMPATIBLE_COMPRESSED_XZ)
+ strv[n++] = "xz-compressed";
+ if (flags & HEADER_INCOMPATIBLE_COMPRESSED_LZ4)
+ strv[n++] = "lz4-compressed";
+ if (flags & HEADER_INCOMPATIBLE_COMPRESSED_ZSTD)
+ strv[n++] = "zstd-compressed";
+ if (flags & HEADER_INCOMPATIBLE_KEYED_HASH)
+ strv[n++] = "keyed-hash";
+ if (flags & HEADER_INCOMPATIBLE_COMPACT)
+ strv[n++] = "compact";
+ }
+ strv[n] = NULL;
+ assert(n < ELEMENTSOF(strv));
+
+ t = strv_join((char**) strv, ", ");
+ log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
+ f->path, type, n > 1 ? "flags" : "flag", strnull(t));
+ }
+ return true;
+ }
+
+ return false;
+}
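
The two masks are applied the same way to both flag words: flags & ~any isolates bits this code base has never heard of, while (flags & any) & ~supported isolates bits that are known but compiled out. A small worked example using the incompatible-flag bit values from journal-def.h, assuming a hypothetical build without zstd:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
            /* Incompatible-flag bits as defined in journal-def.h; assume a build without zstd. */
            const uint32_t any       = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 3) | (1 << 4);
            const uint32_t supported = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 4);
            uint32_t flags = (1 << 3) | (1 << 4);   /* file uses zstd compression + compact mode */

            printf("unknown bits:     0x%" PRIx32 "\n", flags & ~any);                 /* 0x0 */
            printf("unsupported bits: 0x%" PRIx32 "\n", (flags & any) & ~supported);   /* 0x8 -> refuse to open */
            return 0;
    }
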
+
+static bool offset_is_valid(uint64_t offset, uint64_t header_size, uint64_t tail_object_offset) {
+ if (offset == 0)
+ return true;
+ if (!VALID64(offset))
+ return false;
+ if (offset < header_size)
+ return false;
+ if (offset > tail_object_offset)
+ return false;
+ return true;
+}
+
+static bool hash_table_is_valid(uint64_t offset, uint64_t size, uint64_t header_size, uint64_t arena_size, uint64_t tail_object_offset) {
+ if ((offset == 0) != (size == 0))
+ return false;
+ if (offset == 0)
+ return true;
+ if (offset <= offsetof(Object, hash_table.items))
+ return false;
+ offset -= offsetof(Object, hash_table.items);
+ if (!offset_is_valid(offset, header_size, tail_object_offset))
+ return false;
+ assert(offset <= header_size + arena_size);
+ if (size > header_size + arena_size - offset)
+ return false;
+ return true;
+}
+
+static int journal_file_verify_header(JournalFile *f) {
+ uint64_t arena_size, header_size;
+
+ assert(f);
+ assert(f->header);
+
+ if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
+ return -EBADMSG;
+
+ /* In both read and write mode we refuse to open files with incompatible
+ * flags we don't know. */
+ if (warn_wrong_flags(f, false))
+ return -EPROTONOSUPPORT;
+
+ /* When open for writing we refuse to open files with unknown compatible flags, too. */
+ if (journal_file_writable(f) && warn_wrong_flags(f, true))
+ return -EPROTONOSUPPORT;
+
+ if (f->header->state >= _STATE_MAX)
+ return -EBADMSG;
+
+ header_size = le64toh(READ_NOW(f->header->header_size));
+
+ /* The first addition was n_data, so check that we are at least this large */
+ if (header_size < HEADER_SIZE_MIN)
+ return -EBADMSG;
+
+ /* When open for writing we refuse to open files with a mismatch of the header size, i.e. writing to
+ * files implementing older or new header structures. */
+ if (journal_file_writable(f) && header_size != sizeof(Header))
+ return -EPROTONOSUPPORT;
+
+ /* Don't write to journal files without the new boot ID update behavior guarantee. */
+ if (journal_file_writable(f) && !JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f->header))
+ return -EPROTONOSUPPORT;
+
+ if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
+ return -EBADMSG;
+
+ arena_size = le64toh(READ_NOW(f->header->arena_size));
+
+ if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size)
+ return -ENODATA;
+
+ uint64_t tail_object_offset = le64toh(f->header->tail_object_offset);
+ if (!offset_is_valid(tail_object_offset, header_size, UINT64_MAX))
+ return -ENODATA;
+ if (header_size + arena_size < tail_object_offset)
+ return -ENODATA;
+ if (header_size + arena_size - tail_object_offset < sizeof(ObjectHeader))
+ return -ENODATA;
+
+ if (!hash_table_is_valid(le64toh(f->header->data_hash_table_offset),
+ le64toh(f->header->data_hash_table_size),
+ header_size, arena_size, tail_object_offset))
+ return -ENODATA;
+
+ if (!hash_table_is_valid(le64toh(f->header->field_hash_table_offset),
+ le64toh(f->header->field_hash_table_size),
+ header_size, arena_size, tail_object_offset))
+ return -ENODATA;
+
+ uint64_t entry_array_offset = le64toh(f->header->entry_array_offset);
+ if (!offset_is_valid(entry_array_offset, header_size, tail_object_offset))
+ return -ENODATA;
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_offset)) {
+ uint32_t offset = le32toh(f->header->tail_entry_array_offset);
+ uint32_t n = le32toh(f->header->tail_entry_array_n_entries);
+
+ if (!offset_is_valid(offset, header_size, tail_object_offset))
+ return -ENODATA;
+ if (entry_array_offset > offset)
+ return -ENODATA;
+ if (entry_array_offset == 0 && offset != 0)
+ return -ENODATA;
+ if ((offset == 0) != (n == 0))
+ return -ENODATA;
+ assert(offset <= header_size + arena_size);
+ if ((uint64_t) n * journal_file_entry_array_item_size(f) > header_size + arena_size - offset)
+ return -ENODATA;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_offset)) {
+ uint64_t offset = le64toh(f->header->tail_entry_offset);
+
+ if (!offset_is_valid(offset, header_size, tail_object_offset))
+ return -ENODATA;
+
+ if (offset > 0) {
+ /* When there is an entry object, then these fields must be filled. */
+ if (sd_id128_is_null(f->header->tail_entry_boot_id))
+ return -ENODATA;
+ if (!VALID_REALTIME(le64toh(f->header->head_entry_realtime)))
+ return -ENODATA;
+ if (!VALID_REALTIME(le64toh(f->header->tail_entry_realtime)))
+ return -ENODATA;
+ if (!VALID_MONOTONIC(le64toh(f->header->tail_entry_monotonic)))
+ return -ENODATA;
+ } else {
+ /* Otherwise, the fields must be zero. */
+ if (JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f->header) &&
+ !sd_id128_is_null(f->header->tail_entry_boot_id))
+ return -ENODATA;
+ if (f->header->head_entry_realtime != 0)
+ return -ENODATA;
+ if (f->header->tail_entry_realtime != 0)
+ return -ENODATA;
+ if (f->header->tail_entry_monotonic != 0)
+ return -ENODATA;
+ }
+ }
+
+ /* Verify number of objects */
+ uint64_t n_objects = le64toh(f->header->n_objects);
+ if (n_objects > arena_size / sizeof(ObjectHeader))
+ return -ENODATA;
+
+ uint64_t n_entries = le64toh(f->header->n_entries);
+ if (n_entries > n_objects)
+ return -ENODATA;
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
+ le64toh(f->header->n_data) > n_objects)
+ return -ENODATA;
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
+ le64toh(f->header->n_fields) > n_objects)
+ return -ENODATA;
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_tags) &&
+ le64toh(f->header->n_tags) > n_objects)
+ return -ENODATA;
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays) &&
+ le64toh(f->header->n_entry_arrays) > n_objects)
+ return -ENODATA;
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_n_entries) &&
+ le32toh(f->header->tail_entry_array_n_entries) > n_entries)
+ return -ENODATA;
+
+ if (journal_file_writable(f)) {
+ sd_id128_t machine_id;
+ uint8_t state;
+ int r;
+
+ r = sd_id128_get_machine(&machine_id);
+ if (ERRNO_IS_NEG_MACHINE_ID_UNSET(r)) /* Gracefully handle the machine ID not being initialized yet */
+ machine_id = SD_ID128_NULL;
+ else if (r < 0)
+ return r;
+
+ if (!sd_id128_equal(machine_id, f->header->machine_id))
+ return log_debug_errno(SYNTHETIC_ERRNO(EHOSTDOWN),
+ "Trying to open journal file from different host for writing, refusing.");
+
+ state = f->header->state;
+
+ if (state == STATE_ARCHIVED)
+ return -ESHUTDOWN; /* Already archived */
+ if (state == STATE_ONLINE)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
+ "Journal file %s is already online. Assuming unclean closing.",
+ f->path);
+ if (state != STATE_OFFLINE)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
+ "Journal file %s has unknown state %i.",
+ f->path, state);
+
+ if (f->header->field_hash_table_size == 0 || f->header->data_hash_table_size == 0)
+ return -EBADMSG;
+ }
+
+ return 0;
+}
+
+int journal_file_fstat(JournalFile *f) {
+ int r;
+
+ assert(f);
+ assert(f->fd >= 0);
+
+ if (fstat(f->fd, &f->last_stat) < 0)
+ return -errno;
+
+ f->last_stat_usec = now(CLOCK_MONOTONIC);
+
+ /* Refuse dealing with files that aren't regular */
+ r = stat_verify_regular(&f->last_stat);
+ if (r < 0)
+ return r;
+
+ /* Refuse appending to files that are already deleted */
+ if (f->last_stat.st_nlink <= 0)
+ return -EIDRM;
+
+ return 0;
+}
+
+static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
+ uint64_t old_size, new_size, old_header_size, old_arena_size;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ /* We assume that this file is not sparse, and we know that for sure, since we always call
+ * posix_fallocate() ourselves */
+
+ if (size > PAGE_ALIGN_DOWN_U64(UINT64_MAX) - offset)
+ return -EINVAL;
+
+ if (mmap_cache_fd_got_sigbus(f->cache_fd))
+ return -EIO;
+
+ old_header_size = le64toh(READ_NOW(f->header->header_size));
+ old_arena_size = le64toh(READ_NOW(f->header->arena_size));
+ if (old_arena_size > PAGE_ALIGN_DOWN_U64(UINT64_MAX) - old_header_size)
+ return -EBADMSG;
+
+ old_size = old_header_size + old_arena_size;
+
+ new_size = MAX(PAGE_ALIGN_U64(offset + size), old_header_size);
+
+ if (new_size <= old_size) {
+
+ /* We already pre-allocated enough space, but before
+ * we write to it, let's check with fstat() if the
+ * file got deleted, in order to make sure we don't throw
+ * away the data immediately. Don't check fstat() for
+ * all writes though, but only once every 5s
+ * (LAST_STAT_REFRESH_USEC). */
+
+ if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
+ return 0;
+
+ return journal_file_fstat(f);
+ }
+
+ /* Allocate more space. */
+
+ if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
+ return -E2BIG;
+
+ /* Refuse to go over 4G in compact mode so offsets can be stored in 32-bit. */
+ if (JOURNAL_HEADER_COMPACT(f->header) && new_size > UINT32_MAX)
+ return -E2BIG;
+
+ if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
+ struct statvfs svfs;
+
+ if (fstatvfs(f->fd, &svfs) >= 0) {
+ uint64_t available;
+
+ available = LESS_BY(u64_multiply_safe(svfs.f_bfree, svfs.f_bsize), f->metrics.keep_free);
+
+ if (new_size - old_size > available)
+ return -E2BIG;
+ }
+ }
+
+ /* Increase by larger blocks at once */
+ new_size = ROUND_UP(new_size, FILE_SIZE_INCREASE);
+ if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
+ new_size = f->metrics.max_size;
+
+ /* Note that the glibc fallocate() fallback is very
+ inefficient, hence we try to minimize the allocation area
+ as much as we can. */
+ r = posix_fallocate_loop(f->fd, old_size, new_size - old_size);
+ if (r < 0)
+ return r;
+
+ f->header->arena_size = htole64(new_size - old_header_size);
+
+ return journal_file_fstat(f);
+}
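
After the keep_free check, the requested size is rounded up to a multiple of FILE_SIZE_INCREASE so that posix_fallocate() runs for reasonably large chunks instead of once per object. A worked example of that rounding; ROUND_UP below is a local stand-in mirroring the effect of the macro used above.

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FILE_SIZE_INCREASE (UINT64_C(8) * 1024 * 1024)   /* 8 MiB, as above */
    #define ROUND_UP(x, m) (((x) + (m) - 1) / (m) * (m))     /* local stand-in */

    int main(void) {
            uint64_t wanted = UINT64_C(600) * 1024;   /* hypothetical page-aligned offset + size */

            printf("grow file to %" PRIu64 " bytes\n",
                   ROUND_UP(wanted, FILE_SIZE_INCREASE));    /* -> 8388608, i.e. 8 MiB */
            return 0;
    }
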
+
+static int journal_file_move_to(
+ JournalFile *f,
+ ObjectType type,
+ bool keep_always,
+ uint64_t offset,
+ uint64_t size,
+ void **ret) {
+
+ int r;
+
+ assert(f);
+ assert(ret);
+
+ /* This function may clear, overwrite, or alter previously cached entries with the same type. After
+ * this function has been called, all previously read objects with the same type may be invalidated,
+ * hence must be re-read before use. */
+
+ if (size <= 0)
+ return -EINVAL;
+
+ if (size > UINT64_MAX - offset)
+ return -EBADMSG;
+
+ /* Avoid SIGBUS on invalid accesses */
+ if (offset + size > (uint64_t) f->last_stat.st_size) {
+ /* Hmm, out of range? Let's refresh the fstat() data
+ * first, before we trust that check. */
+
+ r = journal_file_fstat(f);
+ if (r < 0)
+ return r;
+
+ if (offset + size > (uint64_t) f->last_stat.st_size)
+ return -EADDRNOTAVAIL;
+ }
+
+ return mmap_cache_fd_get(f->cache_fd, type_to_category(type), keep_always, offset, size, &f->last_stat, ret);
+}
+
+static uint64_t minimum_header_size(JournalFile *f, Object *o) {
+
+ static const uint64_t table[] = {
+ [OBJECT_DATA] = sizeof(DataObject),
+ [OBJECT_FIELD] = sizeof(FieldObject),
+ [OBJECT_ENTRY] = sizeof(EntryObject),
+ [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
+ [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
+ [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
+ [OBJECT_TAG] = sizeof(TagObject),
+ };
+
+ assert(f);
+ assert(o);
+
+ if (o->object.type == OBJECT_DATA)
+ return journal_file_data_payload_offset(f);
+
+ if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
+ return sizeof(ObjectHeader);
+
+ return table[o->object.type];
+}
+
+static int check_object_header(JournalFile *f, Object *o, ObjectType type, uint64_t offset) {
+ uint64_t s;
+
+ assert(f);
+ assert(o);
+
+ s = le64toh(READ_NOW(o->object.size));
+ if (s == 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to uninitialized object: %" PRIu64,
+ offset);
+
+ if (s < sizeof(ObjectHeader))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to overly short object with size %"PRIu64": %" PRIu64,
+ s, offset);
+
+ if (o->object.type <= OBJECT_UNUSED || o->object.type >= _OBJECT_TYPE_MAX)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to object with invalid type (%u): %" PRIu64,
+ o->object.type, offset);
+
+ if (type > OBJECT_UNUSED && o->object.type != type)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Found %s object while expecting %s object: %" PRIu64,
+ journal_object_type_to_string(o->object.type),
+ journal_object_type_to_string(type),
+ offset);
+
+ if (s < minimum_header_size(f, o))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Size of %s object (%"PRIu64") is smaller than the minimum object size (%"PRIu64"): %" PRIu64,
+ journal_object_type_to_string(o->object.type),
+ s,
+ minimum_header_size(f, o),
+ offset);
+
+ return 0;
+}
+
+/* Lightweight object checks. We want this to be fast, so that we won't
+ * slow down every journal_file_move_to_object() call too much. */
+static int check_object(JournalFile *f, Object *o, uint64_t offset) {
+ assert(f);
+ assert(o);
+
+ switch (o->object.type) {
+
+ case OBJECT_DATA:
+ if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Bad data n_entries: %" PRIu64 ": %" PRIu64,
+ le64toh(o->data.n_entries),
+ offset);
+
+ if (le64toh(o->object.size) <= journal_file_data_payload_offset(f))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Bad data size (<= %zu): %" PRIu64 ": %" PRIu64,
+ journal_file_data_payload_offset(f),
+ le64toh(o->object.size),
+ offset);
+
+ if (!VALID64(le64toh(o->data.next_hash_offset)) ||
+ !VALID64(le64toh(o->data.next_field_offset)) ||
+ !VALID64(le64toh(o->data.entry_offset)) ||
+ !VALID64(le64toh(o->data.entry_array_offset)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid offset, next_hash_offset=" OFSfmt ", next_field_offset=" OFSfmt ", entry_offset=" OFSfmt ", entry_array_offset=" OFSfmt ": %" PRIu64,
+ le64toh(o->data.next_hash_offset),
+ le64toh(o->data.next_field_offset),
+ le64toh(o->data.entry_offset),
+ le64toh(o->data.entry_array_offset),
+ offset);
+
+ break;
+
+ case OBJECT_FIELD:
+ if (le64toh(o->object.size) <= offsetof(Object, field.payload))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Bad field size (<= %zu): %" PRIu64 ": %" PRIu64,
+ offsetof(Object, field.payload),
+ le64toh(o->object.size),
+ offset);
+
+ if (!VALID64(le64toh(o->field.next_hash_offset)) ||
+ !VALID64(le64toh(o->field.head_data_offset)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid offset, next_hash_offset=" OFSfmt ", head_data_offset=" OFSfmt ": %" PRIu64,
+ le64toh(o->field.next_hash_offset),
+ le64toh(o->field.head_data_offset),
+ offset);
+ break;
+
+ case OBJECT_ENTRY: {
+ uint64_t sz;
+
+ sz = le64toh(READ_NOW(o->object.size));
+ if (sz < offsetof(Object, entry.items) ||
+ (sz - offsetof(Object, entry.items)) % journal_file_entry_item_size(f) != 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Bad entry size (<= %zu): %" PRIu64 ": %" PRIu64,
+ offsetof(Object, entry.items),
+ sz,
+ offset);
+
+ if ((sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid number items in entry: %" PRIu64 ": %" PRIu64,
+ (sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f),
+ offset);
+
+ if (le64toh(o->entry.seqnum) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid entry seqnum: %" PRIx64 ": %" PRIu64,
+ le64toh(o->entry.seqnum),
+ offset);
+
+ if (!VALID_REALTIME(le64toh(o->entry.realtime)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid entry realtime timestamp: %" PRIu64 ": %" PRIu64,
+ le64toh(o->entry.realtime),
+ offset);
+
+ if (!VALID_MONOTONIC(le64toh(o->entry.monotonic)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid entry monotonic timestamp: %" PRIu64 ": %" PRIu64,
+ le64toh(o->entry.monotonic),
+ offset);
+
+ if (sd_id128_is_null(o->entry.boot_id))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid object entry with an empty boot ID: %" PRIu64,
+ offset);
+
+ break;
+ }
+
+ case OBJECT_DATA_HASH_TABLE:
+ case OBJECT_FIELD_HASH_TABLE: {
+ uint64_t sz;
+
+ sz = le64toh(READ_NOW(o->object.size));
+ if (sz < offsetof(Object, hash_table.items) ||
+ (sz - offsetof(Object, hash_table.items)) % sizeof(HashItem) != 0 ||
+ (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid %s hash table size: %" PRIu64 ": %" PRIu64,
+ journal_object_type_to_string(o->object.type),
+ sz,
+ offset);
+
+ break;
+ }
+
+ case OBJECT_ENTRY_ARRAY: {
+ uint64_t sz, next;
+
+ sz = le64toh(READ_NOW(o->object.size));
+ if (sz < offsetof(Object, entry_array.items) ||
+ (sz - offsetof(Object, entry_array.items)) % journal_file_entry_array_item_size(f) != 0 ||
+ (sz - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid object entry array size: %" PRIu64 ": %" PRIu64,
+ sz,
+ offset);
+ /* Here, we request that the offset of each entry array object is in strictly increasing order. */
+ next = le64toh(o->entry_array.next_entry_array_offset);
+ if (!VALID64(next) || (next > 0 && next <= offset))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid object entry array next_entry_array_offset: %" PRIu64 ": %" PRIu64,
+ next,
+ offset);
+
+ break;
+ }
+
+ case OBJECT_TAG:
+ if (le64toh(o->object.size) != sizeof(TagObject))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid object tag size: %" PRIu64 ": %" PRIu64,
+ le64toh(o->object.size),
+ offset);
+
+ if (!VALID_EPOCH(le64toh(o->tag.epoch)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid object tag epoch: %" PRIu64 ": %" PRIu64,
+ le64toh(o->tag.epoch), offset);
+
+ break;
+ }
+
+ return 0;
+}
+
+int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
+ int r;
+ Object *o;
+
+ assert(f);
+
+ /* Even if this function fails, it may clear, overwrite, or alter previously cached entries with the
+ * same type. After this function has been called, all previously read objects with the same type may
+ * be invalidated, hence must be re-read before use. */
+
+ /* Objects may only be located at multiples of 64 bits */
+ if (!VALID64(offset))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to %s object at non-64-bit boundary: %" PRIu64,
+ journal_object_type_to_string(type),
+ offset);
+
+ /* Object may not be located in the file header */
+ if (offset < le64toh(f->header->header_size))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to %s object located in file header: %" PRIu64,
+ journal_object_type_to_string(type),
+ offset);
+
+ r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), (void**) &o);
+ if (r < 0)
+ return r;
+
+ r = check_object_header(f, o, type, offset);
+ if (r < 0)
+ return r;
+
+ r = journal_file_move_to(f, type, false, offset, le64toh(READ_NOW(o->object.size)), (void**) &o);
+ if (r < 0)
+ return r;
+
+ r = check_object_header(f, o, type, offset);
+ if (r < 0)
+ return r;
+
+ r = check_object(f, o, offset);
+ if (r < 0)
+ return r;
+
+ if (ret)
+ *ret = o;
+
+ return 0;
+}
+
+int journal_file_pin_object(JournalFile *f, Object *o) {
+ assert(f);
+ assert(o);
+
+ /* This attaches the mmap window that provides the object to the 'pinning' category. So, reading
+ * another object with the same type will not invalidate the object, until this function is called
+ * for another object. */
+ return mmap_cache_fd_pin(f->cache_fd, type_to_category(o->object.type), o, le64toh(o->object.size));
+}
+
+int journal_file_read_object_header(JournalFile *f, ObjectType type, uint64_t offset, Object *ret) {
+ ssize_t n;
+ Object o;
+ int r;
+
+ assert(f);
+
+ /* Objects may only be located at multiples of 64 bits */
+ if (!VALID64(offset))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to read %s object at non-64-bit boundary: %" PRIu64,
+ journal_object_type_to_string(type), offset);
+
+ /* Object may not be located in the file header */
+ if (offset < le64toh(f->header->header_size))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to read %s object located in file header: %" PRIu64,
+ journal_object_type_to_string(type), offset);
+
+ /* This will likely read too much data but it avoids having to call pread() twice. */
+ n = pread(f->fd, &o, sizeof(o), offset);
+ if (n < 0)
+ return log_debug_errno(errno, "Failed to read journal %s object at offset: %" PRIu64,
+ journal_object_type_to_string(type), offset);
+
+ if ((size_t) n < sizeof(o.object))
+ return log_debug_errno(SYNTHETIC_ERRNO(EIO),
+ "Failed to read short %s object at offset: %" PRIu64,
+ journal_object_type_to_string(type), offset);
+
+ r = check_object_header(f, &o, type, offset);
+ if (r < 0)
+ return r;
+
+ if ((size_t) n < minimum_header_size(f, &o))
+ return log_debug_errno(SYNTHETIC_ERRNO(EIO),
+ "Short read while reading %s object: %" PRIu64,
+ journal_object_type_to_string(type), offset);
+
+ r = check_object(f, &o, offset);
+ if (r < 0)
+ return r;
+
+ if (ret)
+ *ret = o;
+
+ return 0;
+}
+
+static uint64_t inc_seqnum(uint64_t seqnum) {
+ if (seqnum < UINT64_MAX-1)
+ return seqnum + 1;
+
+ return 1; /* skip over UINT64_MAX and 0 when we run out of seqnums and start again */
+}
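
Sequence numbers are expected to be strictly positive, so the wrap-around skips both UINT64_MAX and 0. A tiny self-contained check of that behaviour; the function body is copied from above.

    #include <assert.h>
    #include <stdint.h>

    static uint64_t inc_seqnum(uint64_t seqnum) {
            if (seqnum < UINT64_MAX - 1)
                    return seqnum + 1;
            return 1;   /* skip over UINT64_MAX and 0 */
    }

    int main(void) {
            assert(inc_seqnum(41) == 42);
            assert(inc_seqnum(UINT64_MAX - 2) == UINT64_MAX - 1);
            assert(inc_seqnum(UINT64_MAX - 1) == 1);
            assert(inc_seqnum(UINT64_MAX) == 1);
            return 0;
    }
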
+
+static uint64_t journal_file_entry_seqnum(
+ JournalFile *f,
+ uint64_t *seqnum) {
+
+ uint64_t next_seqnum;
+
+ assert(f);
+ assert(f->header);
+
+ /* Picks a new sequence number for the entry we are about to add and returns it. */
+
+ next_seqnum = inc_seqnum(le64toh(f->header->tail_entry_seqnum));
+
+ /* If an external seqnum counter was passed, we update both the local and the external one, and set
+ * it to the maximum of both */
+ if (seqnum)
+ *seqnum = next_seqnum = MAX(inc_seqnum(*seqnum), next_seqnum);
+
+ f->header->tail_entry_seqnum = htole64(next_seqnum);
+
+ if (f->header->head_entry_seqnum == 0)
+ f->header->head_entry_seqnum = htole64(next_seqnum);
+
+ return next_seqnum;
+}
+
+int journal_file_append_object(
+ JournalFile *f,
+ ObjectType type,
+ uint64_t size,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ int r;
+ uint64_t p;
+ Object *o;
+
+ assert(f);
+ assert(f->header);
+ assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
+ assert(size >= sizeof(ObjectHeader));
+
+ r = journal_file_set_online(f);
+ if (r < 0)
+ return r;
+
+ r = journal_file_tail_end_by_mmap(f, &p);
+ if (r < 0)
+ return r;
+
+ r = journal_file_allocate(f, p, size);
+ if (r < 0)
+ return r;
+
+ r = journal_file_move_to(f, type, false, p, size, (void**) &o);
+ if (r < 0)
+ return r;
+
+ o->object = (ObjectHeader) {
+ .type = type,
+ .size = htole64(size),
+ };
+
+ f->header->tail_object_offset = htole64(p);
+ f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);
+
+ if (ret_object)
+ *ret_object = o;
+
+ if (ret_offset)
+ *ret_offset = p;
+
+ return 0;
+}
+
+static int journal_file_setup_data_hash_table(JournalFile *f) {
+ uint64_t s, p;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ /* We estimate that we need 1 hash table entry per 768 bytes
+ of journal file and we want to make sure we never get
+ beyond 75% fill level. Calculate the hash table size for
+ the maximum file size based on these metrics. */
+
+ s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
+ if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
+ s = DEFAULT_DATA_HASH_TABLE_SIZE;
+
+ log_debug("Reserving %"PRIu64" entries in data hash table.", s / sizeof(HashItem));
+
+ r = journal_file_append_object(f,
+ OBJECT_DATA_HASH_TABLE,
+ offsetof(Object, hash_table.items) + s,
+ &o, &p);
+ if (r < 0)
+ return r;
+
+ memzero(o->hash_table.items, s);
+
+ f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
+ f->header->data_hash_table_size = htole64(s);
+
+ return 0;
+}
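
The factor 4/3 in the sizing formula is what keeps the table at or below a 75% fill level if the one-entry-per-768-bytes estimate holds: buckets = estimated_entries * 4/3, hence estimated_entries / buckets = 3/4. A worked example with a hypothetical 128 MiB max_size, assuming HashItem is a pair of le64_t offsets (16 bytes):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
            const uint64_t max_size       = UINT64_C(128) * 1024 * 1024;   /* hypothetical max_size */
            const uint64_t hash_item_size = 16;                            /* assumed sizeof(HashItem) */

            uint64_t s = (max_size * 4 / 768 / 3) * hash_item_size;        /* same arithmetic as above */

            printf("%" PRIu64 " buckets, %" PRIu64 " bytes\n", s / hash_item_size, s);
            /* -> 233016 buckets (~3.6 MiB); ~174763 estimated entries / 233016 buckets ~= 75% */
            return 0;
    }
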
+
+static int journal_file_setup_field_hash_table(JournalFile *f) {
+ uint64_t s, p;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ /* We use a fixed size hash table for the fields, as the
+ * number of fields should only grow very slowly. */
+
+ s = DEFAULT_FIELD_HASH_TABLE_SIZE;
+ log_debug("Reserving %"PRIu64" entries in field hash table.", s / sizeof(HashItem));
+
+ r = journal_file_append_object(f,
+ OBJECT_FIELD_HASH_TABLE,
+ offsetof(Object, hash_table.items) + s,
+ &o, &p);
+ if (r < 0)
+ return r;
+
+ memzero(o->hash_table.items, s);
+
+ f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
+ f->header->field_hash_table_size = htole64(s);
+
+ return 0;
+}
+
+int journal_file_map_data_hash_table(JournalFile *f) {
+ uint64_t s, p;
+ void *t;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ if (f->data_hash_table)
+ return 0;
+
+ p = le64toh(f->header->data_hash_table_offset);
+ s = le64toh(f->header->data_hash_table_size);
+
+ r = journal_file_move_to(f,
+ OBJECT_DATA_HASH_TABLE,
+ true,
+ p, s,
+ &t);
+ if (r < 0)
+ return r;
+
+ f->data_hash_table = t;
+ return 0;
+}
+
+int journal_file_map_field_hash_table(JournalFile *f) {
+ uint64_t s, p;
+ void *t;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ if (f->field_hash_table)
+ return 0;
+
+ p = le64toh(f->header->field_hash_table_offset);
+ s = le64toh(f->header->field_hash_table_size);
+
+ r = journal_file_move_to(f,
+ OBJECT_FIELD_HASH_TABLE,
+ true,
+ p, s,
+ &t);
+ if (r < 0)
+ return r;
+
+ f->field_hash_table = t;
+ return 0;
+}
+
+static int journal_file_link_field(
+ JournalFile *f,
+ Object *o,
+ uint64_t offset,
+ uint64_t hash) {
+
+ uint64_t p, h, m;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(f->field_hash_table);
+ assert(o);
+ assert(offset > 0);
+
+ if (o->object.type != OBJECT_FIELD)
+ return -EINVAL;
+
+ m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+
+ /* This might alter the window we are looking at */
+ o->field.next_hash_offset = o->field.head_data_offset = 0;
+
+ h = hash % m;
+ p = le64toh(f->field_hash_table[h].tail_hash_offset);
+ if (p == 0)
+ f->field_hash_table[h].head_hash_offset = htole64(offset);
+ else {
+ r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
+ if (r < 0)
+ return r;
+
+ o->field.next_hash_offset = htole64(offset);
+ }
+
+ f->field_hash_table[h].tail_hash_offset = htole64(offset);
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
+ f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);
+
+ return 0;
+}
+
+static int journal_file_link_data(
+ JournalFile *f,
+ Object *o,
+ uint64_t offset,
+ uint64_t hash) {
+
+ uint64_t p, h, m;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(f->data_hash_table);
+ assert(o);
+ assert(offset > 0);
+
+ if (o->object.type != OBJECT_DATA)
+ return -EINVAL;
+
+ m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+
+ /* This might alter the window we are looking at */
+ o->data.next_hash_offset = o->data.next_field_offset = 0;
+ o->data.entry_offset = o->data.entry_array_offset = 0;
+ o->data.n_entries = 0;
+
+ h = hash % m;
+ p = le64toh(f->data_hash_table[h].tail_hash_offset);
+ if (p == 0)
+ /* Only entry in the hash table is easy */
+ f->data_hash_table[h].head_hash_offset = htole64(offset);
+ else {
+ /* Move back to the previous data object, to patch in
+ * pointer */
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ o->data.next_hash_offset = htole64(offset);
+ }
+
+ f->data_hash_table[h].tail_hash_offset = htole64(offset);
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
+ f->header->n_data = htole64(le64toh(f->header->n_data) + 1);
+
+ return 0;
+}
+
+static int get_next_hash_offset(
+ JournalFile *f,
+ uint64_t *p,
+ le64_t *next_hash_offset,
+ uint64_t *depth,
+ le64_t *header_max_depth) {
+
+ uint64_t nextp;
+
+ assert(f);
+ assert(p);
+ assert(next_hash_offset);
+ assert(depth);
+
+ nextp = le64toh(READ_NOW(*next_hash_offset));
+ if (nextp > 0) {
+ if (nextp <= *p) /* Refuse going in loops */
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Detected hash item loop in %s, refusing.", f->path);
+
+ (*depth)++;
+
+ /* If the depth of this hash chain is larger than all others we have seen so far, record it */
+ if (header_max_depth && journal_file_writable(f))
+ *header_max_depth = htole64(MAX(*depth, le64toh(*header_max_depth)));
+ }
+
+ *p = nextp;
+ return 0;
+}
+
+int journal_file_find_field_object_with_hash(
+ JournalFile *f,
+ const void *field,
+ uint64_t size,
+ uint64_t hash,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ uint64_t p, osize, h, m, depth = 0;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(field);
+ assert(size > 0);
+
+ /* If the field hash table is empty, we can't find anything */
+ if (le64toh(f->header->field_hash_table_size) <= 0)
+ return 0;
+
+ /* Map the field hash table, if it isn't mapped yet. */
+ r = journal_file_map_field_hash_table(f);
+ if (r < 0)
+ return r;
+
+ osize = offsetof(Object, field.payload) + size;
+
+ m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+
+ h = hash % m;
+ p = le64toh(f->field_hash_table[h].head_hash_offset);
+ while (p > 0) {
+ Object *o;
+
+ r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
+ if (r < 0)
+ return r;
+
+ if (le64toh(o->field.hash) == hash &&
+ le64toh(o->object.size) == osize &&
+ memcmp(o->field.payload, field, size) == 0) {
+
+ if (ret_object)
+ *ret_object = o;
+ if (ret_offset)
+ *ret_offset = p;
+
+ return 1;
+ }
+
+ r = get_next_hash_offset(
+ f,
+ &p,
+ &o->field.next_hash_offset,
+ &depth,
+ JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) ? &f->header->field_hash_chain_depth : NULL);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+uint64_t journal_file_hash_data(
+ JournalFile *f,
+ const void *data,
+ size_t sz) {
+
+ assert(f);
+ assert(f->header);
+ assert(data || sz == 0);
+
+ /* We try to unify our codebase on siphash, hence new-styled journal files utilizing the keyed hash
+ * function use siphash. Old journal files use the Jenkins hash. */
+
+ if (JOURNAL_HEADER_KEYED_HASH(f->header))
+ return siphash24(data, sz, f->header->file_id.bytes);
+
+ return jenkins_hash64(data, sz);
+}
+
+int journal_file_find_field_object(
+ JournalFile *f,
+ const void *field,
+ uint64_t size,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ assert(f);
+ assert(field);
+ assert(size > 0);
+
+ return journal_file_find_field_object_with_hash(
+ f,
+ field, size,
+ journal_file_hash_data(f, field, size),
+ ret_object, ret_offset);
+}
+
+int journal_file_find_data_object_with_hash(
+ JournalFile *f,
+ const void *data,
+ uint64_t size,
+ uint64_t hash,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ uint64_t p, h, m, depth = 0;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(data || size == 0);
+
+ /* If there's no data hash table, then there's no entry. */
+ if (le64toh(f->header->data_hash_table_size) <= 0)
+ return 0;
+
+ /* Map the data hash table, if it isn't mapped yet. */
+ r = journal_file_map_data_hash_table(f);
+ if (r < 0)
+ return r;
+
+ m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+
+ h = hash % m;
+ p = le64toh(f->data_hash_table[h].head_hash_offset);
+
+ while (p > 0) {
+ Object *o;
+ void *d;
+ size_t rsize;
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ if (le64toh(o->data.hash) != hash)
+ goto next;
+
+ r = journal_file_data_payload(f, o, p, NULL, 0, 0, &d, &rsize);
+ if (r < 0)
+ return r;
+ assert(r > 0); /* journal_file_data_payload() always returns > 0 if no field is provided. */
+
+ if (memcmp_nn(data, size, d, rsize) == 0) {
+ if (ret_object)
+ *ret_object = o;
+
+ if (ret_offset)
+ *ret_offset = p;
+
+ return 1;
+ }
+
+ next:
+ r = get_next_hash_offset(
+ f,
+ &p,
+ &o->data.next_hash_offset,
+ &depth,
+ JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) ? &f->header->data_hash_chain_depth : NULL);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+int journal_file_find_data_object(
+ JournalFile *f,
+ const void *data,
+ uint64_t size,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ assert(f);
+ assert(data || size == 0);
+
+ return journal_file_find_data_object_with_hash(
+ f,
+ data, size,
+ journal_file_hash_data(f, data, size),
+ ret_object, ret_offset);
+}
+
+bool journal_field_valid(const char *p, size_t l, bool allow_protected) {
+ /* We kinda enforce POSIX syntax recommendations for
+ environment variables here, but make a couple of additional
+ requirements.
+
+ http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html */
+
+ assert(p);
+
+ if (l == SIZE_MAX)
+ l = strlen(p);
+
+ /* No empty field names */
+ if (l <= 0)
+ return false;
+
+ /* Don't allow names longer than 64 chars */
+ if (l > 64)
+ return false;
+
+ /* Variables starting with an underscore are protected */
+ if (!allow_protected && p[0] == '_')
+ return false;
+
+ /* Don't allow digits as first character */
+ if (ascii_isdigit(p[0]))
+ return false;
+
+ /* Only allow A-Z0-9 and '_' */
+ for (const char *a = p; a < p + l; a++)
+ if ((*a < 'A' || *a > 'Z') &&
+ !ascii_isdigit(*a) &&
+ *a != '_')
+ return false;
+
+ return true;
+}
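
The rules enforced above boil down to: 1 to 64 characters, upper-case ASCII letters, digits and '_' only, no leading digit, and a leading '_' reserved for trusted fields unless allow_protected is set. A standalone restatement of those rules for illustration (not the systemd implementation, which uses its own ascii_isdigit() helper and a length argument):

    #include <assert.h>
    #include <ctype.h>
    #include <stdbool.h>
    #include <string.h>

    static bool field_valid(const char *p, bool allow_protected) {
            size_t l = strlen(p);

            if (l == 0 || l > 64)
                    return false;
            if (!allow_protected && p[0] == '_')
                    return false;
            if (isdigit((unsigned char) p[0]))
                    return false;
            for (size_t i = 0; i < l; i++)
                    if (!(p[i] == '_' || isdigit((unsigned char) p[i]) || (p[i] >= 'A' && p[i] <= 'Z')))
                            return false;
            return true;
    }

    int main(void) {
            assert(field_valid("MESSAGE", false));
            assert(!field_valid("_PID", false));      /* '_' prefix is for trusted fields only */
            assert(field_valid("_PID", true));
            assert(!field_valid("2ND_FIELD", false)); /* may not start with a digit */
            assert(!field_valid("message", false));   /* lower-case not allowed */
            return 0;
    }
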
+
+static int journal_file_append_field(
+ JournalFile *f,
+ const void *field,
+ uint64_t size,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ uint64_t hash, p;
+ uint64_t osize;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(field);
+ assert(size > 0);
+
+ if (!journal_field_valid(field, size, true))
+ return -EBADMSG;
+
+ hash = journal_file_hash_data(f, field, size);
+
+ r = journal_file_find_field_object_with_hash(f, field, size, hash, ret_object, ret_offset);
+ if (r < 0)
+ return r;
+ if (r > 0)
+ return 0;
+
+ osize = offsetof(Object, field.payload) + size;
+ r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
+ if (r < 0)
+ return r;
+
+ o->field.hash = htole64(hash);
+ memcpy(o->field.payload, field, size);
+
+ r = journal_file_link_field(f, o, p, hash);
+ if (r < 0)
+ return r;
+
+ /* The linking might have altered the window, so let's only pass the offset to hmac which will
+ * move to the object again if needed. */
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_put_object(f, OBJECT_FIELD, NULL, p);
+ if (r < 0)
+ return r;
+#endif
+
+ if (ret_object) {
+ r = journal_file_move_to_object(f, OBJECT_FIELD, p, ret_object);
+ if (r < 0)
+ return r;
+ }
+
+ if (ret_offset)
+ *ret_offset = p;
+
+ return 0;
+}
+
+static int maybe_compress_payload(JournalFile *f, uint8_t *dst, const uint8_t *src, uint64_t size, size_t *rsize) {
+ assert(f);
+ assert(f->header);
+
+#if HAVE_COMPRESSION
+ Compression c;
+ int r;
+
+ c = JOURNAL_FILE_COMPRESSION(f);
+ if (c == COMPRESSION_NONE || size < f->compress_threshold_bytes)
+ return 0;
+
+ r = compress_blob(c, src, size, dst, size - 1, rsize);
+ if (r < 0)
+ return log_debug_errno(r, "Failed to compress data object using %s, ignoring: %m", compression_to_string(c));
+
+ log_debug("Compressed data object %"PRIu64" -> %zu using %s", size, *rsize, compression_to_string(c));
+
+ return 1; /* compressed */
+#else
+ return 0;
+#endif
+}
+
+static int journal_file_append_data(
+ JournalFile *f,
+ const void *data,
+ uint64_t size,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ uint64_t hash, p, osize;
+ Object *o, *fo;
+ size_t rsize = 0;
+ const void *eq;
+ int r;
+
+ assert(f);
+
+ if (!data || size == 0)
+ return -EINVAL;
+
+ hash = journal_file_hash_data(f, data, size);
+
+ r = journal_file_find_data_object_with_hash(f, data, size, hash, ret_object, ret_offset);
+ if (r < 0)
+ return r;
+ if (r > 0)
+ return 0;
+
+ eq = memchr(data, '=', size);
+ if (!eq)
+ return -EINVAL;
+
+ osize = journal_file_data_payload_offset(f) + size;
+ r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
+ if (r < 0)
+ return r;
+
+ o->data.hash = htole64(hash);
+
+ r = maybe_compress_payload(f, journal_file_data_payload_field(f, o), data, size, &rsize);
+ if (r <= 0)
+ /* We don't really care about failures, let's continue without compression */
+ memcpy_safe(journal_file_data_payload_field(f, o), data, size);
+ else {
+ Compression c = JOURNAL_FILE_COMPRESSION(f);
+
+ assert(c >= 0 && c < _COMPRESSION_MAX && c != COMPRESSION_NONE);
+
+ o->object.size = htole64(journal_file_data_payload_offset(f) + rsize);
+ o->object.flags |= COMPRESSION_TO_OBJECT_FLAG(c);
+ }
+
+ r = journal_file_link_data(f, o, p, hash);
+ if (r < 0)
+ return r;
+
+ /* The linking might have altered the window, so let's refresh our pointer. */
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
+ if (r < 0)
+ return r;
+#endif
+
+ /* Create field object ... */
+ r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, NULL);
+ if (r < 0)
+ return r;
+
+ /* ... and link it in. */
+ o->data.next_field_offset = fo->field.head_data_offset;
+ fo->field.head_data_offset = le64toh(p);
+
+ if (ret_object)
+ *ret_object = o;
+
+ if (ret_offset)
+ *ret_offset = p;
+
+ return 0;
+}
+
+static int maybe_decompress_payload(
+ JournalFile *f,
+ uint8_t *payload,
+ uint64_t size,
+ Compression compression,
+ const char *field,
+ size_t field_length,
+ size_t data_threshold,
+ void **ret_data,
+ size_t *ret_size) {
+
+ assert(f);
+
+ /* We can't read objects larger than 4G on a 32-bit machine */
+ if ((uint64_t) (size_t) size != size)
+ return -E2BIG;
+
+ if (compression != COMPRESSION_NONE) {
+#if HAVE_COMPRESSION
+ size_t rsize;
+ int r;
+
+ if (field) {
+ r = decompress_startswith(compression, payload, size, &f->compress_buffer, field,
+ field_length, '=');
+ if (r < 0)
+ return log_debug_errno(r,
+ "Cannot decompress %s object of length %" PRIu64 ": %m",
+ compression_to_string(compression),
+ size);
+ if (r == 0) {
+ if (ret_data)
+ *ret_data = NULL;
+ if (ret_size)
+ *ret_size = 0;
+ return 0;
+ }
+ }
+
+ r = decompress_blob(compression, payload, size, &f->compress_buffer, &rsize, 0);
+ if (r < 0)
+ return r;
+
+ if (ret_data)
+ *ret_data = f->compress_buffer;
+ if (ret_size)
+ *ret_size = rsize;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+ } else {
+ if (field && (size < field_length + 1 || memcmp(payload, field, field_length) != 0 || payload[field_length] != '=')) {
+ if (ret_data)
+ *ret_data = NULL;
+ if (ret_size)
+ *ret_size = 0;
+ return 0;
+ }
+
+ if (ret_data)
+ *ret_data = payload;
+ if (ret_size)
+ *ret_size = (size_t) size;
+ }
+
+ return 1;
+}
+
+int journal_file_data_payload(
+ JournalFile *f,
+ Object *o,
+ uint64_t offset,
+ const char *field,
+ size_t field_length,
+ size_t data_threshold,
+ void **ret_data,
+ size_t *ret_size) {
+
+ uint64_t size;
+ Compression c;
+ int r;
+
+ assert(f);
+ assert(!field == (field_length == 0)); /* These must be specified together. */
+
+ if (!o) {
+ r = journal_file_move_to_object(f, OBJECT_DATA, offset, &o);
+ if (r < 0)
+ return r;
+ }
+
+ size = le64toh(READ_NOW(o->object.size));
+ if (size < journal_file_data_payload_offset(f))
+ return -EBADMSG;
+
+ size -= journal_file_data_payload_offset(f);
+
+ c = COMPRESSION_FROM_OBJECT(o);
+ if (c < 0)
+ return -EPROTONOSUPPORT;
+
+ return maybe_decompress_payload(f, journal_file_data_payload_field(f, o), size, c, field,
+ field_length, data_threshold, ret_data, ret_size);
+}
+
+uint64_t journal_file_entry_n_items(JournalFile *f, Object *o) {
+ uint64_t sz;
+
+ assert(f);
+ assert(o);
+
+ if (o->object.type != OBJECT_ENTRY)
+ return 0;
+
+ sz = le64toh(READ_NOW(o->object.size));
+ if (sz < offsetof(Object, entry.items))
+ return 0;
+
+ return (sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f);
+}
+
+uint64_t journal_file_entry_array_n_items(JournalFile *f, Object *o) {
+ uint64_t sz;
+
+ assert(f);
+ assert(o);
+
+ if (o->object.type != OBJECT_ENTRY_ARRAY)
+ return 0;
+
+ sz = le64toh(READ_NOW(o->object.size));
+ if (sz < offsetof(Object, entry_array.items))
+ return 0;
+
+ return (sz - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f);
+}
+
+uint64_t journal_file_hash_table_n_items(Object *o) {
+ uint64_t sz;
+
+ assert(o);
+
+ if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE))
+ return 0;
+
+ sz = le64toh(READ_NOW(o->object.size));
+ if (sz < offsetof(Object, hash_table.items))
+ return 0;
+
+ return (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem);
+}
+
+static void write_entry_array_item(JournalFile *f, Object *o, uint64_t i, uint64_t p) {
+ assert(f);
+ assert(o);
+
+ if (JOURNAL_HEADER_COMPACT(f->header)) {
+ assert(p <= UINT32_MAX);
+ o->entry_array.items.compact[i] = htole32(p);
+ } else
+ o->entry_array.items.regular[i] = htole64(p);
+}
+
+static int link_entry_into_array(
+ JournalFile *f,
+ le64_t *first,
+ le64_t *idx,
+ le32_t *tail,
+ le32_t *tidx,
+ uint64_t p) {
+
+ uint64_t n = 0, ap = 0, q, i, a, hidx;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(first);
+ assert(idx);
+ assert(p > 0);
+
+ a = tail ? le32toh(*tail) : le64toh(*first);
+ hidx = le64toh(READ_NOW(*idx));
+ i = tidx ? le32toh(READ_NOW(*tidx)) : hidx;
+
+ while (a > 0) {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+
+ n = journal_file_entry_array_n_items(f, o);
+ if (i < n) {
+ write_entry_array_item(f, o, i, p);
+ *idx = htole64(hidx + 1);
+ if (tidx)
+ *tidx = htole32(le32toh(*tidx) + 1);
+ return 0;
+ }
+
+ i -= n;
+ ap = a;
+ a = le64toh(o->entry_array.next_entry_array_offset);
+ }
+
+ if (hidx > n)
+ n = (hidx+1) * 2;
+ else
+ n = n * 2;
+
+ if (n < 4)
+ n = 4;
+
+ r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
+ offsetof(Object, entry_array.items) + n * journal_file_entry_array_item_size(f),
+ &o, &q);
+ if (r < 0)
+ return r;
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
+ if (r < 0)
+ return r;
+#endif
+
+ write_entry_array_item(f, o, i, p);
+
+ if (ap == 0)
+ *first = htole64(q);
+ else {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
+ if (r < 0)
+ return r;
+
+ o->entry_array.next_entry_array_offset = htole64(q);
+ }
+
+ if (tail)
+ *tail = htole32(q);
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
+ f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);
+
+ *idx = htole64(hidx + 1);
+ if (tidx)
+ *tidx = htole32(1);
+
+ return 0;
+}
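
The sizing rule at the end of link_entry_into_array() makes the entry array chain grow geometrically: the first array holds 4 items, and once the tail array overflows, the next one is sized from the total number of entries linked so far. A small simulation of just that rule, assuming every array is filled completely before the next one is appended and ignoring the compact-mode 32-bit offsets:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
            uint64_t n = 0, capacity = 0;   /* n = items in the most recent array */

            for (int i = 1; i <= 5; i++) {
                    uint64_t hidx = capacity;   /* index of the first entry that no longer fits */

                    /* Same sizing rule as above. */
                    if (hidx > n)
                            n = (hidx + 1) * 2;
                    else
                            n = n * 2;
                    if (n < 4)
                            n = 4;

                    capacity += n;
                    printf("array #%d: %" PRIu64 " items (total capacity %" PRIu64 ")\n", i, n, capacity);
            }
            /* -> 4, 8, 26, 78, 234 items; capacities 4, 12, 38, 116, 350 */
            return 0;
    }
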
+
+static int link_entry_into_array_plus_one(
+ JournalFile *f,
+ le64_t *extra,
+ le64_t *first,
+ le64_t *idx,
+ le32_t *tail,
+ le32_t *tidx,
+ uint64_t p) {
+
+ uint64_t hidx;
+ int r;
+
+ assert(f);
+ assert(extra);
+ assert(first);
+ assert(idx);
+ assert(p > 0);
+
+ hidx = le64toh(READ_NOW(*idx));
+ if (hidx == UINT64_MAX)
+ return -EBADMSG;
+ if (hidx == 0)
+ *extra = htole64(p);
+ else {
+ le64_t i;
+
+ i = htole64(hidx - 1);
+ r = link_entry_into_array(f, first, &i, tail, tidx, p);
+ if (r < 0)
+ return r;
+ }
+
+ *idx = htole64(hidx + 1);
+ return 0;
+}
+
+static int journal_file_link_entry_item(JournalFile *f, uint64_t offset, uint64_t p) {
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(offset > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ return link_entry_into_array_plus_one(f,
+ &o->data.entry_offset,
+ &o->data.entry_array_offset,
+ &o->data.n_entries,
+ JOURNAL_HEADER_COMPACT(f->header) ? &o->data.compact.tail_entry_array_offset : NULL,
+ JOURNAL_HEADER_COMPACT(f->header) ? &o->data.compact.tail_entry_array_n_entries : NULL,
+ offset);
+}
+
+static int journal_file_link_entry(
+ JournalFile *f,
+ Object *o,
+ uint64_t offset,
+ const EntryItem items[],
+ size_t n_items) {
+
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(o);
+ assert(offset > 0);
+
+ if (o->object.type != OBJECT_ENTRY)
+ return -EINVAL;
+
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+
+ /* Link up the entry itself */
+ r = link_entry_into_array(f,
+ &f->header->entry_array_offset,
+ &f->header->n_entries,
+ JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_offset) ? &f->header->tail_entry_array_offset : NULL,
+ JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_n_entries) ? &f->header->tail_entry_array_n_entries : NULL,
+ offset);
+ if (r < 0)
+ return r;
+
+ /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
+
+ if (f->header->head_entry_realtime == 0)
+ f->header->head_entry_realtime = o->entry.realtime;
+
+ f->header->tail_entry_realtime = o->entry.realtime;
+ f->header->tail_entry_monotonic = o->entry.monotonic;
+ if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_offset))
+ f->header->tail_entry_offset = htole64(offset);
+ f->newest_mtime = 0; /* we have a new tail entry now, explicitly invalidate newest boot id/timestamp info */
+
+ /* Link up the items */
+ for (uint64_t i = 0; i < n_items; i++) {
+ int k;
+
+ /* If we fail to link an entry item because we can't allocate a new entry array, don't fail
+ * immediately but try to link the other entry items since it might still be possible to link
+ * those if they don't require a new entry array to be allocated. */
+
+ k = journal_file_link_entry_item(f, offset, items[i].object_offset);
+ if (k == -E2BIG)
+ r = k;
+ else if (k < 0)
+ return k;
+ }
+
+ return r;
+}
+
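+/* Write a single entry item, using either the 32-bit compact layout or the regular layout
+ * (64-bit offset plus hash), depending on the file's COMPACT header flag. */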
+static void write_entry_item(JournalFile *f, Object *o, uint64_t i, const EntryItem *item) {
+ assert(f);
+ assert(o);
+ assert(item);
+
+ if (JOURNAL_HEADER_COMPACT(f->header)) {
+ assert(item->object_offset <= UINT32_MAX);
+ o->entry.items.compact[i].object_offset = htole32(item->object_offset);
+ } else {
+ o->entry.items.regular[i].object_offset = htole64(item->object_offset);
+ o->entry.items.regular[i].hash = htole64(item->hash);
+ }
+}
+
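+/* Append an OBJECT_ENTRY built from already-written data objects: each item must reference a data
+ * object that exists in this file. The new entry is linked into the global entry array as well as
+ * into every referenced data object. */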
+static int journal_file_append_entry_internal(
+ JournalFile *f,
+ const dual_timestamp *ts,
+ const sd_id128_t *boot_id,
+ const sd_id128_t *machine_id,
+ uint64_t xor_hash,
+ const EntryItem items[],
+ size_t n_items,
+ uint64_t *seqnum,
+ sd_id128_t *seqnum_id,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ uint64_t np;
+ uint64_t osize;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(ts);
+ assert(boot_id);
+ assert(!sd_id128_is_null(*boot_id));
+ assert(items || n_items == 0);
+
+ if (f->strict_order) {
+                /* If requested, be stricter with ordering in this journal file, to make searching via
+ * bisection fully deterministic. This is an optional feature, so that if desired journal
+ * files can be written where the ordering is not strictly enforced (in which case bisection
+ * will yield *a* result, but not the *only* result, when searching for points in
+ * time). Strict ordering mode is enabled when journald originally writes the files, but
+ * might not necessarily be if other tools (the remoting tools for example) write journal
+ * files from combined sources.
+ *
+ * Typically, if any of the errors generated here are seen journald will just rotate the
+ * journal files and start anew. */
+
+ if (ts->realtime < le64toh(f->header->tail_entry_realtime))
+ return log_debug_errno(SYNTHETIC_ERRNO(EREMCHG),
+ "Realtime timestamp %" PRIu64 " smaller than previous realtime "
+ "timestamp %" PRIu64 ", refusing entry.",
+ ts->realtime, le64toh(f->header->tail_entry_realtime));
+
+ if (sd_id128_equal(*boot_id, f->header->tail_entry_boot_id) &&
+ ts->monotonic < le64toh(f->header->tail_entry_monotonic))
+ return log_debug_errno(
+ SYNTHETIC_ERRNO(ENOTNAM),
+ "Monotonic timestamp %" PRIu64
+ " smaller than previous monotonic timestamp %" PRIu64
+ " while having the same boot ID, refusing entry.",
+ ts->monotonic,
+ le64toh(f->header->tail_entry_monotonic));
+ }
+
+ if (seqnum_id) {
+ /* Settle the passed in sequence number ID */
+
+ if (sd_id128_is_null(*seqnum_id))
+ *seqnum_id = f->header->seqnum_id; /* Caller has none assigned, then copy the one from the file */
+ else if (!sd_id128_equal(*seqnum_id, f->header->seqnum_id)) {
+                        /* Different seqnum IDs? We can't allow entries from multiple IDs to end up in the same journal. */
+ if (le64toh(f->header->n_entries) == 0)
+ f->header->seqnum_id = *seqnum_id; /* Caller has one, and file so far has no entries, then copy the one from the caller */
+ else
+ return log_debug_errno(SYNTHETIC_ERRNO(EILSEQ),
+ "Sequence number IDs don't match, refusing entry.");
+ }
+ }
+
+ if (machine_id && sd_id128_is_null(f->header->machine_id))
+ /* Initialize machine ID when not set yet */
+ f->header->machine_id = *machine_id;
+
+ osize = offsetof(Object, entry.items) + (n_items * journal_file_entry_item_size(f));
+
+ r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
+ if (r < 0)
+ return r;
+
+ o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
+ o->entry.realtime = htole64(ts->realtime);
+ o->entry.monotonic = htole64(ts->monotonic);
+ o->entry.xor_hash = htole64(xor_hash);
+ o->entry.boot_id = f->header->tail_entry_boot_id = *boot_id;
+
+ for (size_t i = 0; i < n_items; i++)
+ write_entry_item(f, o, i, &items[i]);
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
+ if (r < 0)
+ return r;
+#endif
+
+ r = journal_file_link_entry(f, o, np, items, n_items);
+ if (r < 0)
+ return r;
+
+ if (ret_object)
+ *ret_object = o;
+
+ if (ret_offset)
+ *ret_offset = np;
+
+ return r;
+}
+
+void journal_file_post_change(JournalFile *f) {
+ assert(f);
+
+ if (f->fd < 0)
+ return;
+
+ /* inotify() does not receive IN_MODIFY events from file
+ * accesses done via mmap(). After each access we hence
+         * trigger IN_MODIFY explicitly, by truncating the journal
+         * file to its current size. */
+
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
+
+ if (ftruncate(f->fd, f->last_stat.st_size) < 0)
+ log_debug_errno(errno, "Failed to truncate file to its own size: %m");
+}
+
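+/* post_change_thunk() and schedule_post_change() implement coalesced change notification: when a
+ * post-change timer has been enabled (see journal_file_enable_post_change_timer() below),
+ * schedule_post_change() arms it as a oneshot, so that many writes in quick succession result in
+ * a single ftruncate()-based notification. */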
+static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
+ assert(userdata);
+
+ journal_file_post_change(userdata);
+
+ return 1;
+}
+
+static void schedule_post_change(JournalFile *f) {
+ sd_event *e;
+ int r;
+
+ assert(f);
+ assert(f->post_change_timer);
+
+ assert_se(e = sd_event_source_get_event(f->post_change_timer));
+
+ /* If we are already going down, post the change immediately. */
+ if (IN_SET(sd_event_get_state(e), SD_EVENT_EXITING, SD_EVENT_FINISHED))
+ goto fail;
+
+ r = sd_event_source_get_enabled(f->post_change_timer, NULL);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to get ftruncate timer state: %m");
+ goto fail;
+ }
+ if (r > 0)
+ return;
+
+ r = sd_event_source_set_time_relative(f->post_change_timer, f->post_change_timer_period);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
+ goto fail;
+ }
+
+ r = sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_ONESHOT);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ /* On failure, let's simply post the change immediately. */
+ journal_file_post_change(f);
+}
+
+/* Enable coalesced change posting in a timer on the provided sd_event instance */
+int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
+ _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
+ int r;
+
+ assert(f);
+ assert_return(!f->post_change_timer, -EINVAL);
+ assert(e);
+ assert(t);
+
+ /* If we are already going down, we cannot install the timer.
+         * In that case, the caller needs to call journal_file_post_change() explicitly. */
+ if (IN_SET(sd_event_get_state(e), SD_EVENT_EXITING, SD_EVENT_FINISHED))
+ return 0;
+
+ r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
+ if (r < 0)
+ return r;
+
+ r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
+ if (r < 0)
+ return r;
+
+ f->post_change_timer = TAKE_PTR(timer);
+ f->post_change_timer_period = t;
+
+ return 1;
+}
+
+static int entry_item_cmp(const EntryItem *a, const EntryItem *b) {
+ return CMP(ASSERT_PTR(a)->object_offset, ASSERT_PTR(b)->object_offset);
+}
+
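+/* Drop duplicate items from an array that has already been sorted by object offset, and return the
+ * new number of items. */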
+static size_t remove_duplicate_entry_items(EntryItem items[], size_t n) {
+ size_t j = 1;
+
+ assert(items || n == 0);
+
+ if (n <= 1)
+ return n;
+
+ for (size_t i = 1; i < n; i++)
+ if (items[i].object_offset != items[j - 1].object_offset)
+ items[j++] = items[i];
+
+ return j;
+}
+
+int journal_file_append_entry(
+ JournalFile *f,
+ const dual_timestamp *ts,
+ const sd_id128_t *boot_id,
+ const struct iovec iovec[],
+ size_t n_iovec,
+ uint64_t *seqnum,
+ sd_id128_t *seqnum_id,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ _cleanup_free_ EntryItem *items_alloc = NULL;
+ EntryItem *items;
+ uint64_t xor_hash = 0;
+ struct dual_timestamp _ts;
+ sd_id128_t _boot_id, _machine_id, *machine_id;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(iovec);
+ assert(n_iovec > 0);
+
+ if (ts) {
+ if (!VALID_REALTIME(ts->realtime))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid realtime timestamp %" PRIu64 ", refusing entry.",
+ ts->realtime);
+ if (!VALID_MONOTONIC(ts->monotonic))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+                                               "Invalid monotonic timestamp %" PRIu64 ", refusing entry.",
+ ts->monotonic);
+ } else {
+ dual_timestamp_now(&_ts);
+ ts = &_ts;
+ }
+
+ if (boot_id) {
+ if (sd_id128_is_null(*boot_id))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG), "Empty boot ID, refusing entry.");
+ } else {
+ r = sd_id128_get_boot(&_boot_id);
+ if (r < 0)
+ return r;
+
+ boot_id = &_boot_id;
+ }
+
+ r = sd_id128_get_machine(&_machine_id);
+ if (ERRNO_IS_NEG_MACHINE_ID_UNSET(r))
+ /* Gracefully handle the machine ID not being initialized yet */
+ machine_id = NULL;
+ else if (r < 0)
+ return r;
+ else
+ machine_id = &_machine_id;
+
+#if HAVE_GCRYPT
+ r = journal_file_maybe_append_tag(f, ts->realtime);
+ if (r < 0)
+ return r;
+#endif
+
+ if (n_iovec < ALLOCA_MAX / sizeof(EntryItem) / 2)
+ items = newa(EntryItem, n_iovec);
+ else {
+ items_alloc = new(EntryItem, n_iovec);
+ if (!items_alloc)
+ return -ENOMEM;
+
+ items = items_alloc;
+ }
+
+ for (size_t i = 0; i < n_iovec; i++) {
+ uint64_t p;
+ Object *o;
+
+ r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
+ if (r < 0)
+ return r;
+
+ /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
+ * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
+ * specific record, and give records with otherwise identical position (i.e. match in seqno,
+ * timestamp, …) a stable ordering. But for that we can't have it that the hash of the
+                 * timestamp, …) a stable ordering. For that the hash must not differ from file to file,
+                 * which it would if we used the keyed per-file hash. Hence let's calculate the Jenkins
+                 * hash here instead. This also has the benefit that cursors for old and new journal files
+ * files things are easier, we can just take the value from the stored record directly. */
+
+ if (JOURNAL_HEADER_KEYED_HASH(f->header))
+ xor_hash ^= jenkins_hash64(iovec[i].iov_base, iovec[i].iov_len);
+ else
+ xor_hash ^= le64toh(o->data.hash);
+
+ items[i] = (EntryItem) {
+ .object_offset = p,
+ .hash = le64toh(o->data.hash),
+ };
+ }
+
+ /* Order by the position on disk, in order to improve seek
+ * times for rotating media. */
+ typesafe_qsort(items, n_iovec, entry_item_cmp);
+ n_iovec = remove_duplicate_entry_items(items, n_iovec);
+
+ r = journal_file_append_entry_internal(
+ f,
+ ts,
+ boot_id,
+ machine_id,
+ xor_hash,
+ items,
+ n_iovec,
+ seqnum,
+ seqnum_id,
+ ret_object,
+ ret_offset);
+
+ /* If the memory mapping triggered a SIGBUS then we return an
+ * IO error and ignore the error code passed down to us, since
+ * it is very likely just an effect of a nullified replacement
+ * mapping page */
+
+ if (mmap_cache_fd_got_sigbus(f->cache_fd))
+ r = -EIO;
+
+ if (f->post_change_timer)
+ schedule_post_change(f);
+ else
+ journal_file_post_change(f);
+
+ return r;
+}
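+
+/* A rough usage sketch, for illustration only (it assumes the usual systemd helpers
+ * IOVEC_MAKE_STRING and ELEMENTSOF): given a writable JournalFile 'f', an entry with two fields
+ * could be appended like this:
+ *
+ *         struct iovec iov[] = {
+ *                 IOVEC_MAKE_STRING("MESSAGE=hello"),
+ *                 IOVEC_MAKE_STRING("PRIORITY=6"),
+ *         };
+ *
+ *         r = journal_file_append_entry(f, NULL, NULL, iov, ELEMENTSOF(iov), NULL, NULL, NULL, NULL);
+ *
+ * Passing NULL for the timestamp and boot ID makes the function use the current time and the local
+ * boot ID, as implemented above. */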
+
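+/* To speed up repeated traversals of entry array chains, each JournalFile keeps a small cache of
+ * positions reached in previously visited chains (f->chain_cache, bounded to CHAIN_CACHE_MAX
+ * entries in chain_cache_put() below). */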
+typedef struct ChainCacheItem {
+ uint64_t first; /* The offset of the entry array object at the beginning of the chain,
+ * i.e., le64toh(f->header->entry_array_offset), or le64toh(o->data.entry_offset). */
+ uint64_t array; /* The offset of the cached entry array object. */
+ uint64_t begin; /* The offset of the first item in the cached array. */
+ uint64_t total; /* The total number of items in all arrays before the cached one in the chain. */
+ uint64_t last_index; /* The last index we looked at in the cached array, to optimize locality when bisecting. */
+} ChainCacheItem;
+
+static void chain_cache_put(
+ OrderedHashmap *h,
+ ChainCacheItem *ci,
+ uint64_t first,
+ uint64_t array,
+ uint64_t begin,
+ uint64_t total,
+ uint64_t last_index) {
+
+ assert(h);
+
+ if (!ci) {
+                /* If the chain item to cache for this chain is the
+                 * first one, it's not worth caching anything. */
+ if (array == first)
+ return;
+
+ if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
+ ci = ordered_hashmap_steal_first(h);
+ assert(ci);
+ } else {
+ ci = new(ChainCacheItem, 1);
+ if (!ci)
+ return;
+ }
+
+ ci->first = first;
+
+ if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
+ free(ci);
+ return;
+ }
+ } else
+ assert(ci->first == first);
+
+ ci->array = array;
+ ci->begin = begin;
+ ci->total = total;
+ ci->last_index = last_index;
+}
+
+static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
+ assert(i);
+
+ /* Increase or decrease the specified index, in the right direction. */
+
+ if (direction == DIRECTION_DOWN) {
+ if (*i >= n - 1)
+ return 0;
+
+ (*i)++;
+ } else {
+ if (*i <= 0)
+ return 0;
+
+ (*i)--;
+ }
+
+ return 1;
+}
+
+static int bump_entry_array(
+ JournalFile *f,
+ Object *o, /* the current entry array object. */
+ uint64_t offset, /* the offset of the entry array object. */
+ uint64_t first, /* The offset of the first entry array object in the chain. */
+ direction_t direction,
+ uint64_t *ret) {
+
+ int r;
+
+ assert(f);
+ assert(ret);
+
+ if (direction == DIRECTION_DOWN) {
+ assert(o);
+ assert(o->object.type == OBJECT_ENTRY_ARRAY);
+
+ *ret = le64toh(o->entry_array.next_entry_array_offset);
+ } else {
+
+ /* Entry array chains are a singly linked list, so to find the previous array in the chain, we have
+ * to start iterating from the top. */
+
+ assert(offset > 0);
+
+ uint64_t p = first, q = 0;
+ while (p > 0 && p != offset) {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, p, &o);
+ if (r < 0)
+ return r;
+
+ q = p;
+ p = le64toh(o->entry_array.next_entry_array_offset);
+ }
+
+ /* If we can't find the previous entry array in the entry array chain, we're likely dealing with a
+ * corrupted journal file. */
+ if (p == 0)
+ return -EBADMSG;
+
+ *ret = q;
+ }
+
+ return *ret > 0;
+}
+
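+/* Resolve the i-th entry of an entry array chain, with 'i' counted from the beginning of the chain.
+ * Corrupted arrays or entries are skipped over in the given direction where possible. */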
+static int generic_array_get(
+ JournalFile *f,
+ uint64_t first, /* The offset of the first entry array object in the chain. */
+ uint64_t i, /* The index of the target object counted from the beginning of the entry array chain. */
+ direction_t direction,
+ Object **ret_object, /* The found object. */
+ uint64_t *ret_offset) { /* The offset of the found object. */
+
+ uint64_t a, t = 0, k;
+ ChainCacheItem *ci;
+ Object *o = NULL;
+ int r;
+
+ assert(f);
+
+ /* FIXME: fix return value assignment on success. */
+
+ a = first;
+
+ /* Try the chain cache first */
+ ci = ordered_hashmap_get(f->chain_cache, &first);
+ if (ci && i > ci->total) {
+ a = ci->array;
+ i -= ci->total;
+ t = ci->total;
+ }
+
+ while (a > 0) {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (IN_SET(r, -EBADMSG, -EADDRNOTAVAIL)) {
+ /* If there's corruption and we're going downwards, let's pretend we reached the
+ * final entry in the entry array chain. */
+
+ if (direction == DIRECTION_DOWN)
+ return 0;
+
+ /* If there's corruption and we're going upwards, move back to the previous entry
+ * array and start iterating entries from there. */
+
+ i = UINT64_MAX;
+ break;
+ }
+ if (r < 0)
+ return r;
+
+ k = journal_file_entry_array_n_items(f, o);
+ if (k == 0)
+ return 0;
+
+ if (i < k)
+ break;
+
+ /* The index is larger than the number of elements in the array. Let's move to the next array. */
+ i -= k;
+ t += k;
+ a = le64toh(o->entry_array.next_entry_array_offset);
+ }
+
+ /* If we've found the right location, now look for the first non-corrupt entry object (in the right
+ * direction). */
+
+ while (a > 0) {
+ if (i == UINT64_MAX) {
+ r = bump_entry_array(f, o, a, first, direction, &a);
+ if (r <= 0)
+ return r;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+
+ k = journal_file_entry_array_n_items(f, o);
+ if (k == 0)
+ break;
+
+ if (direction == DIRECTION_DOWN)
+ i = 0;
+ else {
+ /* We moved to the previous array. The total must be decreased. */
+ if (t < k)
+                                        return -EBADMSG; /* chain cache is broken? */
+
+ i = k - 1;
+ t -= k;
+ }
+ }
+
+ do {
+ uint64_t p;
+
+ p = journal_file_entry_array_item(f, o, i);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, ret_object);
+ if (r >= 0) {
+ /* Let's cache this item for the next invocation */
+ chain_cache_put(f->chain_cache, ci, first, a, journal_file_entry_array_item(f, o, 0), t, i);
+
+ if (ret_offset)
+ *ret_offset = p;
+
+ return 1;
+ }
+ if (!IN_SET(r, -EADDRNOTAVAIL, -EBADMSG))
+ return r;
+
+ /* OK, so this entry is borked. Most likely some entry didn't get synced to
+ * disk properly, let's see if the next one might work for us instead. */
+ log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);
+
+ } while (bump_array_index(&i, direction, k) > 0);
+
+ /* All entries tried in the above do-while loop are broken. Let's move to the next (or previous) array. */
+
+ if (direction == DIRECTION_DOWN)
+ /* We are going to the next array, the total must be incremented. */
+ t += k;
+
+ i = UINT64_MAX;
+ }
+
+ return 0;
+}
+
+enum {
+ TEST_FOUND, /* The current object passes the test. */
+ TEST_LEFT, /* The current object is in an earlier position, and the object we are looking
+ * for should exist in a later position. */
+ TEST_RIGHT, /* The current object is in a later position, and the object we are looking for
+ * should exist in an earlier position. */
+ TEST_GOTO_NEXT, /* No matching object exists in this array and earlier arrays, go to the next array. */
+ TEST_GOTO_PREVIOUS, /* No matching object exists in this array and later arrays, go to the previous array. */
+};
+
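+/* Perform a single probe of the bisection within one entry array: test the item at index 'i' and
+ * narrow the [*left, *right] range accordingly. Returns one of the TEST_* values defined above, or
+ * a negative errno on a fatal error. */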
+static int generic_array_bisect_step(
+ JournalFile *f,
+ Object *array, /* entry array object */
+ uint64_t i, /* index of the entry item in the array we will test. */
+ uint64_t needle,
+ int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
+ direction_t direction,
+ uint64_t *m, /* The maximum number of the entries we will check in the array. */
+ uint64_t *left, /* The index of the left boundary in the array. */
+ uint64_t *right) { /* The index of the right boundary in the array. */
+
+ uint64_t p;
+ int r;
+
+ assert(f);
+ assert(array);
+ assert(test_object);
+ assert(m);
+ assert(left);
+ assert(right);
+ assert(*left <= i);
+ assert(i <= *right);
+ assert(*right < *m);
+
+ p = journal_file_entry_array_item(f, array, i);
+ if (p <= 0)
+ r = -EBADMSG;
+ else
+ r = test_object(f, p, needle);
+ if (IN_SET(r, -EBADMSG, -EADDRNOTAVAIL)) {
+ log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short.");
+
+ if (i == *left) {
+                        /* This happens in two situations:
+ *
+ * a) i == 0 (hence, *left == 0):
+ * The first entry in the array is corrupted, let's go back to the previous array.
+ *
+                         * b) *right == *left or *left + 1, and we are going downwards:
+                         * In that case, the (i-1)-th object has already been tested in the previous call,
+ * which returned TEST_LEFT. See below. So, there is no matching entry in this
+ * array nor in the whole entry array chain. */
+ assert(i == 0 || (*right - *left <= 1 && direction == DIRECTION_DOWN));
+ return TEST_GOTO_PREVIOUS;
+ }
+
+                /* Otherwise, let's cut the array short: limit the number of elements we will see in
+                 * this array, and set the right boundary to the last possibly non-corrupted object. */
+ *m = i;
+ *right = i - 1;
+ return TEST_RIGHT;
+ }
+ if (r < 0)
+ return r;
+
+ if (r == TEST_FOUND)
+                /* There may be multiple entries that match the needle. When the direction is down, we
+ * need to find the first matching entry, hence the right boundary can be moved, but the left
+ * one cannot. Similarly, when the direction is up, we need to find the last matching entry,
+ * hence the left boundary can be moved, but the right one cannot. */
+ r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
+
+ if (r == TEST_RIGHT) {
+ /* Currently, left --- needle --- i --- right, hence we can move the right boundary to i. */
+ if (direction == DIRECTION_DOWN)
+ *right = i;
+ else {
+ if (i == 0)
+ return TEST_GOTO_PREVIOUS;
+ *right = i - 1;
+ }
+ } else {
+ /* Currently, left --- i --- needle --- right, hence we can move the left boundary to i. */
+ if (direction == DIRECTION_DOWN) {
+                        /* Note, here *m is always positive, as by the assertions at the beginning we have
+                         * 0 <= *left <= i <= *right < *m. */
+ if (i == *m - 1)
+ return TEST_GOTO_NEXT;
+
+ *left = i + 1;
+ } else
+ *left = i;
+ }
+
+ return r;
+}
+
+static int generic_array_bisect(
+ JournalFile *f,
+ uint64_t first, /* The offset of the first entry array object in the chain. */
+ uint64_t n, /* The total number of elements in the chain of the entry array. */
+ uint64_t needle, /* The target value (e.g. seqnum, monotonic, realtime, ...). */
+ int (*test_object)(JournalFile *f,
+ uint64_t p, /* the offset of the (data or entry) object that will be tested. */
+ uint64_t needle),
+ direction_t direction,
+ Object **ret_object, /* The found object. */
+ uint64_t *ret_offset, /* The offset of the found object. */
+ uint64_t *ret_idx) { /* The index of the found object counted from the beginning of the entry array chain. */
+
+ /* Given an entry array chain, this function finds the object "closest" to the given needle in the
+ * chain, taking into account the provided direction. A function can be provided to determine how
+ * an object is matched against the given needle.
+ *
+ * Given a journal file, the offset of an object and the needle, the test_object() function should
+ * return TEST_RIGHT if the needle is located earlier in the entry array chain, TEST_LEFT if the
+ * needle is located later in the entry array chain, and TEST_FOUND if the object matches the needle.
+ * If test_object() returns TEST_FOUND for a specific object, that object's information will be used
+ * to populate the return values of this function. If test_object() never returns TEST_FOUND, the
+ * return values are populated with the details of one of the objects closest to the needle. If the
+ * direction is DIRECTION_UP, the earlier object is used. Otherwise, the later object is used.
+ * If there are multiple objects that test_object() returns TEST_FOUND for, then the first matching
+ * object is returned when the direction is DIRECTION_DOWN. Otherwise, the last matching object is returned. */
+
+ uint64_t a, p, t = 0, i, last_index = UINT64_MAX;
+ ChainCacheItem *ci;
+ Object *array;
+ int r;
+
+ assert(f);
+ assert(test_object);
+
+ if (n <= 0)
+ return 0;
+
+ /* Start with the first array in the chain */
+ a = first;
+
+ ci = ordered_hashmap_get(f->chain_cache, &first);
+ if (ci && n > ci->total && ci->begin != 0) {
+ /* Ah, we have iterated this bisection array chain previously! Let's see if we can skip ahead
+ * in the chain, as far as the last time. But we can't jump backwards in the chain, so let's
+ * check that first. */
+
+ r = test_object(f, ci->begin, needle);
+ if (IN_SET(r, -EBADMSG, -EADDRNOTAVAIL))
+ log_debug_errno(r, "Cached entry is corrupted, ignoring: %m");
+ else if (r < 0)
+ return r;
+ else if (r == TEST_LEFT) {
+                        /* OK, what we are looking for is right of the beginning of this EntryArray, so let's
+                         * jump straight to the previously cached array in the chain. */
+
+ a = ci->array;
+ n -= ci->total;
+ t = ci->total;
+ last_index = ci->last_index;
+ }
+ }
+
+ while (a > 0) {
+ uint64_t left, right, k, m, m_original;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
+ if (r < 0)
+ return r;
+
+ k = journal_file_entry_array_n_items(f, array);
+ m = m_original = MIN(k, n);
+ if (m <= 0)
+ return 0;
+
+ left = 0;
+ right = m - 1;
+
+ if (direction == DIRECTION_UP) {
+ /* If we're going upwards, the last entry of the previous array may pass the test,
+ * and the first entry of the current array may not pass. In that case, the last
+ * entry of the previous array must be returned. Hence, we need to test the first
+ * entry of the current array. */
+ r = generic_array_bisect_step(f, array, 0, needle, test_object, direction, &m, &left, &right);
+ if (r < 0)
+ return r;
+ if (r == TEST_GOTO_PREVIOUS)
+ goto previous;
+ }
+
+ /* Test the last entry of this array, to determine if we should go to the next array. */
+ r = generic_array_bisect_step(f, array, right, needle, test_object, direction, &m, &left, &right);
+ if (r < 0)
+ return r;
+ if (r == TEST_GOTO_PREVIOUS)
+ goto previous;
+
+                /* The expected entry should be in this array (or be the last entry of the previous array). */
+ if (r == TEST_RIGHT) {
+
+                        /* If we cached the last index we looked at, let's try not to jump around too
+                         * wildly and see if we can limit the range to look at early on, to the immediate
+                         * neighbors of the last index we looked at. */
+
+ if (last_index > 0 && left < last_index - 1 && last_index - 1 < right) {
+ r = generic_array_bisect_step(f, array, last_index - 1, needle, test_object, direction, &m, &left, &right);
+ if (r < 0)
+ return r;
+ if (r == TEST_GOTO_PREVIOUS)
+ goto previous;
+ }
+
+ if (last_index < UINT64_MAX && left < last_index + 1 && last_index + 1 < right) {
+ r = generic_array_bisect_step(f, array, last_index + 1, needle, test_object, direction, &m, &left, &right);
+ if (r < 0)
+ return r;
+ if (r == TEST_GOTO_PREVIOUS)
+ goto previous;
+ }
+
+ for (;;) {
+ if (left == right) {
+                                        /* If we found one or more corrupted entries in generic_array_bisect_step(),
+                                         * the entry pointed to by 'right' may not have been tested.
+                                         *
+                                         * When we are going downwards, the entry object pointed to by 'left'
+                                         * has not been tested yet. Hence, even if left == right, we still
+                                         * have to check the final entry to see if it actually matches.
+                                         *
+                                         * On the other hand, when we are going upwards, the entry pointed
+                                         * to by 'left' has always been tested already. So, it is not necessary
+                                         * to test the final entry again. */
+ if (m != m_original && direction == DIRECTION_DOWN) {
+ r = generic_array_bisect_step(f, array, left, needle, test_object, direction, &m, &left, &right);
+ if (r < 0)
+ return r;
+ if (IN_SET(r, TEST_GOTO_PREVIOUS, TEST_GOTO_NEXT))
+ return 0; /* The entry does not pass the test, or is corrupted */
+
+                                                assert(r == TEST_RIGHT);
+ assert(left == right);
+ }
+
+ i = left;
+ goto found;
+ }
+
+ assert(left < right);
+ i = (left + right + (direction == DIRECTION_UP)) / 2;
+
+ r = generic_array_bisect_step(f, array, i, needle, test_object, direction, &m, &left, &right);
+ if (r < 0)
+ return r;
+ if (r == TEST_GOTO_PREVIOUS)
+ goto previous;
+ if (r == TEST_GOTO_NEXT)
+ return 0; /* Found a corrupt entry, and the array was cut short. */
+ }
+ }
+
+ /* Not found in this array (or the last entry of this array should be returned), go to the next array. */
+ assert(r == (direction == DIRECTION_DOWN ? TEST_GOTO_NEXT : TEST_LEFT));
+
+ if (k >= n) {
+ if (direction == DIRECTION_UP) {
+ assert(n > 0);
+ i = n - 1;
+ goto found;
+ }
+
+ return 0;
+ }
+
+ n -= k;
+ t += k;
+ last_index = UINT64_MAX;
+ a = le64toh(array->entry_array.next_entry_array_offset);
+ }
+
+ return 0;
+
+previous:
+ /* Not found in the current array, return the last entry of the previous array. */
+ assert(r == TEST_GOTO_PREVIOUS);
+
+        /* The current array is the first in the chain. There is no previous array. */
+ if (t == 0)
+ return 0;
+
+        /* When we are going downwards, there are no matching entries in the previous array. */
+ if (direction == DIRECTION_DOWN)
+ return 0;
+
+ /* Indicate to go to the previous array later. Note, do not move to the previous array here,
+ * as that may invalidate the current array object in the mmap cache and
+         * journal_file_entry_array_item() below may read an invalid address. */
+ i = UINT64_MAX;
+
+found:
+ p = journal_file_entry_array_item(f, array, 0);
+ if (p <= 0)
+ return -EBADMSG;
+
+ /* Let's cache this item for the next invocation */
+ chain_cache_put(f->chain_cache, ci, first, a, p, t, i);
+
+ if (i == UINT64_MAX) {
+ uint64_t m;
+
+ /* Get the last entry of the previous array. */
+
+ r = bump_entry_array(f, NULL, a, first, DIRECTION_UP, &a);
+ if (r <= 0)
+ return r;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
+ if (r < 0)
+ return r;
+
+ m = journal_file_entry_array_n_items(f, array);
+ if (m == 0 || t < m)
+ return -EBADMSG;
+
+ t -= m;
+ i = m - 1;
+ }
+
+ p = journal_file_entry_array_item(f, array, i);
+ if (p == 0)
+ return -EBADMSG;
+
+ if (ret_object) {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, ret_object);
+ if (r < 0)
+ return r;
+ }
+
+ if (ret_offset)
+ *ret_offset = p;
+
+ if (ret_idx)
+ *ret_idx = t + i;
+
+ return 1;
+}
+
+static int generic_array_bisect_for_data(
+ JournalFile *f,
+ Object *d,
+ uint64_t needle,
+ int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
+ direction_t direction,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ uint64_t extra, first, n;
+ int r;
+
+ assert(f);
+ assert(d);
+ assert(d->object.type == OBJECT_DATA);
+ assert(test_object);
+
+ n = le64toh(d->data.n_entries);
+ if (n <= 0)
+ return 0;
+ n--; /* n_entries is the number of entries linked to the data object, including the 'extra' entry. */
+
+ extra = le64toh(d->data.entry_offset);
+ first = le64toh(d->data.entry_array_offset);
+
+        /* This bisects the entry array chain starting at 'first', but first checks the 'extra' entry. */
+ r = test_object(f, extra, needle);
+ if (r < 0)
+ return r;
+
+ if (direction == DIRECTION_DOWN) {
+ /* If we are going downwards, then we need to return the first object that passes the test.
+ * When there is no object that passes the test, we need to return the first object that
+ * test_object() returns TEST_RIGHT for. */
+ if (IN_SET(r,
+ TEST_FOUND, /* The 'extra' object passes the test. Hence, this is the first
+ * object that passes the test. */
+ TEST_RIGHT)) /* The 'extra' object is the first object that test_object() returns
+ * TEST_RIGHT for, and no object exists even in the chained arrays
+ * that passes the test. */
+ goto use_extra; /* The 'extra' object is exactly the one we are looking for. It is
+ * not necessary to bisect the chained arrays. */
+
+ /* Otherwise, the 'extra' object is not the one we are looking for. Search in the arrays. */
+
+ } else {
+ /* If we are going upwards, then we need to return the last object that passes the test.
+                 * When there is no object that passes the test, we need to return the last object that
+ * test_object() returns TEST_LEFT for. */
+ if (r == TEST_RIGHT)
+ return 0; /* Not only the 'extra' object, but also all objects in the chained arrays
+ * will never get TEST_FOUND or TEST_LEFT. The object we are looking for
+ * does not exist. */
+
+ /* Even if the 'extra' object passes the test, there may be multiple objects in the arrays
+ * that also pass the test. Hence, we need to bisect the arrays for finding the last matching
+ * object. */
+ }
+
+ r = generic_array_bisect(f, first, n, needle, test_object, direction, ret_object, ret_offset, NULL);
+ if (r != 0)
+ return r; /* When > 0, the found object is the first (or last, when DIRECTION_UP) object.
+ * Hence, return the found object now. */
+
+ /* No matching object found in the chained arrays.
+ * DIRECTION_DOWN : the 'extra' object neither matches the condition. There is no matching object.
+ * DIRECTION_UP : the 'extra' object matches the condition. So, return it. */
+ if (direction == DIRECTION_DOWN)
+ return 0;
+
+use_extra:
+ if (ret_object) {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, ret_object);
+ if (r < 0)
+ return r;
+ }
+
+ if (ret_offset)
+ *ret_offset = extra;
+
+ return 1;
+}
+
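+/* The test_object_*() helpers below are the comparison callbacks passed to generic_array_bisect()
+ * and generic_array_bisect_for_data(): each compares the object at offset 'p' (or the entry it
+ * refers to) against the needle. */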
+static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
+ assert(f);
+ assert(p > 0);
+
+ if (p == needle)
+ return TEST_FOUND;
+ else if (p < needle)
+ return TEST_LEFT;
+ else
+ return TEST_RIGHT;
+}
+
+int journal_file_move_to_entry_by_offset(
+ JournalFile *f,
+ uint64_t p,
+ direction_t direction,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ assert(f);
+ assert(f->header);
+
+ return generic_array_bisect(
+ f,
+ le64toh(f->header->entry_array_offset),
+ le64toh(f->header->n_entries),
+ p,
+ test_object_offset,
+ direction,
+ ret_object, ret_offset, NULL);
+}
+
+static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
+ uint64_t sq;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(p > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ sq = le64toh(READ_NOW(o->entry.seqnum));
+ if (sq == needle)
+ return TEST_FOUND;
+ else if (sq < needle)
+ return TEST_LEFT;
+ else
+ return TEST_RIGHT;
+}
+
+int journal_file_move_to_entry_by_seqnum(
+ JournalFile *f,
+ uint64_t seqnum,
+ direction_t direction,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ assert(f);
+ assert(f->header);
+
+ return generic_array_bisect(
+ f,
+ le64toh(f->header->entry_array_offset),
+ le64toh(f->header->n_entries),
+ seqnum,
+ test_object_seqnum,
+ direction,
+ ret_object, ret_offset, NULL);
+}
+
+static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
+ Object *o;
+ uint64_t rt;
+ int r;
+
+ assert(f);
+ assert(p > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ rt = le64toh(READ_NOW(o->entry.realtime));
+ if (rt == needle)
+ return TEST_FOUND;
+ else if (rt < needle)
+ return TEST_LEFT;
+ else
+ return TEST_RIGHT;
+}
+
+int journal_file_move_to_entry_by_realtime(
+ JournalFile *f,
+ uint64_t realtime,
+ direction_t direction,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ assert(f);
+ assert(f->header);
+
+ return generic_array_bisect(
+ f,
+ le64toh(f->header->entry_array_offset),
+ le64toh(f->header->n_entries),
+ realtime,
+ test_object_realtime,
+ direction,
+ ret_object, ret_offset, NULL);
+}
+
+static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
+ Object *o;
+ uint64_t m;
+ int r;
+
+ assert(f);
+ assert(p > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ m = le64toh(READ_NOW(o->entry.monotonic));
+ if (m == needle)
+ return TEST_FOUND;
+ else if (m < needle)
+ return TEST_LEFT;
+ else
+ return TEST_RIGHT;
+}
+
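+/* Look up the data object for the _BOOT_ID= field matching the given boot ID. */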
+static int find_data_object_by_boot_id(
+ JournalFile *f,
+ sd_id128_t boot_id,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ char t[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
+
+ assert(f);
+
+ sd_id128_to_string(boot_id, t + 9);
+ return journal_file_find_data_object(f, t, sizeof(t) - 1, ret_object, ret_offset);
+}
+
+int journal_file_move_to_entry_by_monotonic(
+ JournalFile *f,
+ sd_id128_t boot_id,
+ uint64_t monotonic,
+ direction_t direction,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ Object *o;
+ int r;
+
+ assert(f);
+
+ r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
+ if (r <= 0)
+ return r;
+
+ return generic_array_bisect_for_data(
+ f,
+ o,
+ monotonic,
+ test_object_monotonic,
+ direction,
+ ret_object, ret_offset);
+}
+
+void journal_file_reset_location(JournalFile *f) {
+ assert(f);
+
+ f->location_type = LOCATION_HEAD;
+ f->current_offset = 0;
+ f->current_seqnum = 0;
+ f->current_realtime = 0;
+ f->current_monotonic = 0;
+ zero(f->current_boot_id);
+ f->current_xor_hash = 0;
+
+ /* Also reset the previous reading direction. Otherwise, next_beyond_location() may wrongly handle we
+ * already hit EOF. See issue #29216. */
+ f->last_direction = _DIRECTION_INVALID;
+}
+
+void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
+ assert(f);
+ assert(o);
+
+ f->location_type = LOCATION_SEEK;
+ f->current_offset = offset;
+ f->current_seqnum = le64toh(o->entry.seqnum);
+ f->current_realtime = le64toh(o->entry.realtime);
+ f->current_monotonic = le64toh(o->entry.monotonic);
+ f->current_boot_id = o->entry.boot_id;
+ f->current_xor_hash = le64toh(o->entry.xor_hash);
+}
+
+static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
+
+ /* Consider it an error if any of the two offsets is uninitialized */
+ if (old_offset == 0 || new_offset == 0)
+ return false;
+
+ /* If we go down, the new offset must be larger than the old one. */
+ return direction == DIRECTION_DOWN ?
+ new_offset > old_offset :
+ new_offset < old_offset;
+}
+
+int journal_file_next_entry(
+ JournalFile *f,
+ uint64_t p,
+ direction_t direction,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ uint64_t i, n, q;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ /* FIXME: fix return value assignment. */
+
+ n = le64toh(READ_NOW(f->header->n_entries));
+ if (n <= 0)
+ return 0;
+
+ /* When the input offset 'p' is zero, return the first (or last on DIRECTION_UP) entry. */
+ if (p == 0)
+ return generic_array_get(f,
+ le64toh(f->header->entry_array_offset),
+ direction == DIRECTION_DOWN ? 0 : n - 1,
+ direction,
+ ret_object, ret_offset);
+
+ /* Otherwise, first find the nearest entry object. */
+ r = generic_array_bisect(f,
+ le64toh(f->header->entry_array_offset),
+ le64toh(f->header->n_entries),
+ p,
+ test_object_offset,
+ direction,
+ ret_object ? &o : NULL, &q, &i);
+ if (r <= 0)
+ return r;
+
+ assert(direction == DIRECTION_DOWN ? p <= q : q <= p);
+
+        /* If the input offset 'p' points to an entry object, generic_array_bisect() should provide
+ * the same offset, and the index needs to be shifted. Otherwise, use the found object as is,
+ * as it is the nearest entry object from the input offset 'p'. */
+
+ if (p != q)
+ goto found;
+
+ r = bump_array_index(&i, direction, n);
+ if (r <= 0)
+ return r;
+
+ /* And jump to it */
+ r = generic_array_get(f, le64toh(f->header->entry_array_offset), i, direction, ret_object ? &o : NULL, &q);
+ if (r <= 0)
+ return r;
+
+ /* Ensure our array is properly ordered. */
+ if (!check_properly_ordered(q, p, direction))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s: entry array not properly ordered at entry index %" PRIu64,
+ f->path, i);
+found:
+ if (ret_object)
+ *ret_object = o;
+ if (ret_offset)
+ *ret_offset = q;
+
+ return 1;
+}
+
+int journal_file_move_to_entry_for_data(
+ JournalFile *f,
+ Object *d,
+ direction_t direction,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ uint64_t extra, first, n;
+ int r = 0;
+
+ assert(f);
+ assert(d);
+ assert(d->object.type == OBJECT_DATA);
+ assert(IN_SET(direction, DIRECTION_DOWN, DIRECTION_UP));
+
+ /* FIXME: fix return value assignment. */
+
+ /* This returns the first (when the direction is down, otherwise the last) entry linked to the
+ * specified data object. */
+
+ n = le64toh(d->data.n_entries);
+ if (n <= 0)
+ return 0;
+ n--; /* n_entries is the number of entries linked to the data object, including the 'extra' entry. */
+
+ extra = le64toh(d->data.entry_offset);
+ first = le64toh(d->data.entry_array_offset);
+
+ if (direction == DIRECTION_DOWN && extra > 0) {
+ /* When we are going downwards, first try to read the extra entry. */
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, ret_object);
+ if (r >= 0)
+ goto use_extra;
+ if (!IN_SET(r, -EADDRNOTAVAIL, -EBADMSG))
+ return r;
+ }
+
+ if (n > 0) {
+ /* DIRECTION_DOWN : The extra entry is broken, falling back to the entries in the array.
+ * DIRECTION_UP : Try to find a valid entry in the array from the tail. */
+ r = generic_array_get(f,
+ first,
+ direction == DIRECTION_DOWN ? 0 : n - 1,
+ direction,
+ ret_object, ret_offset);
+ if (!IN_SET(r, 0, -EADDRNOTAVAIL, -EBADMSG))
+ return r; /* found or critical error. */
+ }
+
+ if (direction == DIRECTION_UP && extra > 0) {
+ /* No valid entry exists in the chained array, falling back to the extra entry. */
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, ret_object);
+ if (r >= 0)
+ goto use_extra;
+ }
+
+ return r;
+
+use_extra:
+ if (ret_offset)
+ *ret_offset = extra;
+
+ return 1;
+}
+
+int journal_file_move_to_entry_by_offset_for_data(
+ JournalFile *f,
+ Object *d,
+ uint64_t p,
+ direction_t direction,
+ Object **ret, uint64_t *ret_offset) {
+
+ assert(f);
+ assert(d);
+ assert(d->object.type == OBJECT_DATA);
+
+ return generic_array_bisect_for_data(
+ f,
+ d,
+ p,
+ test_object_offset,
+ direction,
+ ret, ret_offset);
+}
+
+int journal_file_move_to_entry_by_monotonic_for_data(
+ JournalFile *f,
+ Object *d,
+ sd_id128_t boot_id,
+ uint64_t monotonic,
+ direction_t direction,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ Object *o, *entry;
+ uint64_t z;
+ int r;
+
+ assert(f);
+ assert(d);
+ assert(d->object.type == OBJECT_DATA);
+
+ /* First, pin the given data object, before reading the _BOOT_ID= data object below. */
+ r = journal_file_pin_object(f, d);
+ if (r < 0)
+ return r;
+
+ /* Then, read a data object for _BOOT_ID= and seek by time. */
+ r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
+ if (r <= 0)
+ return r;
+
+ r = generic_array_bisect_for_data(f,
+ o,
+ monotonic,
+ test_object_monotonic,
+ direction,
+ NULL, &z);
+ if (r <= 0)
+ return r;
+
+ /* And now, continue seeking until we find an entry that exists in both bisection arrays. */
+ for (;;) {
+ uint64_t p;
+
+                /* The journal entry found by the above generic_array_bisect_for_data() may not have the specified data,
+ * that is, it may not be linked in the data object. So, we need to check that. */
+
+ r = journal_file_move_to_entry_by_offset_for_data(
+ f, d, z, direction, ret_object ? &entry : NULL, &p);
+ if (r <= 0)
+ return r;
+ if (p == z)
+ break; /* The journal entry has the specified data. Yay! */
+
+                /* If the entry does not have the data, then move to the next (or previous, depending on the
+ * 'direction') entry linked to the data object. But, the next entry may be in another boot.
+ * So, we need to check that the entry has the matching boot ID. */
+
+ r = journal_file_move_to_entry_by_offset_for_data(
+ f, o, p, direction, ret_object ? &entry : NULL, &z);
+ if (r <= 0)
+ return r;
+ if (p == z)
+ break; /* The journal entry has the specified boot ID. Yay! */
+
+                /* If not, let's try the next entry... */
+ }
+
+ if (ret_object)
+ *ret_object = entry;
+ if (ret_offset)
+ *ret_offset = z;
+ return 1;
+}
+
+int journal_file_move_to_entry_by_seqnum_for_data(
+ JournalFile *f,
+ Object *d,
+ uint64_t seqnum,
+ direction_t direction,
+ Object **ret_object,
+ uint64_t *ret_offset) {
+
+ assert(f);
+ assert(d);
+ assert(d->object.type == OBJECT_DATA);
+
+ return generic_array_bisect_for_data(
+ f,
+ d,
+ seqnum,
+ test_object_seqnum,
+ direction,
+ ret_object, ret_offset);
+}
+
+int journal_file_move_to_entry_by_realtime_for_data(
+ JournalFile *f,
+ Object *d,
+ uint64_t realtime,
+ direction_t direction,
+ Object **ret, uint64_t *ret_offset) {
+
+ assert(f);
+ assert(d);
+ assert(d->object.type == OBJECT_DATA);
+
+ return generic_array_bisect_for_data(
+ f,
+ d,
+ realtime,
+ test_object_realtime,
+ direction,
+ ret, ret_offset);
+}
+
+void journal_file_dump(JournalFile *f) {
+ Object *o;
+ uint64_t p;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ journal_file_print_header(f);
+
+ p = le64toh(READ_NOW(f->header->header_size));
+ while (p != 0) {
+ const char *s;
+ Compression c;
+
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
+ if (r < 0)
+ goto fail;
+
+ s = journal_object_type_to_string(o->object.type);
+
+ switch (o->object.type) {
+
+ case OBJECT_ENTRY:
+ assert(s);
+
+ printf("Type: %s seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
+ s,
+ le64toh(o->entry.seqnum),
+ le64toh(o->entry.monotonic),
+ le64toh(o->entry.realtime));
+ break;
+
+ case OBJECT_TAG:
+ assert(s);
+
+ printf("Type: %s seqnum=%"PRIu64" epoch=%"PRIu64"\n",
+ s,
+ le64toh(o->tag.seqnum),
+ le64toh(o->tag.epoch));
+ break;
+
+ default:
+ if (s)
+                                printf("Type: %s\n", s);
+                        else
+                                printf("Type: unknown (%i)\n", o->object.type);
+
+ break;
+ }
+
+ c = COMPRESSION_FROM_OBJECT(o);
+ if (c > COMPRESSION_NONE)
+ printf("Flags: %s\n",
+ compression_to_string(c));
+
+ if (p == le64toh(f->header->tail_object_offset))
+ p = 0;
+ else
+ p += ALIGN64(le64toh(o->object.size));
+ }
+
+ return;
+fail:
+ log_error("File corrupt");
+}
+
+/* Note: the lifetime of the compound literal is the immediately surrounding block. */
+#define FORMAT_TIMESTAMP_SAFE(t) (FORMAT_TIMESTAMP(t) ?: " --- ")
+
+void journal_file_print_header(JournalFile *f) {
+ struct stat st;
+
+ assert(f);
+ assert(f->header);
+
+ printf("File path: %s\n"
+ "File ID: %s\n"
+ "Machine ID: %s\n"
+ "Boot ID: %s\n"
+ "Sequential number ID: %s\n"
+ "State: %s\n"
+ "Compatible flags:%s%s%s%s\n"
+ "Incompatible flags:%s%s%s%s%s%s\n"
+ "Header size: %"PRIu64"\n"
+ "Arena size: %"PRIu64"\n"
+ "Data hash table size: %"PRIu64"\n"
+ "Field hash table size: %"PRIu64"\n"
+ "Rotate suggested: %s\n"
+ "Head sequential number: %"PRIu64" (%"PRIx64")\n"
+ "Tail sequential number: %"PRIu64" (%"PRIx64")\n"
+ "Head realtime timestamp: %s (%"PRIx64")\n"
+ "Tail realtime timestamp: %s (%"PRIx64")\n"
+ "Tail monotonic timestamp: %s (%"PRIx64")\n"
+ "Objects: %"PRIu64"\n"
+ "Entry objects: %"PRIu64"\n",
+ f->path,
+ SD_ID128_TO_STRING(f->header->file_id),
+ SD_ID128_TO_STRING(f->header->machine_id),
+ SD_ID128_TO_STRING(f->header->tail_entry_boot_id),
+ SD_ID128_TO_STRING(f->header->seqnum_id),
+ f->header->state == STATE_OFFLINE ? "OFFLINE" :
+ f->header->state == STATE_ONLINE ? "ONLINE" :
+ f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
+ JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
+ JOURNAL_HEADER_SEALED_CONTINUOUS(f->header) ? " SEALED_CONTINUOUS" : "",
+ JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f->header) ? " TAIL_ENTRY_BOOT_ID" : "",
+ (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
+ JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
+ JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
+ JOURNAL_HEADER_COMPRESSED_ZSTD(f->header) ? " COMPRESSED-ZSTD" : "",
+ JOURNAL_HEADER_KEYED_HASH(f->header) ? " KEYED-HASH" : "",
+ JOURNAL_HEADER_COMPACT(f->header) ? " COMPACT" : "",
+ (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
+ le64toh(f->header->header_size),
+ le64toh(f->header->arena_size),
+ le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
+ le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
+ yes_no(journal_file_rotate_suggested(f, 0, LOG_DEBUG)),
+ le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
+ le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
+ FORMAT_TIMESTAMP_SAFE(le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
+ FORMAT_TIMESTAMP_SAFE(le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
+ FORMAT_TIMESPAN(le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
+ le64toh(f->header->n_objects),
+ le64toh(f->header->n_entries));
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
+ printf("Data objects: %"PRIu64"\n"
+ "Data hash table fill: %.1f%%\n",
+ le64toh(f->header->n_data),
+ 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
+ printf("Field objects: %"PRIu64"\n"
+ "Field hash table fill: %.1f%%\n",
+ le64toh(f->header->n_fields),
+ 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
+ printf("Tag objects: %"PRIu64"\n",
+ le64toh(f->header->n_tags));
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
+ printf("Entry array objects: %"PRIu64"\n",
+ le64toh(f->header->n_entry_arrays));
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth))
+ printf("Deepest field hash chain: %" PRIu64"\n",
+                       le64toh(f->header->field_hash_chain_depth));
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth))
+ printf("Deepest data hash chain: %" PRIu64"\n",
+                       le64toh(f->header->data_hash_chain_depth));
+
+ if (fstat(f->fd, &st) >= 0)
+ printf("Disk usage: %s\n", FORMAT_BYTES((uint64_t) st.st_blocks * 512ULL));
+}
+
+static int journal_file_warn_btrfs(JournalFile *f) {
+ unsigned attrs;
+ int r;
+
+ assert(f);
+
+ /* Before we write anything, check if the COW logic is turned
+         * off on btrfs. Given our write pattern, which is quite
+         * unfriendly to COW file systems, this should greatly improve
+         * performance on such file systems (e.g. btrfs), at the
+         * expense of data integrity features (which shouldn't be too
+         * bad, given that we do our own checksumming). */
+
+ r = fd_is_fs_type(f->fd, BTRFS_SUPER_MAGIC);
+ if (r < 0)
+ return log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT, "Failed to determine if journal is on btrfs: %m");
+ if (r == 0)
+ return 0;
+
+ r = read_attr_fd(f->fd, &attrs);
+ if (r < 0)
+ return log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT, "Failed to read file attributes: %m");
+
+ if (attrs & FS_NOCOW_FL) {
+ log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
+ return 0;
+ }
+
+ log_ratelimit_notice(JOURNAL_LOG_RATELIMIT,
+ "Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
+ "This is likely to slow down journal access substantially, please consider turning "
+ "off the copy-on-write file attribute on the journal directory, using chattr +C.",
+ f->path);
+
+ return 1;
+}
+
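+/* Fill in any metrics the caller left unset (i.e. UINT64_MAX) with defaults derived from the size
+ * of the backing file system, and clamp everything to mutually consistent ranges. */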
+static void journal_default_metrics(JournalMetrics *m, int fd, bool compact) {
+ struct statvfs ss;
+ uint64_t fs_size = 0;
+
+ assert(m);
+ assert(fd >= 0);
+
+ if (fstatvfs(fd, &ss) >= 0)
+ fs_size = u64_multiply_safe(ss.f_frsize, ss.f_blocks);
+ else
+ log_debug_errno(errno, "Failed to determine disk size: %m");
+
+ if (m->max_use == UINT64_MAX) {
+
+ if (fs_size > 0)
+ m->max_use = CLAMP(PAGE_ALIGN_U64(fs_size / 10), /* 10% of file system size */
+ MAX_USE_LOWER, MAX_USE_UPPER);
+ else
+ m->max_use = MAX_USE_LOWER;
+ } else {
+ m->max_use = PAGE_ALIGN_U64(m->max_use);
+
+ if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
+ m->max_use = JOURNAL_FILE_SIZE_MIN*2;
+ }
+
+ if (m->min_use == UINT64_MAX) {
+ if (fs_size > 0)
+ m->min_use = CLAMP(PAGE_ALIGN_U64(fs_size / 50), /* 2% of file system size */
+ MIN_USE_LOW, MIN_USE_HIGH);
+ else
+ m->min_use = MIN_USE_LOW;
+ }
+
+ if (m->min_use > m->max_use)
+ m->min_use = m->max_use;
+
+ if (m->max_size == UINT64_MAX)
+ m->max_size = MIN(PAGE_ALIGN_U64(m->max_use / 8), /* 8 chunks */
+ MAX_SIZE_UPPER);
+ else
+ m->max_size = PAGE_ALIGN_U64(m->max_size);
+
+ if (compact && m->max_size > JOURNAL_COMPACT_SIZE_MAX)
+ m->max_size = JOURNAL_COMPACT_SIZE_MAX;
+
+ if (m->max_size != 0) {
+ if (m->max_size < JOURNAL_FILE_SIZE_MIN)
+ m->max_size = JOURNAL_FILE_SIZE_MIN;
+
+ if (m->max_use != 0 && m->max_size*2 > m->max_use)
+ m->max_use = m->max_size*2;
+ }
+
+ if (m->min_size == UINT64_MAX)
+ m->min_size = JOURNAL_FILE_SIZE_MIN;
+ else
+ m->min_size = CLAMP(PAGE_ALIGN_U64(m->min_size),
+ JOURNAL_FILE_SIZE_MIN,
+ m->max_size ?: UINT64_MAX);
+
+ if (m->keep_free == UINT64_MAX) {
+ if (fs_size > 0)
+ m->keep_free = MIN(PAGE_ALIGN_U64(fs_size / 20), /* 5% of file system size */
+ KEEP_FREE_UPPER);
+ else
+ m->keep_free = DEFAULT_KEEP_FREE;
+ }
+
+ if (m->n_max_files == UINT64_MAX)
+ m->n_max_files = DEFAULT_N_MAX_FILES;
+
+ log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
+ FORMAT_BYTES(m->min_use),
+ FORMAT_BYTES(m->max_use),
+ FORMAT_BYTES(m->max_size),
+ FORMAT_BYTES(m->min_size),
+ FORMAT_BYTES(m->keep_free),
+ m->n_max_files);
+}
+
+int journal_file_open(
+ int fd,
+ const char *fname,
+ int open_flags,
+ JournalFileFlags file_flags,
+ mode_t mode,
+ uint64_t compress_threshold_bytes,
+ JournalMetrics *metrics,
+ MMapCache *mmap_cache,
+ JournalFile *template,
+ JournalFile **ret) {
+
+ bool newly_created = false;
+ JournalFile *f;
+ void *h;
+ int r;
+
+ assert(fd >= 0 || fname);
+ assert(file_flags >= 0);
+ assert(file_flags <= _JOURNAL_FILE_FLAGS_MAX);
+ assert(mmap_cache);
+ assert(ret);
+
+ if (!IN_SET((open_flags & O_ACCMODE), O_RDONLY, O_RDWR))
+ return -EINVAL;
+
+ if ((open_flags & O_ACCMODE) == O_RDONLY && FLAGS_SET(open_flags, O_CREAT))
+ return -EINVAL;
+
+ if (fname && (open_flags & O_CREAT) && !endswith(fname, ".journal"))
+ return -EINVAL;
+
+ f = new(JournalFile, 1);
+ if (!f)
+ return -ENOMEM;
+
+ *f = (JournalFile) {
+ .fd = fd,
+ .mode = mode,
+ .open_flags = open_flags,
+ .compress_threshold_bytes = compress_threshold_bytes == UINT64_MAX ?
+ DEFAULT_COMPRESS_THRESHOLD :
+ MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),
+ .strict_order = FLAGS_SET(file_flags, JOURNAL_STRICT_ORDER),
+ .newest_boot_id_prioq_idx = PRIOQ_IDX_NULL,
+ .last_direction = _DIRECTION_INVALID,
+ };
+
+ if (fname) {
+ f->path = strdup(fname);
+ if (!f->path) {
+ r = -ENOMEM;
+ goto fail;
+ }
+ } else {
+ assert(fd >= 0);
+
+ /* If we don't know the path, fill in something explanatory and vaguely useful */
+ if (asprintf(&f->path, "/proc/self/%i", fd) < 0) {
+ r = -ENOMEM;
+ goto fail;
+ }
+ }
+
+ f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
+ if (!f->chain_cache) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ if (f->fd < 0) {
+ /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
+                 * or so, we will likely fail quickly rather than block for long. For regular files O_NONBLOCK has no
+                 * effect, hence it doesn't hurt in that case. */
+
+ f->fd = openat_report_new(AT_FDCWD, f->path, f->open_flags|O_CLOEXEC|O_NONBLOCK, f->mode, &newly_created);
+ if (f->fd < 0) {
+ r = f->fd;
+ goto fail;
+ }
+
+                /* fds we opened ourselves should also be closed by us. */
+ f->close_fd = true;
+
+ r = fd_nonblock(f->fd, false);
+ if (r < 0)
+ goto fail;
+
+ if (!newly_created) {
+ r = journal_file_fstat(f);
+ if (r < 0)
+ goto fail;
+ }
+ } else {
+ r = journal_file_fstat(f);
+ if (r < 0)
+ goto fail;
+
+ /* If we just got the fd passed in, we don't really know if we created the file anew */
+ newly_created = f->last_stat.st_size == 0 && journal_file_writable(f);
+ }
+
+ r = mmap_cache_add_fd(mmap_cache, f->fd, mmap_prot_from_open_flags(open_flags), &f->cache_fd);
+ if (r < 0)
+ goto fail;
+
+ if (newly_created) {
+ (void) journal_file_warn_btrfs(f);
+
+ /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
+ * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
+ * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
+ * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
+ * solely on mtime/atime/ctime of the file. */
+ (void) fd_setcrtime(f->fd, 0);
+
+ r = journal_file_init_header(f, file_flags, template);
+ if (r < 0)
+ goto fail;
+
+ r = journal_file_fstat(f);
+ if (r < 0)
+ goto fail;
+ }
+
+ if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
+ r = -ENODATA;
+ goto fail;
+ }
+
+ r = mmap_cache_fd_get(f->cache_fd, MMAP_CACHE_CATEGORY_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h);
+ if (r == -EINVAL) {
+ /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
+ * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error
+ * code. */
+ r = -EAFNOSUPPORT;
+ goto fail;
+ }
+ if (r < 0)
+ goto fail;
+
+ f->header = h;
+
+ if (!newly_created) {
+ r = journal_file_verify_header(f);
+ if (r < 0)
+ goto fail;
+ }
+
+#if HAVE_GCRYPT
+ if (!newly_created && journal_file_writable(f) && JOURNAL_HEADER_SEALED(f->header)) {
+ r = journal_file_fss_load(f);
+ if (r < 0)
+ goto fail;
+ }
+#endif
+
+ if (journal_file_writable(f)) {
+ if (metrics) {
+ journal_default_metrics(metrics, f->fd, JOURNAL_HEADER_COMPACT(f->header));
+ f->metrics = *metrics;
+ } else if (template)
+ f->metrics = template->metrics;
+
+ r = journal_file_refresh_header(f);
+ if (r < 0)
+ goto fail;
+ }
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_setup(f);
+ if (r < 0)
+ goto fail;
+#endif
+
+ if (newly_created) {
+ r = journal_file_setup_field_hash_table(f);
+ if (r < 0)
+ goto fail;
+
+ r = journal_file_setup_data_hash_table(f);
+ if (r < 0)
+ goto fail;
+
+#if HAVE_GCRYPT
+ r = journal_file_append_first_tag(f);
+ if (r < 0)
+ goto fail;
+#endif
+ }
+
+ if (mmap_cache_fd_got_sigbus(f->cache_fd)) {
+ r = -EIO;
+ goto fail;
+ }
+
+ if (template && template->post_change_timer) {
+ r = journal_file_enable_post_change_timer(
+ f,
+ sd_event_source_get_event(template->post_change_timer),
+ template->post_change_timer_period);
+
+ if (r < 0)
+ goto fail;
+ }
+
+ /* The file is opened now successfully, thus we take possession of any passed in fd. */
+ f->close_fd = true;
+
+ if (DEBUG_LOGGING) {
+ static int last_seal = -1, last_keyed_hash = -1;
+ static Compression last_compression = _COMPRESSION_INVALID;
+ static uint64_t last_bytes = UINT64_MAX;
+
+ if (last_seal != JOURNAL_HEADER_SEALED(f->header) ||
+ last_keyed_hash != JOURNAL_HEADER_KEYED_HASH(f->header) ||
+ last_compression != JOURNAL_FILE_COMPRESSION(f) ||
+ last_bytes != f->compress_threshold_bytes) {
+
+ log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
+ yes_no(JOURNAL_HEADER_SEALED(f->header)), yes_no(JOURNAL_HEADER_KEYED_HASH(f->header)),
+ compression_to_string(JOURNAL_FILE_COMPRESSION(f)), FORMAT_BYTES(f->compress_threshold_bytes));
+ last_seal = JOURNAL_HEADER_SEALED(f->header);
+ last_keyed_hash = JOURNAL_HEADER_KEYED_HASH(f->header);
+ last_compression = JOURNAL_FILE_COMPRESSION(f);
+ last_bytes = f->compress_threshold_bytes;
+ }
+ }
+
+ *ret = f;
+ return 0;
+
+fail:
+ if (f->cache_fd && mmap_cache_fd_got_sigbus(f->cache_fd))
+ r = -EIO;
+
+ (void) journal_file_close(f);
+
+ if (newly_created && fd < 0)
+ (void) unlink(fname);
+
+ return r;
+}
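For orientation, a minimal, hedged sketch of how journal_file_open() above is typically driven for read-only access. The path is illustrative, and treating UINT64_MAX as "use the default compression threshold" is an assumption about the callers rather than something shown in this hunk; the returned JournalFile must eventually be released with journal_file_close().

#include "journal-file.h"
#include "mmap-cache.h"

/* Hedged sketch: open an existing journal file read-only (fd = -1 means "open by path"). */
static int open_system_journal(MMapCache *cache, JournalFile **ret) {
        return journal_file_open(
                        /* fd= */ -1,
                        "/var/log/journal/machine-id/system.journal", /* illustrative path */
                        O_RDONLY,
                        /* file_flags= */ 0,
                        /* mode= */ 0640,
                        /* compress_threshold_bytes= */ UINT64_MAX,   /* assumed: pick the default */
                        /* metrics= */ NULL,
                        cache,
                        /* template= */ NULL,
                        ret);
}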
+
+int journal_file_parse_uid_from_filename(const char *path, uid_t *ret_uid) {
+ _cleanup_free_ char *buf = NULL, *p = NULL;
+ const char *a, *b, *at;
+ int r;
+
+ /* This helper returns -EREMOTE when the filename doesn't match the user online/offline journal
+ * pattern. Hence it currently doesn't parse archived or disposed user journals. */
+
+ assert(path);
+ assert(ret_uid);
+
+ r = path_extract_filename(path, &p);
+ if (r < 0)
+ return r;
+ if (r == O_DIRECTORY)
+ return -EISDIR;
+
+ a = startswith(p, "user-");
+ if (!a)
+ return -EREMOTE;
+ b = endswith(p, ".journal");
+ if (!b)
+ return -EREMOTE;
+
+ at = strchr(a, '@');
+ if (at)
+ return -EREMOTE;
+
+ buf = strndup(a, b-a);
+ if (!buf)
+ return -ENOMEM;
+
+ return parse_uid(buf, ret_uid);
+}
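A short, hedged usage sketch for the parser above; the path is illustrative, and on success the UID encoded in a "user-<uid>.journal" name is returned via ret_uid.

#include <stdio.h>
#include <sys/types.h>
#include "journal-file.h"

static void show_owner(void) {
        uid_t uid;

        /* Online user journals ("user-1000.journal") parse fine; archived ones
         * (containing '@') yield -EREMOTE, as documented above. */
        if (journal_file_parse_uid_from_filename("/var/log/journal/machine-id/user-1000.journal", &uid) >= 0)
                printf("journal belongs to UID %u\n", (unsigned) uid);
}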
+
+int journal_file_archive(JournalFile *f, char **ret_previous_path) {
+ _cleanup_free_ char *p = NULL;
+
+ assert(f);
+
+ if (!journal_file_writable(f))
+ return -EINVAL;
+
+ /* Is this a journal file that was passed to us as an fd? If so, we synthesized a path name for it, and we refuse
+ * rotation, since we don't know the actual path and hence couldn't rename the file. */
+ if (path_startswith(f->path, "/proc/self/fd"))
+ return -EINVAL;
+
+ if (!endswith(f->path, ".journal"))
+ return -EINVAL;
+
+ if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
+ (int) strlen(f->path) - 8, f->path,
+ SD_ID128_FORMAT_VAL(f->header->seqnum_id),
+ le64toh(f->header->head_entry_seqnum),
+ le64toh(f->header->head_entry_realtime)) < 0)
+ return -ENOMEM;
+
+ /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
+ * ignore that case. */
+ if (rename(f->path, p) < 0 && errno != ENOENT)
+ return -errno;
+
+ /* Sync the rename to disk */
+ (void) fsync_directory_of_file(f->fd);
+
+ if (ret_previous_path)
+ *ret_previous_path = f->path;
+ else
+ free(f->path);
+
+ f->path = TAKE_PTR(p);
+
+ /* Set as archive so offlining commits with state=STATE_ARCHIVED. Previously we would set old_file->header->state
+ * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
+ * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
+ * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
+ * occurs. */
+ f->archive = true;
+
+ return 0;
+}
+
+int journal_file_dispose(int dir_fd, const char *fname) {
+ _cleanup_free_ char *p = NULL;
+
+ assert(fname);
+
+ /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shut down. Note that
+ * this is done without looking into the file or changing any of its contents. The idea is that this is called
+ * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
+ * for writing anymore. */
+
+ if (!endswith(fname, ".journal"))
+ return -EINVAL;
+
+ if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
+ (int) strlen(fname) - 8, fname,
+ now(CLOCK_REALTIME),
+ random_u64()) < 0)
+ return -ENOMEM;
+
+ if (renameat(dir_fd, fname, dir_fd, p) < 0)
+ return -errno;
+
+ return 0;
+}
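To make the naming scheme concrete: journal_file_archive() renames e.g. "system.journal" to "system@<seqnum-id>-<head-seqnum>-<head-realtime>.journal", while journal_file_dispose() produces "system@<realtime>-<random>.journal~". A hedged sketch of a dispose call follows; the directory path is illustrative.

#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include "journal-file.h"

static int dispose_suspicious(void) {
        int dir_fd, r;

        dir_fd = open("/var/log/journal/machine-id", O_RDONLY|O_DIRECTORY|O_CLOEXEC);
        if (dir_fd < 0)
                return -errno;

        /* Move the file out of the way without touching its contents. */
        r = journal_file_dispose(dir_fd, "system.journal");
        close(dir_fd);
        return r;
}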
+
+int journal_file_copy_entry(
+ JournalFile *from,
+ JournalFile *to,
+ Object *o,
+ uint64_t p,
+ uint64_t *seqnum,
+ sd_id128_t *seqnum_id) {
+
+ _cleanup_free_ EntryItem *items_alloc = NULL;
+ EntryItem *items;
+ uint64_t n, m = 0, xor_hash = 0;
+ sd_id128_t boot_id;
+ dual_timestamp ts;
+ int r;
+
+ assert(from);
+ assert(to);
+ assert(o);
+ assert(p > 0);
+
+ if (!journal_file_writable(to))
+ return -EPERM;
+
+ ts = (dual_timestamp) {
+ .monotonic = le64toh(o->entry.monotonic),
+ .realtime = le64toh(o->entry.realtime),
+ };
+ boot_id = o->entry.boot_id;
+
+ n = journal_file_entry_n_items(from, o);
+ if (n == 0)
+ return 0;
+
+ if (n < ALLOCA_MAX / sizeof(EntryItem) / 2)
+ items = newa(EntryItem, n);
+ else {
+ items_alloc = new(EntryItem, n);
+ if (!items_alloc)
+ return -ENOMEM;
+
+ items = items_alloc;
+ }
+
+ for (uint64_t i = 0; i < n; i++) {
+ uint64_t h, q;
+ void *data;
+ size_t l;
+ Object *u;
+
+ q = journal_file_entry_item_object_offset(from, o, i);
+ r = journal_file_data_payload(from, NULL, q, NULL, 0, 0, &data, &l);
+ if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG)) {
+ log_debug_errno(r, "Entry item %"PRIu64" data object is bad, skipping over it: %m", i);
+ continue;
+ }
+ if (r < 0)
+ return r;
+ assert(r > 0);
+
+ if (l == 0)
+ return -EBADMSG;
+
+ r = journal_file_append_data(to, data, l, &u, &h);
+ if (r < 0)
+ return r;
+
+ if (JOURNAL_HEADER_KEYED_HASH(to->header))
+ xor_hash ^= jenkins_hash64(data, l);
+ else
+ xor_hash ^= le64toh(u->data.hash);
+
+ items[m++] = (EntryItem) {
+ .object_offset = h,
+ .hash = le64toh(u->data.hash),
+ };
+ }
+
+ if (m == 0)
+ return 0;
+
+ r = journal_file_append_entry_internal(
+ to,
+ &ts,
+ &boot_id,
+ &from->header->machine_id,
+ xor_hash,
+ items,
+ m,
+ seqnum,
+ seqnum_id,
+ /* ret_object= */ NULL,
+ /* ret_offset= */ NULL);
+
+ if (mmap_cache_fd_got_sigbus(to->cache_fd))
+ return -EIO;
+
+ return r;
+}
+
+void journal_reset_metrics(JournalMetrics *m) {
+ assert(m);
+
+ /* Set everything to "pick automatic values". */
+
+ *m = (JournalMetrics) {
+ .min_use = UINT64_MAX,
+ .max_use = UINT64_MAX,
+ .min_size = UINT64_MAX,
+ .max_size = UINT64_MAX,
+ .keep_free = UINT64_MAX,
+ .n_max_files = UINT64_MAX,
+ };
+}
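A brief sketch of the intended pattern around journal_reset_metrics(): reset everything to "automatic" first, then override individual limits. The caps below are purely illustrative.

#include "journal-file.h"

static void configure_metrics(JournalMetrics *m) {
        journal_reset_metrics(m);                 /* all fields become UINT64_MAX = "pick automatically" */
        m->max_size = UINT64_C(64) * 1024 * 1024; /* illustrative per-file cap of 64 MiB */
        m->n_max_files = 100;                     /* illustrative file count limit */
}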
+
+int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *ret_from, usec_t *ret_to) {
+ assert(f);
+ assert(f->header);
+ assert(ret_from || ret_to);
+
+ if (ret_from) {
+ if (f->header->head_entry_realtime == 0)
+ return -ENOENT;
+
+ *ret_from = le64toh(f->header->head_entry_realtime);
+ }
+
+ if (ret_to) {
+ if (f->header->tail_entry_realtime == 0)
+ return -ENOENT;
+
+ *ret_to = le64toh(f->header->tail_entry_realtime);
+ }
+
+ return 1;
+}
+
+int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *ret_from, usec_t *ret_to) {
+ Object *o;
+ uint64_t p;
+ int r;
+
+ assert(f);
+ assert(ret_from || ret_to);
+
+ /* FIXME: fix return value assignment on success with 0. */
+
+ r = find_data_object_by_boot_id(f, boot_id, &o, &p);
+ if (r <= 0)
+ return r;
+
+ if (le64toh(o->data.n_entries) <= 0)
+ return 0;
+
+ if (ret_from) {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
+ if (r < 0)
+ return r;
+
+ *ret_from = le64toh(o->entry.monotonic);
+ }
+
+ if (ret_to) {
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ r = journal_file_move_to_entry_for_data(f, o, DIRECTION_UP, &o, NULL);
+ if (r <= 0)
+ return r;
+
+ *ret_to = le64toh(o->entry.monotonic);
+ }
+
+ return 1;
+}
+
+bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec, int log_level) {
+ assert(f);
+ assert(f->header);
+
+ /* If we gained new header fields, we gained new features,
+ * hence suggest a rotation. */
+ if (le64toh(f->header->header_size) < sizeof(Header)) {
+ log_ratelimit_full(log_level, JOURNAL_LOG_RATELIMIT,
+ "%s uses an outdated header, suggesting rotation.", f->path);
+ return true;
+ }
+
+ /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
+ * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
+ * need the n_data field, which only exists in newer versions. */
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
+ if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+ log_ratelimit_full(
+ log_level, JOURNAL_LOG_RATELIMIT,
+ "Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %"PRIu64" file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
+ f->path,
+ 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
+ le64toh(f->header->n_data),
+ le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
+ (uint64_t) f->last_stat.st_size,
+ f->last_stat.st_size / le64toh(f->header->n_data));
+ return true;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
+ if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+ log_ratelimit_full(
+ log_level, JOURNAL_LOG_RATELIMIT,
+ "Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
+ f->path,
+ 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
+ le64toh(f->header->n_fields),
+ le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
+ return true;
+ }
+
+ /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
+ * longest chain is longer than some threshold, let's suggest rotation. */
+ if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) &&
+ le64toh(f->header->data_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
+ log_ratelimit_full(
+ log_level, JOURNAL_LOG_RATELIMIT,
+ "Data hash table of %s has deepest hash chain of length %" PRIu64 ", suggesting rotation.",
+ f->path, le64toh(f->header->data_hash_chain_depth));
+ return true;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) &&
+ le64toh(f->header->field_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
+ log_ratelimit_full(
+ log_level, JOURNAL_LOG_RATELIMIT,
+ "Field hash table of %s has deepest hash chain of length at %" PRIu64 ", suggesting rotation.",
+ f->path, le64toh(f->header->field_hash_chain_depth));
+ return true;
+ }
+
+ /* Are the data objects properly indexed by field objects? */
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
+ JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
+ le64toh(f->header->n_data) > 0 &&
+ le64toh(f->header->n_fields) == 0) {
+ log_ratelimit_full(
+ log_level, JOURNAL_LOG_RATELIMIT,
+ "Data objects of %s are not indexed by field objects, suggesting rotation.",
+ f->path);
+ return true;
+ }
+
+ if (max_file_usec > 0) {
+ usec_t t, h;
+
+ h = le64toh(f->header->head_entry_realtime);
+ t = now(CLOCK_REALTIME);
+
+ if (h > 0 && t > h + max_file_usec) {
+ log_ratelimit_full(
+ log_level, JOURNAL_LOG_RATELIMIT,
+ "Oldest entry in %s is older than the configured file retention duration (%s), suggesting rotation.",
+ f->path, FORMAT_TIMESPAN(max_file_usec, USEC_PER_SEC));
+ return true;
+ }
+ }
+
+ return false;
+}
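The hash table checks above implement the 75% fill-level rule without floating point by comparing n*4 against buckets*3; a tiny standalone restatement of that predicate, with names local to this sketch:

#include <stdbool.h>
#include <stdint.h>

/* True if n_items/n_buckets exceeds 3/4, i.e. 4*n_items > 3*n_buckets,
 * mirroring the rotation checks above. */
static bool over_fill_level(uint64_t n_items, uint64_t n_buckets) {
        return n_items * 4ULL > n_buckets * 3ULL;
}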
+
+static const char * const journal_object_type_table[] = {
+ [OBJECT_UNUSED] = "unused",
+ [OBJECT_DATA] = "data",
+ [OBJECT_FIELD] = "field",
+ [OBJECT_ENTRY] = "entry",
+ [OBJECT_DATA_HASH_TABLE] = "data hash table",
+ [OBJECT_FIELD_HASH_TABLE] = "field hash table",
+ [OBJECT_ENTRY_ARRAY] = "entry array",
+ [OBJECT_TAG] = "tag",
+};
+
+DEFINE_STRING_TABLE_LOOKUP_TO_STRING(journal_object_type, ObjectType);
diff --git a/src/libsystemd/sd-journal/journal-file.h b/src/libsystemd/sd-journal/journal-file.h
new file mode 100644
index 0000000..81fafb9
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-file.h
@@ -0,0 +1,393 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include <fcntl.h>
+#include <inttypes.h>
+#include <sys/uio.h>
+
+#if HAVE_GCRYPT
+# include <gcrypt.h>
+#endif
+
+#include "sd-event.h"
+#include "sd-id128.h"
+
+#include "compress.h"
+#include "hashmap.h"
+#include "journal-def.h"
+#include "mmap-cache.h"
+#include "sparse-endian.h"
+#include "time-util.h"
+
+typedef struct JournalMetrics {
+ /* For all these: UINT64_MAX means "pick automatically", and 0 means "no limit enforced" */
+ uint64_t max_size; /* how large journal files grow at max */
+ uint64_t min_size; /* how large journal files grow at least */
+ uint64_t max_use; /* how much disk space to use in total at max, keep_free permitting */
+ uint64_t min_use; /* how much disk space to use in total at least, even if keep_free says not to */
+ uint64_t keep_free; /* how much to keep free on disk */
+ uint64_t n_max_files; /* how many files to keep around at max */
+} JournalMetrics;
+
+typedef enum direction {
+ DIRECTION_UP,
+ DIRECTION_DOWN,
+ _DIRECTION_INVALID = -EINVAL,
+} direction_t;
+
+typedef enum LocationType {
+ /* The first and last entries, resp. */
+ LOCATION_HEAD,
+ LOCATION_TAIL,
+
+ /* We already read the entry we currently point to, and the
+ * next one to read should probably not be this one again. */
+ LOCATION_DISCRETE,
+
+ /* We should seek to the precise location specified, and
+ * return it, as we haven't read it yet. */
+ LOCATION_SEEK
+} LocationType;
+
+typedef enum OfflineState {
+ OFFLINE_JOINED,
+ OFFLINE_SYNCING,
+ OFFLINE_OFFLINING,
+ OFFLINE_CANCEL,
+ OFFLINE_AGAIN_FROM_SYNCING,
+ OFFLINE_AGAIN_FROM_OFFLINING,
+ OFFLINE_DONE
+} OfflineState;
+
+typedef struct JournalFile {
+ int fd;
+ MMapFileDescriptor *cache_fd;
+
+ mode_t mode;
+
+ int open_flags;
+ bool close_fd:1;
+ bool archive:1;
+ bool strict_order:1;
+
+ direction_t last_direction;
+ LocationType location_type;
+ uint64_t last_n_entries;
+
+ char *path;
+ struct stat last_stat;
+ usec_t last_stat_usec;
+
+ Header *header;
+ HashItem *data_hash_table;
+ HashItem *field_hash_table;
+
+ uint64_t current_offset;
+ uint64_t current_seqnum;
+ uint64_t current_realtime;
+ uint64_t current_monotonic;
+ sd_id128_t current_boot_id;
+ uint64_t current_xor_hash;
+
+ JournalMetrics metrics;
+
+ sd_event_source *post_change_timer;
+ usec_t post_change_timer_period;
+
+ OrderedHashmap *chain_cache;
+
+ pthread_t offline_thread;
+ volatile OfflineState offline_state;
+
+ unsigned last_seen_generation;
+
+ uint64_t compress_threshold_bytes;
+#if HAVE_COMPRESSION
+ void *compress_buffer;
+#endif
+
+#if HAVE_GCRYPT
+ gcry_md_hd_t hmac;
+ bool hmac_running;
+
+ FSSHeader *fss_file;
+ size_t fss_file_size;
+
+ uint64_t fss_start_usec;
+ uint64_t fss_interval_usec;
+
+ void *fsprg_state;
+ size_t fsprg_state_size;
+
+ void *fsprg_seed;
+ size_t fsprg_seed_size;
+#endif
+
+ /* When we insert this file into the per-boot priority queue 'newest_by_boot_id' in sd_journal, it is ordered by these keys. */
+ sd_id128_t newest_boot_id;
+ sd_id128_t newest_machine_id;
+ uint64_t newest_monotonic_usec;
+ uint64_t newest_realtime_usec;
+ unsigned newest_boot_id_prioq_idx;
+ usec_t newest_mtime;
+} JournalFile;
+
+typedef enum JournalFileFlags {
+ JOURNAL_COMPRESS = 1 << 0,
+ JOURNAL_SEAL = 1 << 1,
+ JOURNAL_STRICT_ORDER = 1 << 2,
+ _JOURNAL_FILE_FLAGS_MAX = JOURNAL_COMPRESS|JOURNAL_SEAL|JOURNAL_STRICT_ORDER,
+} JournalFileFlags;
+
+typedef struct {
+ uint64_t object_offset;
+ uint64_t hash;
+} EntryItem;
+
+int journal_file_open(
+ int fd,
+ const char *fname,
+ int open_flags,
+ JournalFileFlags file_flags,
+ mode_t mode,
+ uint64_t compress_threshold_bytes,
+ JournalMetrics *metrics,
+ MMapCache *mmap_cache,
+ JournalFile *template,
+ JournalFile **ret);
+
+int journal_file_set_offline_thread_join(JournalFile *f);
+JournalFile* journal_file_close(JournalFile *j);
+int journal_file_fstat(JournalFile *f);
+DEFINE_TRIVIAL_CLEANUP_FUNC(JournalFile*, journal_file_close);
+
+#define ALIGN64(x) (((x) + 7ULL) & ~7ULL)
+#define VALID64(x) (((x) & 7ULL) == 0ULL)
+
+/* Use six characters to cover the offsets common in smallish journal
+ * files without adding too many zeros. */
+#define OFSfmt "%06"PRIx64
+
+static inline bool VALID_REALTIME(uint64_t u) {
+ /* This considers timestamps until the year 3112 valid. That should be plenty of room... */
+ return u > 0 && u < (1ULL << 55);
+}
+
+static inline bool VALID_MONOTONIC(uint64_t u) {
+ /* This considers timestamps until 1142 years of runtime valid. */
+ return u < (1ULL << 55);
+}
+
+static inline bool VALID_EPOCH(uint64_t u) {
+ /* This allows changing the key for 1142 years, every usec. */
+ return u < (1ULL << 55);
+}
+
+#define JOURNAL_HEADER_CONTAINS(h, field) \
+ (le64toh((h)->header_size) >= offsetof(Header, field) + sizeof((h)->field))
+
+#define JOURNAL_HEADER_SEALED(h) \
+ FLAGS_SET(le32toh((h)->compatible_flags), HEADER_COMPATIBLE_SEALED)
+
+#define JOURNAL_HEADER_SEALED_CONTINUOUS(h) \
+ FLAGS_SET(le32toh((h)->compatible_flags), HEADER_COMPATIBLE_SEALED_CONTINUOUS)
+
+#define JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(h) \
+ FLAGS_SET(le32toh((h)->compatible_flags), HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID)
+
+#define JOURNAL_HEADER_COMPRESSED_XZ(h) \
+ FLAGS_SET(le32toh((h)->incompatible_flags), HEADER_INCOMPATIBLE_COMPRESSED_XZ)
+
+#define JOURNAL_HEADER_COMPRESSED_LZ4(h) \
+ FLAGS_SET(le32toh((h)->incompatible_flags), HEADER_INCOMPATIBLE_COMPRESSED_LZ4)
+
+#define JOURNAL_HEADER_COMPRESSED_ZSTD(h) \
+ FLAGS_SET(le32toh((h)->incompatible_flags), HEADER_INCOMPATIBLE_COMPRESSED_ZSTD)
+
+#define JOURNAL_HEADER_KEYED_HASH(h) \
+ FLAGS_SET(le32toh((h)->incompatible_flags), HEADER_INCOMPATIBLE_KEYED_HASH)
+
+#define JOURNAL_HEADER_COMPACT(h) \
+ FLAGS_SET(le32toh((h)->incompatible_flags), HEADER_INCOMPATIBLE_COMPACT)
+
+int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret);
+int journal_file_pin_object(JournalFile *f, Object *o);
+int journal_file_read_object_header(JournalFile *f, ObjectType type, uint64_t offset, Object *ret);
+
+int journal_file_tail_end_by_pread(JournalFile *f, uint64_t *ret_offset);
+int journal_file_tail_end_by_mmap(JournalFile *f, uint64_t *ret_offset);
+
+static inline uint64_t journal_file_entry_item_object_offset(JournalFile *f, Object *o, size_t i) {
+ assert(f);
+ assert(o);
+ return JOURNAL_HEADER_COMPACT(f->header) ? le32toh(o->entry.items.compact[i].object_offset) :
+ le64toh(o->entry.items.regular[i].object_offset);
+}
+
+static inline size_t journal_file_entry_item_size(JournalFile *f) {
+ assert(f);
+ return JOURNAL_HEADER_COMPACT(f->header) ? sizeof_field(Object, entry.items.compact[0]) :
+ sizeof_field(Object, entry.items.regular[0]);
+}
+
+uint64_t journal_file_entry_n_items(JournalFile *f, Object *o) _pure_;
+
+int journal_file_data_payload(
+ JournalFile *f,
+ Object *o,
+ uint64_t offset,
+ const char *field,
+ size_t field_length,
+ size_t data_threshold,
+ void **ret_data,
+ size_t *ret_size);
+
+static inline size_t journal_file_data_payload_offset(JournalFile *f) {
+ return JOURNAL_HEADER_COMPACT(f->header)
+ ? offsetof(Object, data.compact.payload)
+ : offsetof(Object, data.regular.payload);
+}
+
+static inline uint8_t* journal_file_data_payload_field(JournalFile *f, Object *o) {
+ return JOURNAL_HEADER_COMPACT(f->header) ? o->data.compact.payload : o->data.regular.payload;
+}
+
+uint64_t journal_file_entry_array_n_items(JournalFile *f, Object *o) _pure_;
+
+static inline uint64_t journal_file_entry_array_item(JournalFile *f, Object *o, size_t i) {
+ assert(f);
+ assert(o);
+ return JOURNAL_HEADER_COMPACT(f->header) ? le32toh(o->entry_array.items.compact[i]) :
+ le64toh(o->entry_array.items.regular[i]);
+}
+
+static inline size_t journal_file_entry_array_item_size(JournalFile *f) {
+ assert(f);
+ return JOURNAL_HEADER_COMPACT(f->header) ? sizeof(le32_t) : sizeof(le64_t);
+}
+
+uint64_t journal_file_hash_table_n_items(Object *o) _pure_;
+
+int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret_object, uint64_t *ret_offset);
+int journal_file_append_entry(
+ JournalFile *f,
+ const dual_timestamp *ts,
+ const sd_id128_t *boot_id,
+ const struct iovec iovec[],
+ size_t n_iovec,
+ uint64_t *seqnum,
+ sd_id128_t *seqnum_id,
+ Object **ret_object,
+ uint64_t *ret_offset);
+
+int journal_file_find_data_object(JournalFile *f, const void *data, uint64_t size, Object **ret_object, uint64_t *ret_offset);
+int journal_file_find_data_object_with_hash(JournalFile *f, const void *data, uint64_t size, uint64_t hash, Object **ret_object, uint64_t *ret_offset);
+
+int journal_file_find_field_object(JournalFile *f, const void *field, uint64_t size, Object **ret_object, uint64_t *ret_offset);
+int journal_file_find_field_object_with_hash(JournalFile *f, const void *field, uint64_t size, uint64_t hash, Object **ret_object, uint64_t *ret_offset);
+
+void journal_file_reset_location(JournalFile *f);
+void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset);
+int journal_file_next_entry(JournalFile *f, uint64_t p, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+
+int journal_file_move_to_entry_by_offset(JournalFile *f, uint64_t p, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+int journal_file_move_to_entry_by_seqnum(JournalFile *f, uint64_t seqnum, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+int journal_file_move_to_entry_by_realtime(JournalFile *f, uint64_t realtime, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+int journal_file_move_to_entry_by_monotonic(JournalFile *f, sd_id128_t boot_id, uint64_t monotonic, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+
+int journal_file_move_to_entry_for_data(JournalFile *f, Object *d, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+
+int journal_file_move_to_entry_by_offset_for_data(JournalFile *f, Object *d, uint64_t p, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+int journal_file_move_to_entry_by_seqnum_for_data(JournalFile *f, Object *d, uint64_t seqnum, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+int journal_file_move_to_entry_by_realtime_for_data(JournalFile *f, Object *d, uint64_t realtime, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+int journal_file_move_to_entry_by_monotonic_for_data(JournalFile *f, Object *d, sd_id128_t boot_id, uint64_t monotonic, direction_t direction, Object **ret_object, uint64_t *ret_offset);
+
+int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p, uint64_t *seqnum, sd_id128_t *seqnum_id);
+
+void journal_file_dump(JournalFile *f);
+void journal_file_print_header(JournalFile *f);
+
+int journal_file_archive(JournalFile *f, char **ret_previous_path);
+int journal_file_parse_uid_from_filename(const char *path, uid_t *uid);
+JournalFile* journal_initiate_close(JournalFile *f, Set *deferred_closes);
+
+int journal_file_dispose(int dir_fd, const char *fname);
+
+void journal_file_post_change(JournalFile *f);
+int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t);
+
+void journal_reset_metrics(JournalMetrics *m);
+
+int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *ret_from, usec_t *ret_to);
+int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot, usec_t *ret_from, usec_t *ret_to);
+
+bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec, int log_level);
+
+int journal_file_map_data_hash_table(JournalFile *f);
+int journal_file_map_field_hash_table(JournalFile *f);
+
+static inline Compression JOURNAL_FILE_COMPRESSION(JournalFile *f) {
+ assert(f);
+
+ if (JOURNAL_HEADER_COMPRESSED_XZ(f->header))
+ return COMPRESSION_XZ;
+ if (JOURNAL_HEADER_COMPRESSED_LZ4(f->header))
+ return COMPRESSION_LZ4;
+ if (JOURNAL_HEADER_COMPRESSED_ZSTD(f->header))
+ return COMPRESSION_ZSTD;
+ return COMPRESSION_NONE;
+}
+
+uint64_t journal_file_hash_data(JournalFile *f, const void *data, size_t sz);
+
+bool journal_field_valid(const char *p, size_t l, bool allow_protected);
+
+const char* journal_object_type_to_string(ObjectType type) _const_;
+
+static inline Compression COMPRESSION_FROM_OBJECT(const Object *o) {
+ assert(o);
+
+ switch (o->object.flags & _OBJECT_COMPRESSED_MASK) {
+ case 0:
+ return COMPRESSION_NONE;
+ case OBJECT_COMPRESSED_XZ:
+ return COMPRESSION_XZ;
+ case OBJECT_COMPRESSED_LZ4:
+ return COMPRESSION_LZ4;
+ case OBJECT_COMPRESSED_ZSTD:
+ return COMPRESSION_ZSTD;
+ default:
+ return _COMPRESSION_INVALID;
+ }
+}
+
+static inline uint8_t COMPRESSION_TO_OBJECT_FLAG(Compression c) {
+ switch (c) {
+ case COMPRESSION_XZ:
+ return OBJECT_COMPRESSED_XZ;
+ case COMPRESSION_LZ4:
+ return OBJECT_COMPRESSED_LZ4;
+ case COMPRESSION_ZSTD:
+ return OBJECT_COMPRESSED_ZSTD;
+ default:
+ return 0;
+ }
+}
+
+static inline uint32_t COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(Compression c) {
+ switch (c) {
+ case COMPRESSION_XZ:
+ return HEADER_INCOMPATIBLE_COMPRESSED_XZ;
+ case COMPRESSION_LZ4:
+ return HEADER_INCOMPATIBLE_COMPRESSED_LZ4;
+ case COMPRESSION_ZSTD:
+ return HEADER_INCOMPATIBLE_COMPRESSED_ZSTD;
+ default:
+ return 0;
+ }
+}
+
+static inline bool journal_file_writable(JournalFile *f) {
+ assert(f);
+ return (f->open_flags & O_ACCMODE) != O_RDONLY;
+}
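A few sanity assertions, as a hedged sketch, showing what the alignment macros and the compression/flag mapping declared above guarantee:

#include <assert.h>
#include "journal-file.h"

static void header_helpers_example(void) {
        /* Offsets in journal files are always 64-bit aligned. */
        assert(ALIGN64(1) == 8);
        assert(VALID64(16) && !VALID64(12));

        /* Each compression algorithm maps to exactly one incompatible-header flag;
         * COMPRESSION_NONE maps to no flag at all. */
        assert(COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(COMPRESSION_ZSTD) == HEADER_INCOMPATIBLE_COMPRESSED_ZSTD);
        assert(COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(COMPRESSION_NONE) == 0);
}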
diff --git a/src/libsystemd/sd-journal/journal-internal.h b/src/libsystemd/sd-journal/journal-internal.h
new file mode 100644
index 0000000..259aac8
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-internal.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <sys/types.h>
+
+#include "sd-id128.h"
+#include "sd-journal.h"
+
+#include "hashmap.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "list.h"
+#include "set.h"
+
+#define JOURNAL_FILES_MAX 7168u
+
+#define JOURNAL_LOG_RATELIMIT ((const RateLimit) { .interval = 60 * USEC_PER_SEC, .burst = 3 })
+
+typedef struct Match Match;
+typedef struct Location Location;
+typedef struct Directory Directory;
+
+typedef enum MatchType {
+ MATCH_DISCRETE,
+ MATCH_OR_TERM,
+ MATCH_AND_TERM
+} MatchType;
+
+struct Match {
+ MatchType type;
+ Match *parent;
+ LIST_FIELDS(Match, matches);
+
+ /* For concrete matches */
+ char *data;
+ size_t size;
+ uint64_t hash; /* old-style jenkins hash. New-style siphash is different per file, hence won't be cached here */
+
+ /* For terms */
+ LIST_HEAD(Match, matches);
+};
+
+struct Location {
+ LocationType type;
+
+ bool seqnum_set:1;
+ bool realtime_set:1;
+ bool monotonic_set:1;
+ bool xor_hash_set:1;
+
+ uint64_t seqnum;
+ sd_id128_t seqnum_id;
+
+ uint64_t realtime;
+
+ uint64_t monotonic;
+ sd_id128_t boot_id;
+
+ uint64_t xor_hash;
+};
+
+struct Directory {
+ char *path;
+ int wd;
+ bool is_root;
+ unsigned last_seen_generation;
+};
+
+struct sd_journal {
+ int toplevel_fd;
+
+ char *path;
+ char *prefix;
+ char *namespace;
+
+ OrderedHashmap *files;
+ IteratedCache *files_cache;
+ MMapCache *mmap;
+ Hashmap *newest_by_boot_id; /* key: boot_id, value: prioq, ordered by monotonic timestamp of last update */
+
+ Location current_location;
+
+ JournalFile *current_file;
+ uint64_t current_field;
+
+ Match *level0, *level1, *level2;
+
+ uint64_t origin_id;
+
+ int inotify_fd;
+ unsigned current_invalidate_counter, last_invalidate_counter;
+ usec_t last_process_usec;
+ unsigned generation;
+
+ /* Iterating through unique fields and their data values */
+ char *unique_field;
+ JournalFile *unique_file;
+ uint64_t unique_offset;
+
+ /* Iterating through known fields */
+ JournalFile *fields_file;
+ uint64_t fields_offset;
+ uint64_t fields_hash_table_index;
+ char *fields_buffer;
+
+ int flags;
+
+ bool on_network:1;
+ bool no_new_files:1;
+ bool no_inotify:1;
+ bool unique_file_lost:1; /* The file we were iterating over got
+ removed, and there were no more
+ files, so sd_journal_enumerate_unique
+ will return 0. */
+ bool fields_file_lost:1;
+ bool has_runtime_files:1;
+ bool has_persistent_files:1;
+
+ size_t data_threshold;
+
+ Hashmap *directories_by_path;
+ Hashmap *directories_by_wd;
+
+ Hashmap *errors;
+};
+
+char *journal_make_match_string(sd_journal *j);
+void journal_print_header(sd_journal *j);
+
+#define JOURNAL_FOREACH_DATA_RETVAL(j, data, l, retval) \
+ for (sd_journal_restart_data(j); ((retval) = sd_journal_enumerate_data((j), &(data), &(l))) > 0; )
+
+/* All errors that we might encounter while extracting a field that are not real errors,
+ * but only mean that the field is too large or we don't support the compression. */
+static inline bool JOURNAL_ERRNO_IS_UNAVAILABLE_FIELD(int r) {
+ return IN_SET(abs(r),
+ ENOBUFS, /* Field or decompressed field too large */
+ E2BIG, /* Field too large for pointer width */
+ EPROTONOSUPPORT); /* Unsupported compression */
+}
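A hedged sketch of how JOURNAL_FOREACH_DATA_RETVAL and JOURNAL_ERRNO_IS_UNAVAILABLE_FIELD above are meant to be combined when walking the fields of the current entry; it assumes this internal header is on the include path.

#include <stdio.h>
#include "journal-internal.h"

static int print_current_entry_fields(sd_journal *j) {
        const void *data;
        size_t length;
        int r;

        JOURNAL_FOREACH_DATA_RETVAL(j, data, length, r)
                printf("%.*s\n", (int) length, (const char*) data);

        /* r holds the last sd_journal_enumerate_data() result: 0 at the end of the
         * iteration, negative on error. "Field unavailable" errors are tolerable. */
        if (r < 0 && !JOURNAL_ERRNO_IS_UNAVAILABLE_FIELD(r))
                return r;

        return 0;
}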
diff --git a/src/libsystemd/sd-journal/journal-send.c b/src/libsystemd/sd-journal/journal-send.c
new file mode 100644
index 0000000..be23b2f
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-send.c
@@ -0,0 +1,576 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <printf.h>
+#include <stddef.h>
+#include <sys/un.h>
+#include <unistd.h>
+#if HAVE_VALGRIND_VALGRIND_H
+# include <valgrind/valgrind.h>
+#endif
+
+#define SD_JOURNAL_SUPPRESS_LOCATION
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "errno-util.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "io-util.h"
+#include "iovec-util.h"
+#include "journal-send.h"
+#include "memfd-util.h"
+#include "missing_syscall.h"
+#include "process-util.h"
+#include "socket-util.h"
+#include "stdio-util.h"
+#include "string-util.h"
+#include "tmpfile-util.h"
+
+#define SNDBUF_SIZE (8*1024*1024)
+
+#define ALLOCA_CODE_FUNC(f, func) \
+ do { \
+ size_t _fl; \
+ const char *_func = (func); \
+ char **_f = &(f); \
+ _fl = strlen(_func) + 1; \
+ *_f = newa(char, _fl + 10); \
+ memcpy(*_f, "CODE_FUNC=", 10); \
+ memcpy(*_f + 10, _func, _fl); \
+ } while (false)
+
+/* We open a single fd, and we'll share it with the current process,
+ * all its threads, and all its subprocesses. This means we need to
+ * initialize it atomically, and need to operate on it atomically,
+ * never assuming we are the only user. */
+static int fd_plus_one = 0;
+
+static int journal_fd(void) {
+ int fd;
+
+retry:
+ if (fd_plus_one > 0)
+ return fd_plus_one - 1;
+
+ fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0);
+ if (fd < 0)
+ return -errno;
+
+ fd_inc_sndbuf(fd, SNDBUF_SIZE);
+
+ if (!__atomic_compare_exchange_n(&fd_plus_one, &(int){0}, fd+1,
+ false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
+ safe_close(fd);
+ goto retry;
+ }
+
+ return fd;
+}
+
+int journal_fd_nonblock(bool nonblock) {
+ int r;
+
+ r = journal_fd();
+ if (r < 0)
+ return r;
+
+ return fd_nonblock(r, nonblock);
+}
+
+void close_journal_fd(void) {
+#if HAVE_VALGRIND_VALGRIND_H
+ /* Be nice to valgrind. This is not atomic, so it is useful mainly for debugging. */
+
+ if (!RUNNING_ON_VALGRIND)
+ return;
+
+ if (getpid_cached() != gettid())
+ return;
+
+ if (fd_plus_one <= 0)
+ return;
+
+ safe_close(fd_plus_one - 1);
+ fd_plus_one = 0;
+#endif
+}
+
+_public_ int sd_journal_print(int priority, const char *format, ...) {
+ int r;
+ va_list ap;
+
+ va_start(ap, format);
+ r = sd_journal_printv(priority, format, ap);
+ va_end(ap);
+
+ return r;
+}
+
+_public_ int sd_journal_printv(int priority, const char *format, va_list ap) {
+ char p[STRLEN("PRIORITY=") + DECIMAL_STR_MAX(int) + 1];
+ char sbuf[LINE_MAX + 8] = "MESSAGE=";
+ struct iovec iov[2];
+ int len;
+ va_list aq;
+ char *buffer = sbuf;
+
+ assert_return(priority >= 0, -EINVAL);
+ assert_return(priority <= 7, -EINVAL);
+ assert_return(format, -EINVAL);
+
+ xsprintf(p, "PRIORITY=%i", priority & LOG_PRIMASK);
+
+ va_copy(aq, ap);
+ len = vsnprintf(buffer + 8, LINE_MAX, format, aq);
+ va_end(aq);
+
+ if (len >= (int)LONG_LINE_MAX - 8)
+ return -ENOBUFS;
+
+ /* Allocate a larger buffer to accommodate the big message. */
+ if (len >= LINE_MAX) {
+ buffer = alloca_safe(len + 9);
+ memcpy(buffer, "MESSAGE=", 8);
+ assert_se(vsnprintf(buffer + 8, len + 1, format, ap) == len);
+ }
+
+ /* Strip trailing whitespace, keep leading whitespace. */
+ (void) strstrip(buffer);
+
+ /* Suppress empty lines */
+ if (isempty(buffer + 8))
+ return 0;
+
+ iov[0] = IOVEC_MAKE_STRING(buffer);
+ iov[1] = IOVEC_MAKE_STRING(p);
+
+ return sd_journal_sendv(iov, 2);
+}
+
+_printf_(1, 0) static int fill_iovec_sprintf(
+ const char *format,
+ va_list ap,
+ size_t extra,
+ struct iovec **ret_iov,
+ size_t *ret_n_iov) {
+
+ PROTECT_ERRNO;
+ struct iovec *iov = NULL;
+ size_t n = 0;
+
+ assert(ret_iov);
+ assert(ret_n_iov);
+
+ if (extra > 0) {
+ if (!GREEDY_REALLOC0(iov, extra))
+ return -ENOMEM;
+
+ n = extra;
+ }
+
+ CLEANUP_ARRAY(iov, n, iovec_array_free);
+
+ while (format) {
+ _cleanup_free_ char *buffer = NULL;
+ va_list aq;
+
+ va_copy(aq, ap);
+ if (vasprintf(&buffer, format, aq) < 0) {
+ va_end(aq);
+ return -ENOMEM;
+ }
+ va_end(aq);
+
+ VA_FORMAT_ADVANCE(format, ap);
+ format = va_arg(ap, char *);
+
+ if (!GREEDY_REALLOC(iov, n + 1))
+ return -ENOMEM;
+
+ /* strip trailing whitespace, keep leading whitespace */
+ iov[n++] = IOVEC_MAKE_STRING(delete_trailing_chars(TAKE_PTR(buffer), NULL));
+ }
+
+ *ret_iov = TAKE_PTR(iov);
+ *ret_n_iov = n;
+ return 0;
+}
+
+_public_ int sd_journal_send(const char *format, ...) {
+ struct iovec *iov = NULL;
+ size_t n_iov = 0;
+ va_list ap;
+ int r;
+
+ CLEANUP_ARRAY(iov, n_iov, iovec_array_free);
+
+ va_start(ap, format);
+ r = fill_iovec_sprintf(format, ap, 0, &iov, &n_iov);
+ va_end(ap);
+ if (r < 0)
+ return r;
+
+ return sd_journal_sendv(iov, n_iov);
+}
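For reference, the public entry point above is used as documented in sd_journal_send(3): each argument is a printf-style "FIELD=value" format followed by its arguments, and the list is terminated with NULL. The fields below are illustrative.

#include <systemd/sd-journal.h>

static int log_backup_done(int seconds) {
        return sd_journal_send("MESSAGE=Backup finished in %d seconds", seconds,
                               "PRIORITY=%i", LOG_INFO,
                               "BACKUP_STATE=done",
                               NULL);
}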
+
+_public_ int sd_journal_sendv(const struct iovec *iov, int n) {
+ PROTECT_ERRNO;
+ int fd, r;
+ _cleanup_close_ int buffer_fd = -EBADF;
+ struct iovec *w;
+ uint64_t *l;
+ int i, j = 0;
+ static const union sockaddr_union sa = {
+ .un.sun_family = AF_UNIX,
+ .un.sun_path = "/run/systemd/journal/socket",
+ };
+ struct msghdr mh = {
+ .msg_name = (struct sockaddr*) &sa.sa,
+ .msg_namelen = SOCKADDR_UN_LEN(sa.un),
+ };
+ ssize_t k;
+ bool have_syslog_identifier = false;
+ bool seal = true;
+
+ assert_return(iov, -EINVAL);
+ assert_return(n > 0, -EINVAL);
+
+ w = newa(struct iovec, n * 5 + 3);
+ l = newa(uint64_t, n);
+
+ for (i = 0; i < n; i++) {
+ char *c, *nl;
+
+ if (_unlikely_(!iov[i].iov_base || iov[i].iov_len <= 1))
+ return -EINVAL;
+
+ c = memchr(iov[i].iov_base, '=', iov[i].iov_len);
+ if (_unlikely_(!c || c == iov[i].iov_base))
+ return -EINVAL;
+
+ have_syslog_identifier = have_syslog_identifier ||
+ (c == (char *) iov[i].iov_base + 17 &&
+ startswith(iov[i].iov_base, "SYSLOG_IDENTIFIER"));
+
+ nl = memchr(iov[i].iov_base, '\n', iov[i].iov_len);
+ if (nl) {
+ if (_unlikely_(nl < c))
+ return -EINVAL;
+
+ /* Already includes a newline? Bummer, then
+ * let's write the variable name, then a
+ * newline, then the size (64-bit LE), followed
+ * by the data and a final newline */
+
+ w[j++] = IOVEC_MAKE(iov[i].iov_base, c - (char*) iov[i].iov_base);
+ w[j++] = IOVEC_MAKE_STRING("\n");
+
+ l[i] = htole64(iov[i].iov_len - (c - (char*) iov[i].iov_base) - 1);
+ w[j++] = IOVEC_MAKE(&l[i], sizeof(uint64_t));
+
+ w[j++] = IOVEC_MAKE(c + 1, iov[i].iov_len - (c - (char*) iov[i].iov_base) - 1);
+ } else
+ /* Nothing special? Then just add the line and
+ * append a newline */
+ w[j++] = iov[i];
+
+ w[j++] = IOVEC_MAKE_STRING("\n");
+ }
+
+ if (!have_syslog_identifier &&
+ string_is_safe(program_invocation_short_name)) {
+
+ /* Implicitly add program_invocation_short_name, if it
+ * is not set explicitly. We only do this for
+ * program_invocation_short_name, and nothing else
+ * since everything else is much nicer to retrieve
+ * from the outside. */
+
+ w[j++] = IOVEC_MAKE_STRING("SYSLOG_IDENTIFIER=");
+ w[j++] = IOVEC_MAKE_STRING(program_invocation_short_name);
+ w[j++] = IOVEC_MAKE_STRING("\n");
+ }
+
+ fd = journal_fd();
+ if (_unlikely_(fd < 0))
+ return fd;
+
+ mh.msg_iov = w;
+ mh.msg_iovlen = j;
+
+ k = sendmsg(fd, &mh, MSG_NOSIGNAL);
+ if (k >= 0)
+ return 0;
+
+ /* Fail silently if the journal is not available */
+ if (errno == ENOENT)
+ return 0;
+
+ if (!IN_SET(errno, EMSGSIZE, ENOBUFS, EAGAIN))
+ return -errno;
+
+ /* Message doesn't fit... Let's dump the data in a memfd or
+ * temporary file and just pass a file descriptor of it to the
+ * other side.
+ *
+ * For the temporary files we use /dev/shm instead of /tmp
+ * here, since we want this to be a tmpfs, and one that is
+ * available from early boot on and where unprivileged users
+ * can create files. */
+ buffer_fd = memfd_new(NULL);
+ if (buffer_fd < 0) {
+ if (buffer_fd == -ENOSYS) {
+ buffer_fd = open_tmpfile_unlinkable("/dev/shm", O_RDWR | O_CLOEXEC);
+ if (buffer_fd < 0)
+ return buffer_fd;
+
+ seal = false;
+ } else
+ return buffer_fd;
+ }
+
+ n = writev(buffer_fd, w, j);
+ if (n < 0)
+ return -errno;
+
+ if (seal) {
+ r = memfd_set_sealed(buffer_fd);
+ if (r < 0)
+ return r;
+ }
+
+ r = send_one_fd_sa(fd, buffer_fd, mh.msg_name, mh.msg_namelen, 0);
+ if (r == -ENOENT)
+ /* Fail silently if the journal is not available */
+ return 0;
+ return r;
+}
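The framing described in the comment above (field name, '\n', 64-bit little-endian length, payload, '\n') can also be produced by hand. Below is a minimal sketch for one binary field, independent of the iovec plumbing; the caller is assumed to supply a buffer of at least strlen(name) + size + 10 bytes.

#include <endian.h>
#include <stdint.h>
#include <string.h>

/* Serialize one binary journal field using the same wire framing as above. */
static size_t serialize_binary_field(uint8_t *buf, const char *name, const void *payload, size_t size) {
        size_t n = strlen(name);
        uint64_t le = htole64((uint64_t) size);

        memcpy(buf, name, n);
        buf[n++] = '\n';
        memcpy(buf + n, &le, sizeof(le));
        n += sizeof(le);
        memcpy(buf + n, payload, size);
        n += size;
        buf[n++] = '\n';

        return n;
}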
+
+static int fill_iovec_perror_and_send(const char *message, int skip, struct iovec iov[]) {
+ PROTECT_ERRNO;
+ size_t n, k;
+
+ k = isempty(message) ? 0 : strlen(message) + 2;
+ n = 8 + k + 256 + 1;
+
+ for (;;) {
+ char buffer[n];
+ char* j;
+
+ errno = 0;
+ j = strerror_r(_saved_errno_, buffer + 8 + k, n - 8 - k);
+ if (errno == 0) {
+ char error[STRLEN("ERRNO=") + DECIMAL_STR_MAX(int) + 1];
+
+ if (j != buffer + 8 + k)
+ memmove(buffer + 8 + k, j, strlen(j)+1);
+
+ memcpy(buffer, "MESSAGE=", 8);
+
+ if (k > 0) {
+ memcpy(buffer + 8, message, k - 2);
+ memcpy(buffer + 8 + k - 2, ": ", 2);
+ }
+
+ xsprintf(error, "ERRNO=%i", _saved_errno_);
+
+ assert_cc(3 == LOG_ERR);
+ iov[skip+0] = IOVEC_MAKE_STRING("PRIORITY=3");
+ iov[skip+1] = IOVEC_MAKE_STRING(buffer);
+ iov[skip+2] = IOVEC_MAKE_STRING(error);
+
+ return sd_journal_sendv(iov, skip + 3);
+ }
+
+ if (errno != ERANGE)
+ return -errno;
+
+ n *= 2;
+ }
+}
+
+_public_ int sd_journal_perror(const char *message) {
+ struct iovec iovec[3];
+
+ return fill_iovec_perror_and_send(message, 0, iovec);
+}
+
+_public_ int sd_journal_stream_fd(const char *identifier, int priority, int level_prefix) {
+ _cleanup_close_ int fd = -EBADF;
+ char *header;
+ size_t l;
+ int r;
+
+ assert_return(priority >= 0, -EINVAL);
+ assert_return(priority <= 7, -EINVAL);
+
+ fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0);
+ if (fd < 0)
+ return -errno;
+
+ r = connect_unix_path(fd, AT_FDCWD, "/run/systemd/journal/stdout");
+ if (r < 0)
+ return r;
+
+ if (shutdown(fd, SHUT_RD) < 0)
+ return -errno;
+
+ (void) fd_inc_sndbuf(fd, SNDBUF_SIZE);
+
+ identifier = strempty(identifier);
+
+ l = strlen(identifier);
+ header = newa(char, l + 1 + 1 + 2 + 2 + 2 + 2 + 2);
+
+ memcpy(header, identifier, l);
+ header[l++] = '\n';
+ header[l++] = '\n'; /* unit id */
+ header[l++] = '0' + priority;
+ header[l++] = '\n';
+ header[l++] = '0' + !!level_prefix;
+ header[l++] = '\n';
+ header[l++] = '0';
+ header[l++] = '\n';
+ header[l++] = '0';
+ header[l++] = '\n';
+ header[l++] = '0';
+ header[l++] = '\n';
+
+ r = loop_write(fd, header, l);
+ if (r < 0)
+ return r;
+
+ return TAKE_FD(fd);
+}
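The stream fd returned above is usually substituted for stdout or stderr, following the documented sd_journal_stream_fd(3) usage; a short hedged sketch (the identifier is illustrative):

#include <errno.h>
#include <unistd.h>
#include <systemd/sd-journal.h>

static int send_stderr_to_journal(void) {
        int fd = sd_journal_stream_fd("my-tool", LOG_INFO, /* level_prefix= */ 0);
        if (fd < 0)
                return fd;

        if (dup2(fd, STDERR_FILENO) < 0) {
                close(fd);
                return -errno;
        }

        close(fd);
        return 0;
}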
+
+_public_ int sd_journal_print_with_location(int priority, const char *file, const char *line, const char *func, const char *format, ...) {
+ int r;
+ va_list ap;
+
+ va_start(ap, format);
+ r = sd_journal_printv_with_location(priority, file, line, func, format, ap);
+ va_end(ap);
+
+ return r;
+}
+
+_public_ int sd_journal_printv_with_location(int priority, const char *file, const char *line, const char *func, const char *format, va_list ap) {
+ char p[STRLEN("PRIORITY=") + DECIMAL_STR_MAX(int) + 1];
+ char sbuf[LINE_MAX + 8] = "MESSAGE=";
+ struct iovec iov[5];
+ char *f;
+ int len;
+ char *buffer = sbuf;
+ va_list aq;
+
+ assert_return(priority >= 0, -EINVAL);
+ assert_return(priority <= 7, -EINVAL);
+ assert_return(format, -EINVAL);
+
+ xsprintf(p, "PRIORITY=%i", priority & LOG_PRIMASK);
+
+ va_copy(aq, ap);
+ len = vsnprintf(buffer + 8, LINE_MAX, format, aq);
+ va_end(aq);
+
+ if (len >= (int)LONG_LINE_MAX - 8)
+ return -ENOBUFS;
+
+ /* Allocate a larger buffer to accommodate the big message. */
+ if (len >= LINE_MAX) {
+ buffer = alloca_safe(len + 9);
+ memcpy(buffer, "MESSAGE=", 8);
+ assert_se(vsnprintf(buffer + 8, len + 1, format, ap) == len);
+ }
+
+ /* Strip trailing whitespace, keep leading whitespace. */
+ (void) strstrip(buffer);
+
+ /* Suppress empty lines */
+ if (isempty(buffer + 8))
+ return 0;
+
+ /* func is initialized from __func__, which is not a macro but
+ * a static const char[], hence it cannot easily be prefixed with
+ * CODE_FUNC=; let's do it manually here. */
+ ALLOCA_CODE_FUNC(f, func);
+
+ iov[0] = IOVEC_MAKE_STRING(buffer);
+ iov[1] = IOVEC_MAKE_STRING(p);
+ iov[2] = IOVEC_MAKE_STRING(file);
+ iov[3] = IOVEC_MAKE_STRING(line);
+ iov[4] = IOVEC_MAKE_STRING(f);
+
+ return sd_journal_sendv(iov, ELEMENTSOF(iov));
+}
+
+_public_ int sd_journal_send_with_location(const char *file, const char *line, const char *func, const char *format, ...) {
+ struct iovec *iov = NULL;
+ size_t n_iov = 0;
+ va_list ap;
+ char *f;
+ int r;
+
+ CLEANUP_ARRAY(iov, n_iov, iovec_array_free);
+
+ va_start(ap, format);
+ r = fill_iovec_sprintf(format, ap, 3, &iov, &n_iov);
+ va_end(ap);
+ if (r < 0)
+ return r;
+
+ ALLOCA_CODE_FUNC(f, func);
+
+ iov[0] = IOVEC_MAKE_STRING(file);
+ iov[1] = IOVEC_MAKE_STRING(line);
+ iov[2] = IOVEC_MAKE_STRING(f);
+
+ r = sd_journal_sendv(iov, n_iov);
+
+ iov[0] = iov[1] = iov[2] = (struct iovec) {};
+
+ return r;
+}
+
+_public_ int sd_journal_sendv_with_location(
+ const char *file, const char *line,
+ const char *func,
+ const struct iovec *iov, int n) {
+
+ struct iovec *niov;
+ char *f;
+
+ assert_return(iov, -EINVAL);
+ assert_return(n > 0, -EINVAL);
+
+ niov = newa(struct iovec, n + 3);
+ memcpy(niov, iov, sizeof(struct iovec) * n);
+
+ ALLOCA_CODE_FUNC(f, func);
+
+ niov[n++] = IOVEC_MAKE_STRING(file);
+ niov[n++] = IOVEC_MAKE_STRING(line);
+ niov[n++] = IOVEC_MAKE_STRING(f);
+
+ return sd_journal_sendv(niov, n);
+}
+
+_public_ int sd_journal_perror_with_location(
+ const char *file, const char *line,
+ const char *func,
+ const char *message) {
+
+ struct iovec iov[6];
+ char *f;
+
+ ALLOCA_CODE_FUNC(f, func);
+
+ iov[0] = IOVEC_MAKE_STRING(file);
+ iov[1] = IOVEC_MAKE_STRING(line);
+ iov[2] = IOVEC_MAKE_STRING(f);
+
+ return fill_iovec_perror_and_send(message, 3, iov);
+}
diff --git a/src/libsystemd/sd-journal/journal-send.h b/src/libsystemd/sd-journal/journal-send.h
new file mode 100644
index 0000000..24315e2
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-send.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include <stdbool.h>
+
+int journal_fd_nonblock(bool nonblock);
+void close_journal_fd(void);
diff --git a/src/libsystemd/sd-journal/journal-vacuum.c b/src/libsystemd/sd-journal/journal-vacuum.c
new file mode 100644
index 0000000..829edb3
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-vacuum.c
@@ -0,0 +1,330 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "sd-id128.h"
+
+#include "alloc-util.h"
+#include "dirent-util.h"
+#include "fd-util.h"
+#include "format-util.h"
+#include "fs-util.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "journal-internal.h"
+#include "journal-vacuum.h"
+#include "sort-util.h"
+#include "string-util.h"
+#include "time-util.h"
+#include "xattr-util.h"
+
+typedef struct vacuum_info {
+ uint64_t usage;
+ char *filename;
+
+ uint64_t realtime;
+
+ sd_id128_t seqnum_id;
+ uint64_t seqnum;
+ bool have_seqnum;
+} vacuum_info;
+
+static int vacuum_info_compare(const vacuum_info *a, const vacuum_info *b) {
+ int r;
+
+ if (a->have_seqnum && b->have_seqnum &&
+ sd_id128_equal(a->seqnum_id, b->seqnum_id))
+ return CMP(a->seqnum, b->seqnum);
+
+ r = CMP(a->realtime, b->realtime);
+ if (r != 0)
+ return r;
+
+ if (a->have_seqnum && b->have_seqnum)
+ return memcmp(&a->seqnum_id, &b->seqnum_id, 16);
+
+ return strcmp(a->filename, b->filename);
+}
+
+static void vacuum_info_array_free(vacuum_info *list, size_t n) {
+ if (!list)
+ return;
+
+ FOREACH_ARRAY(i, list, n)
+ free(i->filename);
+
+ free(list);
+}
+
+static void patch_realtime(
+ int fd,
+ const char *fn,
+ const struct stat *st,
+ unsigned long long *realtime) {
+
+ usec_t x;
+
+ /* The timestamp was determined by the file name, but let's see if the file might actually be older
+ * than the file name suggested... */
+
+ assert(fd >= 0);
+ assert(fn);
+ assert(st);
+ assert(realtime);
+
+ x = timespec_load(&st->st_ctim);
+ if (timestamp_is_set(x) && x < *realtime)
+ *realtime = x;
+
+ x = timespec_load(&st->st_atim);
+ if (timestamp_is_set(x) && x < *realtime)
+ *realtime = x;
+
+ x = timespec_load(&st->st_mtim);
+ if (timestamp_is_set(x) && x < *realtime)
+ *realtime = x;
+
+ /* Let's read the original creation time, if possible. Ideally we'd just query the creation time the
+ * FS might provide, but unfortunately there's currently no sane API to query it. Hence let's
+ * implement this manually... */
+
+ if (fd_getcrtime_at(fd, fn, AT_SYMLINK_FOLLOW, &x) >= 0 && x < *realtime)
+ *realtime = x;
+}
+
+static int journal_file_empty(int dir_fd, const char *name) {
+ _cleanup_close_ int fd = -EBADF;
+ struct stat st;
+ le64_t n_entries;
+ ssize_t n;
+
+ fd = openat(dir_fd, name, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK|O_NOATIME);
+ if (fd < 0) {
+ /* Maybe failed due to O_NOATIME and lack of privileges? */
+ fd = openat(dir_fd, name, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
+ if (fd < 0)
+ return -errno;
+ }
+
+ if (fstat(fd, &st) < 0)
+ return -errno;
+
+ /* If an offline file doesn't even have a header we consider it empty */
+ if (st.st_size < (off_t) sizeof(Header))
+ return 1;
+
+ /* If the number of entries is zero, we consider the file empty, too. */
+ n = pread(fd, &n_entries, sizeof(n_entries), offsetof(Header, n_entries));
+ if (n < 0)
+ return -errno;
+ if (n != sizeof(n_entries))
+ return -EIO;
+
+ return le64toh(n_entries) <= 0;
+}
+
+int journal_directory_vacuum(
+ const char *directory,
+ uint64_t max_use,
+ uint64_t n_max_files,
+ usec_t max_retention_usec,
+ usec_t *oldest_usec,
+ bool verbose) {
+
+ uint64_t sum = 0, freed = 0, n_active_files = 0;
+ size_t n_list = 0, i;
+ _cleanup_closedir_ DIR *d = NULL;
+ vacuum_info *list = NULL;
+ usec_t retention_limit = 0;
+ int r;
+
+ CLEANUP_ARRAY(list, n_list, vacuum_info_array_free);
+
+ assert(directory);
+
+ if (max_use <= 0 && max_retention_usec <= 0 && n_max_files <= 0)
+ return 0;
+
+ if (max_retention_usec > 0)
+ retention_limit = usec_sub_unsigned(now(CLOCK_REALTIME), max_retention_usec);
+
+ d = opendir(directory);
+ if (!d)
+ return -errno;
+
+ FOREACH_DIRENT_ALL(de, d, return -errno) {
+ unsigned long long seqnum = 0, realtime;
+ _cleanup_free_ char *p = NULL;
+ sd_id128_t seqnum_id;
+ bool have_seqnum;
+ uint64_t size;
+ struct stat st;
+ size_t q;
+
+ if (fstatat(dirfd(d), de->d_name, &st, AT_SYMLINK_NOFOLLOW) < 0) {
+ log_debug_errno(errno, "Failed to stat file %s while vacuuming, ignoring: %m", de->d_name);
+ continue;
+ }
+
+ if (!S_ISREG(st.st_mode))
+ continue;
+
+ size = 512UL * (uint64_t) st.st_blocks;
+
+ q = strlen(de->d_name);
+
+ if (endswith(de->d_name, ".journal")) {
+
+ /* Vacuum archived files. Active files are
+ * left around */
+
+ if (q < 1 + 32 + 1 + 16 + 1 + 16 + 8) {
+ n_active_files++;
+ sum += size;
+ continue;
+ }
+
+ if (de->d_name[q-8-16-1] != '-' ||
+ de->d_name[q-8-16-1-16-1] != '-' ||
+ de->d_name[q-8-16-1-16-1-32-1] != '@') {
+ n_active_files++;
+ sum += size;
+ continue;
+ }
+
+ p = strdup(de->d_name);
+ if (!p)
+ return -ENOMEM;
+
+ de->d_name[q-8-16-1-16-1] = 0;
+ if (sd_id128_from_string(de->d_name + q-8-16-1-16-1-32, &seqnum_id) < 0) {
+ n_active_files++;
+ sum += size;
+ continue;
+ }
+
+ if (sscanf(de->d_name + q-8-16-1-16, "%16llx-%16llx.journal", &seqnum, &realtime) != 2) {
+ n_active_files++;
+ sum += size;
+ continue;
+ }
+
+ have_seqnum = true;
+
+ } else if (endswith(de->d_name, ".journal~")) {
+ unsigned long long tmp;
+
+ /* seqnum_id won't be initialised before use below, so set to 0 */
+ seqnum_id = SD_ID128_NULL;
+
+ /* Vacuum corrupted files */
+
+ if (q < 1 + 16 + 1 + 16 + 8 + 1) {
+ n_active_files++;
+ sum += size;
+ continue;
+ }
+
+ if (de->d_name[q-1-8-16-1] != '-' ||
+ de->d_name[q-1-8-16-1-16-1] != '@') {
+ n_active_files++;
+ sum += size;
+ continue;
+ }
+
+ p = strdup(de->d_name);
+ if (!p)
+ return -ENOMEM;
+
+ if (sscanf(de->d_name + q-1-8-16-1-16, "%16llx-%16llx.journal~", &realtime, &tmp) != 2) {
+ n_active_files++;
+ sum += size;
+ continue;
+ }
+
+ have_seqnum = false;
+ } else {
+ /* We do not vacuum unknown files! */
+ log_debug("Not vacuuming unknown file %s.", de->d_name);
+ continue;
+ }
+
+ r = journal_file_empty(dirfd(d), p);
+ if (r < 0) {
+ log_debug_errno(r, "Failed check if %s is empty, ignoring: %m", p);
+ continue;
+ }
+ if (r > 0) {
+ /* Always vacuum empty non-online files. */
+
+ r = unlinkat_deallocate(dirfd(d), p, 0);
+ if (r >= 0) {
+
+ log_full(verbose ? LOG_INFO : LOG_DEBUG,
+ "Deleted empty archived journal %s/%s (%s).", directory, p, FORMAT_BYTES(size));
+
+ freed += size;
+ } else if (r != -ENOENT)
+ log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
+ "Failed to delete empty archived journal %s/%s: %m",
+ directory, p);
+
+ continue;
+ }
+
+ patch_realtime(dirfd(d), p, &st, &realtime);
+
+ if (!GREEDY_REALLOC(list, n_list + 1))
+ return -ENOMEM;
+
+ list[n_list++] = (vacuum_info) {
+ .filename = TAKE_PTR(p),
+ .usage = size,
+ .seqnum = seqnum,
+ .realtime = realtime,
+ .seqnum_id = seqnum_id,
+ .have_seqnum = have_seqnum,
+ };
+
+ sum += size;
+ }
+
+ typesafe_qsort(list, n_list, vacuum_info_compare);
+
+ for (i = 0; i < n_list; i++) {
+ uint64_t left;
+
+ left = n_active_files + n_list - i;
+
+ if ((max_retention_usec <= 0 || list[i].realtime >= retention_limit) &&
+ (max_use <= 0 || sum <= max_use) &&
+ (n_max_files <= 0 || left <= n_max_files))
+ break;
+
+ r = unlinkat_deallocate(dirfd(d), list[i].filename, 0);
+ if (r >= 0) {
+ log_full(verbose ? LOG_INFO : LOG_DEBUG, "Deleted archived journal %s/%s (%s).",
+ directory, list[i].filename, FORMAT_BYTES(list[i].usage));
+ freed += list[i].usage;
+
+ if (list[i].usage < sum)
+ sum -= list[i].usage;
+ else
+ sum = 0;
+
+ } else if (r != -ENOENT)
+ log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
+ "Failed to delete archived journal %s/%s: %m",
+ directory, list[i].filename);
+ }
+
+ if (oldest_usec && i < n_list && (*oldest_usec == 0 || list[i].realtime < *oldest_usec))
+ *oldest_usec = list[i].realtime;
+
+ log_full(verbose ? LOG_INFO : LOG_DEBUG, "Vacuuming done, freed %s of archived journals from %s.",
+ FORMAT_BYTES(freed), directory);
+
+ return 0;
+}
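A hedged sketch of a caller of journal_directory_vacuum() above; the directory and limits are illustrative, and the real policy lives in journald and journalctl.

#include "journal-vacuum.h"

static int vacuum_example(void) {
        usec_t oldest = 0;

        /* Keep at most 1 GiB in at most 100 archived files, and nothing older than 30 days. */
        return journal_directory_vacuum("/var/log/journal/machine-id",
                                        UINT64_C(1024) * 1024 * 1024,
                                        100,
                                        30 * USEC_PER_DAY,
                                        &oldest,
                                        /* verbose= */ true);
}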
diff --git a/src/libsystemd/sd-journal/journal-vacuum.h b/src/libsystemd/sd-journal/journal-vacuum.h
new file mode 100644
index 0000000..d87c847
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-vacuum.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include "time-util.h"
+
+int journal_directory_vacuum(const char *directory, uint64_t max_use, uint64_t n_max_files, usec_t max_retention_usec, usec_t *oldest_usec, bool verbose);
diff --git a/src/libsystemd/sd-journal/journal-verify.c b/src/libsystemd/sd-journal/journal-verify.c
new file mode 100644
index 0000000..bdaa01d
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-verify.c
@@ -0,0 +1,1436 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "alloc-util.h"
+#include "compress.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "fs-util.h"
+#include "journal-authenticate.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "journal-verify.h"
+#include "lookup3.h"
+#include "macro.h"
+#include "terminal-util.h"
+#include "tmpfile-util.h"
+
+static void draw_progress(uint64_t p, usec_t *last_usec) {
+ unsigned n, i, j, k;
+ usec_t z, x;
+
+ if (!on_tty())
+ return;
+
+ z = now(CLOCK_MONOTONIC);
+ x = *last_usec;
+
+ if (x != 0 && x + 40 * USEC_PER_MSEC > z)
+ return;
+
+ *last_usec = z;
+
+ n = (3 * columns()) / 4;
+ j = (n * (unsigned) p) / 65535ULL;
+ k = n - j;
+
+ fputs("\r", stdout);
+ if (colors_enabled())
+ fputs("\x1B[?25l", stdout);
+
+ fputs(ansi_highlight_green(), stdout);
+
+ for (i = 0; i < j; i++)
+ fputs("\xe2\x96\x88", stdout);
+
+ fputs(ansi_normal(), stdout);
+
+ for (i = 0; i < k; i++)
+ fputs("\xe2\x96\x91", stdout);
+
+ printf(" %3"PRIu64"%%", 100U * p / 65535U);
+
+ fputs("\r", stdout);
+ if (colors_enabled())
+ fputs("\x1B[?25h", stdout);
+
+ fflush(stdout);
+}
+
+static uint64_t scale_progress(uint64_t scale, uint64_t p, uint64_t m) {
+ /* Calculates scale * p / m, but handles m == 0 safely, and saturates.
+ * Currently all callers use m >= 1, but we keep the check to be defensive.
+ */
+
+ if (p >= m || m == 0)
+ return scale;
+
+ return scale * p / m;
+}
+
+static void flush_progress(void) {
+ unsigned n, i;
+
+ if (!on_tty())
+ return;
+
+ n = (3 * columns()) / 4;
+
+ putchar('\r');
+
+ for (i = 0; i < n + 5; i++)
+ putchar(' ');
+
+ putchar('\r');
+ fflush(stdout);
+}
+
+#define debug(_offset, _fmt, ...) do { \
+ flush_progress(); \
+ log_debug(OFSfmt": " _fmt, _offset, ##__VA_ARGS__); \
+ } while (0)
+
+#define warning(_offset, _fmt, ...) do { \
+ flush_progress(); \
+ log_warning(OFSfmt": " _fmt, _offset, ##__VA_ARGS__); \
+ } while (0)
+
+#define error(_offset, _fmt, ...) do { \
+ flush_progress(); \
+ log_error(OFSfmt": " _fmt, (uint64_t)_offset, ##__VA_ARGS__); \
+ } while (0)
+
+#define error_errno(_offset, error, _fmt, ...) do { \
+ flush_progress(); \
+ log_error_errno(error, OFSfmt": " _fmt, (uint64_t)_offset, ##__VA_ARGS__); \
+ } while (0)
+
+static int hash_payload(JournalFile *f, Object *o, uint64_t offset, const uint8_t *src, uint64_t size, uint64_t *res_hash) {
+ Compression c;
+ int r;
+
+ assert(o);
+ assert(src);
+ assert(res_hash);
+
+ c = COMPRESSION_FROM_OBJECT(o);
+ if (c < 0)
+ return -EBADMSG;
+ if (c != COMPRESSION_NONE) {
+ _cleanup_free_ void *b = NULL;
+ size_t b_size;
+
+ r = decompress_blob(c, src, size, &b, &b_size, 0);
+ if (r < 0) {
+ error_errno(offset, r, "%s decompression failed: %m",
+ compression_to_string(c));
+ return r;
+ }
+
+ *res_hash = journal_file_hash_data(f, b, b_size);
+ } else
+ *res_hash = journal_file_hash_data(f, src, size);
+
+ return 0;
+}
+
+static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o) {
+ assert(f);
+ assert(offset);
+ assert(o);
+
+ /* This does various superficial tests about the length and
+ * possible field values. It does not follow any references to
+ * other objects. */
+
+ if ((o->object.flags & _OBJECT_COMPRESSED_MASK) != 0 &&
+ o->object.type != OBJECT_DATA) {
+ error(offset,
+ "Found compressed object of type %s that isn't of type data, which is not allowed.",
+ journal_object_type_to_string(o->object.type));
+ return -EBADMSG;
+ }
+
+ switch (o->object.type) {
+
+ case OBJECT_DATA: {
+ uint64_t h1, h2;
+ int r;
+
+ if (le64toh(o->data.entry_offset) == 0)
+ warning(offset, "Unused data (entry_offset==0)");
+
+ if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0)) {
+ error(offset, "Bad n_entries: %"PRIu64, le64toh(o->data.n_entries));
+ return -EBADMSG;
+ }
+
+ if (le64toh(o->object.size) - journal_file_data_payload_offset(f) <= 0) {
+ error(offset, "Bad object size (<= %zu): %"PRIu64,
+ journal_file_data_payload_offset(f),
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ h1 = le64toh(o->data.hash);
+ r = hash_payload(f, o, offset, journal_file_data_payload_field(f, o),
+ le64toh(o->object.size) - journal_file_data_payload_offset(f),
+ &h2);
+ if (r < 0)
+ return r;
+
+ if (h1 != h2) {
+ error(offset, "Invalid hash (%08" PRIx64 " vs. %08" PRIx64 ")", h1, h2);
+ return -EBADMSG;
+ }
+
+ if (!VALID64(le64toh(o->data.next_hash_offset)) ||
+ !VALID64(le64toh(o->data.next_field_offset)) ||
+ !VALID64(le64toh(o->data.entry_offset)) ||
+ !VALID64(le64toh(o->data.entry_array_offset))) {
+ error(offset, "Invalid offset (next_hash_offset="OFSfmt", next_field_offset="OFSfmt", entry_offset="OFSfmt", entry_array_offset="OFSfmt,
+ le64toh(o->data.next_hash_offset),
+ le64toh(o->data.next_field_offset),
+ le64toh(o->data.entry_offset),
+ le64toh(o->data.entry_array_offset));
+ return -EBADMSG;
+ }
+
+ break;
+ }
+
+ case OBJECT_FIELD: {
+ uint64_t h1, h2;
+ int r;
+
+ if (le64toh(o->object.size) - offsetof(Object, field.payload) <= 0) {
+ error(offset,
+ "Bad field size (<= %zu): %"PRIu64,
+ offsetof(Object, field.payload),
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ h1 = le64toh(o->field.hash);
+ r = hash_payload(f, o, offset, o->field.payload,
+ le64toh(o->object.size) - offsetof(Object, field.payload),
+ &h2);
+ if (r < 0)
+ return r;
+
+ if (h1 != h2) {
+ error(offset, "Invalid hash (%08" PRIx64 " vs. %08" PRIx64 ")", h1, h2);
+ return -EBADMSG;
+ }
+
+ if (!VALID64(le64toh(o->field.next_hash_offset)) ||
+ !VALID64(le64toh(o->field.head_data_offset))) {
+ error(offset,
+ "Invalid offset (next_hash_offset="OFSfmt", head_data_offset="OFSfmt,
+ le64toh(o->field.next_hash_offset),
+ le64toh(o->field.head_data_offset));
+ return -EBADMSG;
+ }
+ break;
+ }
+
+ case OBJECT_ENTRY:
+ if ((le64toh(o->object.size) - offsetof(Object, entry.items)) % journal_file_entry_item_size(f) != 0) {
+ error(offset,
+ "Bad entry size (<= %zu): %"PRIu64,
+ offsetof(Object, entry.items),
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ if ((le64toh(o->object.size) - offsetof(Object, entry.items)) / journal_file_entry_item_size(f) <= 0) {
+ error(offset,
+ "Invalid number items in entry: %"PRIu64,
+ (le64toh(o->object.size) - offsetof(Object, entry.items)) / journal_file_entry_item_size(f));
+ return -EBADMSG;
+ }
+
+ if (le64toh(o->entry.seqnum) <= 0) {
+ error(offset,
+ "Invalid entry seqnum: %"PRIx64,
+ le64toh(o->entry.seqnum));
+ return -EBADMSG;
+ }
+
+ if (!VALID_REALTIME(le64toh(o->entry.realtime))) {
+ error(offset,
+ "Invalid entry realtime timestamp: %"PRIu64,
+ le64toh(o->entry.realtime));
+ return -EBADMSG;
+ }
+
+ if (!VALID_MONOTONIC(le64toh(o->entry.monotonic))) {
+ error(offset,
+ "Invalid entry monotonic timestamp: %"PRIu64,
+ le64toh(o->entry.monotonic));
+ return -EBADMSG;
+ }
+
+ for (uint64_t i = 0; i < journal_file_entry_n_items(f, o); i++) {
+ if (journal_file_entry_item_object_offset(f, o, i) == 0 ||
+ !VALID64(journal_file_entry_item_object_offset(f, o, i))) {
+ error(offset,
+ "Invalid entry item (%"PRIu64"/%"PRIu64") offset: "OFSfmt,
+ i, journal_file_entry_n_items(f, o),
+ journal_file_entry_item_object_offset(f, o, i));
+ return -EBADMSG;
+ }
+ }
+
+ break;
+
+ case OBJECT_DATA_HASH_TABLE:
+ case OBJECT_FIELD_HASH_TABLE:
+ if ((le64toh(o->object.size) - offsetof(Object, hash_table.items)) % sizeof(HashItem) != 0 ||
+ (le64toh(o->object.size) - offsetof(Object, hash_table.items)) / sizeof(HashItem) <= 0) {
+ error(offset,
+ "Invalid %s size: %"PRIu64,
+ journal_object_type_to_string(o->object.type),
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ for (uint64_t i = 0; i < journal_file_hash_table_n_items(o); i++) {
+ if (o->hash_table.items[i].head_hash_offset != 0 &&
+ !VALID64(le64toh(o->hash_table.items[i].head_hash_offset))) {
+ error(offset,
+ "Invalid %s hash table item (%"PRIu64"/%"PRIu64") head_hash_offset: "OFSfmt,
+ journal_object_type_to_string(o->object.type),
+ i, journal_file_hash_table_n_items(o),
+ le64toh(o->hash_table.items[i].head_hash_offset));
+ return -EBADMSG;
+ }
+ if (o->hash_table.items[i].tail_hash_offset != 0 &&
+ !VALID64(le64toh(o->hash_table.items[i].tail_hash_offset))) {
+ error(offset,
+ "Invalid %s hash table item (%"PRIu64"/%"PRIu64") tail_hash_offset: "OFSfmt,
+ journal_object_type_to_string(o->object.type),
+ i, journal_file_hash_table_n_items(o),
+ le64toh(o->hash_table.items[i].tail_hash_offset));
+ return -EBADMSG;
+ }
+
+ if ((o->hash_table.items[i].head_hash_offset != 0) !=
+ (o->hash_table.items[i].tail_hash_offset != 0)) {
+ error(offset,
+ "Invalid %s hash table item (%"PRIu64"/%"PRIu64"): head_hash_offset="OFSfmt" tail_hash_offset="OFSfmt,
+ journal_object_type_to_string(o->object.type),
+ i, journal_file_hash_table_n_items(o),
+ le64toh(o->hash_table.items[i].head_hash_offset),
+ le64toh(o->hash_table.items[i].tail_hash_offset));
+ return -EBADMSG;
+ }
+ }
+
+ break;
+
+ case OBJECT_ENTRY_ARRAY:
+ if ((le64toh(o->object.size) - offsetof(Object, entry_array.items)) % journal_file_entry_array_item_size(f) != 0 ||
+ (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f) <= 0) {
+ error(offset,
+ "Invalid object entry array size: %"PRIu64,
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ if (!VALID64(le64toh(o->entry_array.next_entry_array_offset))) {
+ error(offset,
+ "Invalid object entry array next_entry_array_offset: "OFSfmt,
+ le64toh(o->entry_array.next_entry_array_offset));
+ return -EBADMSG;
+ }
+
+ for (uint64_t i = 0; i < journal_file_entry_array_n_items(f, o); i++) {
+ uint64_t q = journal_file_entry_array_item(f, o, i);
+ if (q != 0 && !VALID64(q)) {
+ error(offset,
+ "Invalid object entry array item (%"PRIu64"/%"PRIu64"): "OFSfmt,
+ i, journal_file_entry_array_n_items(f, o), q);
+ return -EBADMSG;
+ }
+ }
+
+ break;
+
+ case OBJECT_TAG:
+ if (le64toh(o->object.size) != sizeof(TagObject)) {
+ error(offset,
+ "Invalid object tag size: %"PRIu64,
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ if (!VALID_EPOCH(le64toh(o->tag.epoch))) {
+ error(offset,
+ "Invalid object tag epoch: %"PRIu64,
+ le64toh(o->tag.epoch));
+ return -EBADMSG;
+ }
+
+ break;
+ }
+
+ return 0;
+}
+
+static int write_uint64(FILE *fp, uint64_t p) {
+ if (fwrite(&p, sizeof(p), 1, fp) != 1)
+ return -EIO;
+
+ return 0;
+}
+
+static int contains_uint64(MMapFileDescriptor *f, uint64_t n, uint64_t p) {
+ uint64_t a, b;
+ int r;
+
+ assert(f);
+
+ /* Bisection ... */
+
+ a = 0; b = n;
+ while (a < b) {
+ uint64_t c, *z;
+
+ c = (a + b) / 2;
+
+ r = mmap_cache_fd_get(f, 0, false, c * sizeof(uint64_t), sizeof(uint64_t), NULL, (void **) &z);
+ if (r < 0)
+ return r;
+
+ if (*z == p)
+ return 1;
+
+ if (a + 1 >= b)
+ return 0;
+
+ if (p < *z)
+ b = c;
+ else
+ a = c;
+ }
+
+ return 0;
+}
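+
+/* Usage sketch (illustrative only): the verification passes below first append
+ * every relevant object offset in ascending order with write_uint64() and
+ * flush the stream, and only then bisect the resulting sorted array through
+ * the mmap cache with contains_uint64(), roughly along these lines:
+ *
+ *     write_uint64(entry_fp, p);                           (first pass; p only grows)
+ *     fflush(entry_fp);
+ *     if (!contains_uint64(cache_entry_fd, n_entries, q))
+ *             return -EBADMSG;                             (q is not a known entry offset)
+ */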
+
+static int verify_data(
+ JournalFile *f,
+ Object *o, uint64_t p,
+ MMapFileDescriptor *cache_entry_fd, uint64_t n_entries,
+ MMapFileDescriptor *cache_entry_array_fd, uint64_t n_entry_arrays) {
+
+ uint64_t i, n, a, last, q;
+ int r;
+
+ assert(f);
+ assert(o);
+ assert(cache_entry_fd);
+ assert(cache_entry_array_fd);
+
+ n = le64toh(o->data.n_entries);
+ a = le64toh(o->data.entry_array_offset);
+
+ /* Entry array means at least two objects */
+ if (a && n < 2) {
+ error(p, "Entry array present (entry_array_offset="OFSfmt", but n_entries=%"PRIu64")", a, n);
+ return -EBADMSG;
+ }
+
+ if (n == 0)
+ return 0;
+
+ /* We already checked that earlier */
+ assert(o->data.entry_offset);
+
+ last = q = le64toh(o->data.entry_offset);
+ if (!contains_uint64(cache_entry_fd, n_entries, q)) {
+ error(p, "Data object references invalid entry at "OFSfmt, q);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_entry_by_offset(f, q, DIRECTION_DOWN, NULL, NULL);
+ if (r < 0)
+ return r;
+ if (r == 0) {
+ error(q, "Entry object doesn't exist in the main entry array");
+ return -EBADMSG;
+ }
+
+ i = 1;
+ while (i < n) {
+ uint64_t next, m, j;
+
+ if (a == 0) {
+ error(p, "Array chain too short");
+ return -EBADMSG;
+ }
+
+ if (!contains_uint64(cache_entry_array_fd, n_entry_arrays, a)) {
+ error(p, "Invalid array offset "OFSfmt, a);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+
+ next = le64toh(o->entry_array.next_entry_array_offset);
+ if (next != 0 && next <= a) {
+ error(p, "Array chain has cycle (jumps back from "OFSfmt" to "OFSfmt")", a, next);
+ return -EBADMSG;
+ }
+
+ m = journal_file_entry_array_n_items(f, o);
+ for (j = 0; i < n && j < m; i++, j++) {
+
+ q = journal_file_entry_array_item(f, o, j);
+ if (q <= last) {
+ error(p, "Data object's entry array not sorted (%"PRIu64" <= %"PRIu64")", q, last);
+ return -EBADMSG;
+ }
+ last = q;
+
+ if (!contains_uint64(cache_entry_fd, n_entries, q)) {
+ error(p, "Data object references invalid entry at "OFSfmt, q);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_entry_by_offset(f, q, DIRECTION_DOWN, NULL, NULL);
+ if (r < 0)
+ return r;
+ if (r == 0) {
+ error(q, "Entry object doesn't exist in the main entry array");
+ return -EBADMSG;
+ }
+
+ /* Pointer might have moved, reposition */
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+ }
+
+ a = next;
+ }
+
+ return 0;
+}
+
+static int verify_data_hash_table(
+ JournalFile *f,
+ MMapFileDescriptor *cache_data_fd, uint64_t n_data,
+ MMapFileDescriptor *cache_entry_fd, uint64_t n_entries,
+ MMapFileDescriptor *cache_entry_array_fd, uint64_t n_entry_arrays,
+ usec_t *last_usec,
+ bool show_progress) {
+
+ uint64_t i, n;
+ int r;
+
+ assert(f);
+ assert(cache_data_fd);
+ assert(cache_entry_fd);
+ assert(cache_entry_array_fd);
+ assert(last_usec);
+
+ n = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+ if (n <= 0)
+ return 0;
+
+ r = journal_file_map_data_hash_table(f);
+ if (r < 0)
+ return log_error_errno(r, "Failed to map data hash table: %m");
+
+ for (i = 0; i < n; i++) {
+ uint64_t last = 0, p;
+
+ if (show_progress)
+ draw_progress(0xC000 + scale_progress(0x3FFF, i, n), last_usec);
+
+ p = le64toh(f->data_hash_table[i].head_hash_offset);
+ while (p != 0) {
+ Object *o;
+ uint64_t next;
+
+ if (!contains_uint64(cache_data_fd, n_data, p)) {
+ error(p, "Invalid data object at hash entry %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ next = le64toh(o->data.next_hash_offset);
+ if (next != 0 && next <= p) {
+ error(p, "Hash chain has a cycle in hash entry %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ if (le64toh(o->data.hash) % n != i) {
+ error(p, "Hash value mismatch in hash entry %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ r = verify_data(f, o, p, cache_entry_fd, n_entries, cache_entry_array_fd, n_entry_arrays);
+ if (r < 0)
+ return r;
+
+ last = p;
+ p = next;
+ }
+
+ if (last != le64toh(f->data_hash_table[i].tail_hash_offset)) {
+ error(p,
+ "Tail hash pointer mismatch in hash table (%"PRIu64" != %"PRIu64")",
+ last,
+ le64toh(f->data_hash_table[i].tail_hash_offset));
+ return -EBADMSG;
+ }
+ }
+
+ return 0;
+}
+
+static int data_object_in_hash_table(JournalFile *f, uint64_t hash, uint64_t p) {
+ uint64_t n, h, q;
+ int r;
+ assert(f);
+
+ n = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+ if (n <= 0)
+ return 0;
+
+ r = journal_file_map_data_hash_table(f);
+ if (r < 0)
+ return log_error_errno(r, "Failed to map data hash table: %m");
+
+ h = hash % n;
+
+ q = le64toh(f->data_hash_table[h].head_hash_offset);
+ while (q != 0) {
+ Object *o;
+
+ if (p == q)
+ return 1;
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, q, &o);
+ if (r < 0)
+ return r;
+
+ q = le64toh(o->data.next_hash_offset);
+ }
+
+ return 0;
+}
+
+static int verify_entry(
+ JournalFile *f,
+ Object *o, uint64_t p,
+ MMapFileDescriptor *cache_data_fd, uint64_t n_data,
+ bool last) {
+
+ uint64_t i, n;
+ int r;
+
+ assert(f);
+ assert(o);
+ assert(cache_data_fd);
+
+ n = journal_file_entry_n_items(f, o);
+ for (i = 0; i < n; i++) {
+ uint64_t q;
+ Object *u;
+
+ q = journal_file_entry_item_object_offset(f, o, i);
+
+ if (!contains_uint64(cache_data_fd, n_data, q)) {
+ error(p, "Invalid data object of entry");
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, q, &u);
+ if (r < 0)
+ return r;
+
+ r = data_object_in_hash_table(f, le64toh(u->data.hash), q);
+ if (r < 0)
+ return r;
+ if (r == 0) {
+ error(p, "Data object missing from hash table");
+ return -EBADMSG;
+ }
+
+ /* Pointer might have moved, reposition */
+ r = journal_file_move_to_object(f, OBJECT_DATA, q, &u);
+ if (r < 0)
+ return r;
+
+ r = journal_file_move_to_entry_by_offset_for_data(f, u, p, DIRECTION_DOWN, NULL, NULL);
+ if (r < 0)
+ return r;
+
+                /* The last entry object has a very high chance of not being referenced, as journal files
+                 * almost always run out of space during linking of entry items when trying to add a new
+                 * entry array, so let's not error in that scenario. */
+ if (r == 0 && !last) {
+ error(p, "Entry object not referenced by linked data object at "OFSfmt, q);
+ return -EBADMSG;
+ }
+ }
+
+ return 0;
+}
+
+static int verify_entry_array(
+ JournalFile *f,
+ MMapFileDescriptor *cache_data_fd, uint64_t n_data,
+ MMapFileDescriptor *cache_entry_fd, uint64_t n_entries,
+ MMapFileDescriptor *cache_entry_array_fd, uint64_t n_entry_arrays,
+ usec_t *last_usec,
+ bool show_progress) {
+
+ uint64_t i = 0, a, n, last = 0;
+ int r;
+
+ assert(f);
+ assert(cache_data_fd);
+ assert(cache_entry_fd);
+ assert(cache_entry_array_fd);
+ assert(last_usec);
+
+ n = le64toh(f->header->n_entries);
+ a = le64toh(f->header->entry_array_offset);
+ while (i < n) {
+ uint64_t next, m, j;
+ Object *o;
+
+ if (show_progress)
+ draw_progress(0x8000 + scale_progress(0x3FFF, i, n), last_usec);
+
+ if (a == 0) {
+ error(a, "Array chain too short at %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ if (!contains_uint64(cache_entry_array_fd, n_entry_arrays, a)) {
+ error(a, "Invalid array %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+
+ next = le64toh(o->entry_array.next_entry_array_offset);
+ if (next != 0 && next <= a) {
+ error(a, "Array chain has cycle at %"PRIu64" of %"PRIu64" (jumps back from to "OFSfmt")", i, n, next);
+ return -EBADMSG;
+ }
+
+ m = journal_file_entry_array_n_items(f, o);
+ for (j = 0; i < n && j < m; i++, j++) {
+ uint64_t p;
+
+ p = journal_file_entry_array_item(f, o, j);
+ if (p <= last) {
+ error(a, "Entry array not sorted at %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+ last = p;
+
+ if (!contains_uint64(cache_entry_fd, n_entries, p)) {
+ error(a, "Invalid array entry at %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ r = verify_entry(f, o, p, cache_data_fd, n_data, /*last=*/ i + 1 == n);
+ if (r < 0)
+ return r;
+
+ /* Pointer might have moved, reposition */
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+ }
+
+ a = next;
+ }
+
+ return 0;
+}
+
+static int verify_hash_table(
+ Object *o, uint64_t p, uint64_t *n_hash_tables, uint64_t header_offset, uint64_t header_size) {
+
+ assert(o);
+ assert(n_hash_tables);
+
+ if (*n_hash_tables > 1) {
+ error(p,
+ "More than one %s: %" PRIu64,
+ journal_object_type_to_string(o->object.type),
+ *n_hash_tables);
+ return -EBADMSG;
+ }
+
+ if (header_offset != p + offsetof(Object, hash_table.items)) {
+ error(p,
+ "Header offset for %s invalid (%" PRIu64 " != %" PRIu64 ")",
+ journal_object_type_to_string(o->object.type),
+ header_offset,
+ p + offsetof(Object, hash_table.items));
+ return -EBADMSG;
+ }
+
+ if (header_size != le64toh(o->object.size) - offsetof(Object, hash_table.items)) {
+ error(p,
+ "Header size for %s invalid (%" PRIu64 " != %" PRIu64 ")",
+ journal_object_type_to_string(o->object.type),
+ header_size,
+ le64toh(o->object.size) - offsetof(Object, hash_table.items));
+ return -EBADMSG;
+ }
+
+ (*n_hash_tables)++;
+
+ return 0;
+}
+
+int journal_file_verify(
+ JournalFile *f,
+ const char *key,
+ usec_t *first_contained, usec_t *last_validated, usec_t *last_contained,
+ bool show_progress) {
+ int r;
+ Object *o;
+ uint64_t p = 0, last_epoch = 0, last_tag_realtime = 0;
+
+ uint64_t entry_seqnum = 0, entry_monotonic = 0, entry_realtime = 0;
+ usec_t min_entry_realtime = USEC_INFINITY, max_entry_realtime = 0;
+ sd_id128_t entry_boot_id = {}; /* Unnecessary initialization to appease gcc */
+ bool entry_seqnum_set = false, entry_monotonic_set = false, entry_realtime_set = false, found_main_entry_array = false;
+ uint64_t n_objects = 0, n_entries = 0, n_data = 0, n_fields = 0, n_data_hash_tables = 0, n_field_hash_tables = 0, n_entry_arrays = 0, n_tags = 0;
+ usec_t last_usec = 0;
+ _cleanup_close_ int data_fd = -EBADF, entry_fd = -EBADF, entry_array_fd = -EBADF;
+ _cleanup_fclose_ FILE *data_fp = NULL, *entry_fp = NULL, *entry_array_fp = NULL;
+ MMapFileDescriptor *cache_data_fd = NULL, *cache_entry_fd = NULL, *cache_entry_array_fd = NULL;
+ unsigned i;
+ bool found_last = false;
+ const char *tmp_dir = NULL;
+ MMapCache *m;
+
+#if HAVE_GCRYPT
+ uint64_t last_tag = 0;
+#endif
+ assert(f);
+
+ if (key) {
+#if HAVE_GCRYPT
+ r = journal_file_parse_verification_key(f, key);
+ if (r < 0) {
+ log_error("Failed to parse seed.");
+ return r;
+ }
+#else
+ return -EOPNOTSUPP;
+#endif
+ } else if (JOURNAL_HEADER_SEALED(f->header))
+ return -ENOKEY;
+
+ r = var_tmp_dir(&tmp_dir);
+ if (r < 0) {
+ log_error_errno(r, "Failed to determine temporary directory: %m");
+ goto fail;
+ }
+
+ data_fd = open_tmpfile_unlinkable(tmp_dir, O_RDWR | O_CLOEXEC);
+ if (data_fd < 0) {
+ r = log_error_errno(data_fd, "Failed to create data file: %m");
+ goto fail;
+ }
+
+ entry_fd = open_tmpfile_unlinkable(tmp_dir, O_RDWR | O_CLOEXEC);
+ if (entry_fd < 0) {
+ r = log_error_errno(entry_fd, "Failed to create entry file: %m");
+ goto fail;
+ }
+
+ entry_array_fd = open_tmpfile_unlinkable(tmp_dir, O_RDWR | O_CLOEXEC);
+ if (entry_array_fd < 0) {
+ r = log_error_errno(entry_array_fd,
+ "Failed to create entry array file: %m");
+ goto fail;
+ }
+
+ m = mmap_cache_fd_cache(f->cache_fd);
+ r = mmap_cache_add_fd(m, data_fd, PROT_READ|PROT_WRITE, &cache_data_fd);
+ if (r < 0) {
+ log_error_errno(r, "Failed to cache data file: %m");
+ goto fail;
+ }
+
+ r = mmap_cache_add_fd(m, entry_fd, PROT_READ|PROT_WRITE, &cache_entry_fd);
+ if (r < 0) {
+ log_error_errno(r, "Failed to cache entry file: %m");
+ goto fail;
+ }
+
+ r = mmap_cache_add_fd(m, entry_array_fd, PROT_READ|PROT_WRITE, &cache_entry_array_fd);
+ if (r < 0) {
+ log_error_errno(r, "Failed to cache entry array file: %m");
+ goto fail;
+ }
+
+ r = take_fdopen_unlocked(&data_fd, "w+", &data_fp);
+ if (r < 0) {
+ log_error_errno(r, "Failed to open data file stream: %m");
+ goto fail;
+ }
+
+ r = take_fdopen_unlocked(&entry_fd, "w+", &entry_fp);
+ if (r < 0) {
+ log_error_errno(r, "Failed to open entry file stream: %m");
+ goto fail;
+ }
+
+ r = take_fdopen_unlocked(&entry_array_fd, "w+", &entry_array_fp);
+ if (r < 0) {
+ log_error_errno(r, "Failed to open entry array file stream: %m");
+ goto fail;
+ }
+
+ if (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_SUPPORTED) {
+ log_error("Cannot verify file with unknown extensions.");
+ r = -EOPNOTSUPP;
+ goto fail;
+ }
+
+ for (i = 0; i < sizeof(f->header->reserved); i++)
+ if (f->header->reserved[i] != 0) {
+ error(offsetof(Header, reserved[i]), "Reserved field is non-zero");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_SEALED_CONTINUOUS(f->header))
+ warning(p,
+ "This log file was sealed with an old journald version where the sequence of seals might not be continuous. We cannot guarantee completeness.");
+
+ /* First iteration: we go through all objects, verify the
+ * superficial structure, headers, hashes. */
+
+ p = le64toh(f->header->header_size);
+ for (;;) {
+ /* Early exit if there are no objects in the file, at all */
+ if (le64toh(f->header->tail_object_offset) == 0)
+ break;
+
+ if (show_progress)
+ draw_progress(scale_progress(0x7FFF, p, le64toh(f->header->tail_object_offset)), &last_usec);
+
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
+ if (r < 0) {
+ error_errno(p, r, "Invalid object: %m");
+ goto fail;
+ }
+
+ if (p > le64toh(f->header->tail_object_offset)) {
+ error(offsetof(Header, tail_object_offset),
+ "Invalid tail object pointer (%"PRIu64" > %"PRIu64")",
+ p,
+ le64toh(f->header->tail_object_offset));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ n_objects++;
+
+ r = journal_file_object_verify(f, p, o);
+ if (r < 0) {
+ error_errno(p, r, "Invalid object contents: %m");
+ goto fail;
+ }
+
+ if (!!(o->object.flags & OBJECT_COMPRESSED_XZ) +
+ !!(o->object.flags & OBJECT_COMPRESSED_LZ4) +
+ !!(o->object.flags & OBJECT_COMPRESSED_ZSTD) > 1) {
+ error(p, "Object has multiple compression flags set (flags: 0x%x)", o->object.flags);
+ r = -EINVAL;
+ goto fail;
+ }
+
+ if ((o->object.flags & OBJECT_COMPRESSED_XZ) && !JOURNAL_HEADER_COMPRESSED_XZ(f->header)) {
+ error(p, "XZ compressed object in file without XZ compression");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if ((o->object.flags & OBJECT_COMPRESSED_LZ4) && !JOURNAL_HEADER_COMPRESSED_LZ4(f->header)) {
+ error(p, "LZ4 compressed object in file without LZ4 compression");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if ((o->object.flags & OBJECT_COMPRESSED_ZSTD) && !JOURNAL_HEADER_COMPRESSED_ZSTD(f->header)) {
+ error(p, "ZSTD compressed object in file without ZSTD compression");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ switch (o->object.type) {
+
+ case OBJECT_DATA:
+ r = write_uint64(data_fp, p);
+ if (r < 0)
+ goto fail;
+
+ n_data++;
+ break;
+
+ case OBJECT_FIELD:
+ n_fields++;
+ break;
+
+ case OBJECT_ENTRY:
+ if (JOURNAL_HEADER_SEALED(f->header) && n_tags <= 0) {
+ error(p, "First entry before first tag");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ r = write_uint64(entry_fp, p);
+ if (r < 0)
+ goto fail;
+
+ if (le64toh(o->entry.realtime) < last_tag_realtime) {
+ error(p,
+ "Older entry after newer tag (%"PRIu64" < %"PRIu64")",
+ le64toh(o->entry.realtime),
+ last_tag_realtime);
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (!entry_seqnum_set &&
+ le64toh(o->entry.seqnum) != le64toh(f->header->head_entry_seqnum)) {
+ error(p,
+ "Head entry sequence number incorrect (%"PRIu64" != %"PRIu64")",
+ le64toh(o->entry.seqnum),
+ le64toh(f->header->head_entry_seqnum));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (entry_seqnum_set &&
+ entry_seqnum >= le64toh(o->entry.seqnum)) {
+ error(p,
+ "Entry sequence number out of synchronization (%"PRIu64" >= %"PRIu64")",
+ entry_seqnum,
+ le64toh(o->entry.seqnum));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ entry_seqnum = le64toh(o->entry.seqnum);
+ entry_seqnum_set = true;
+
+ if (entry_monotonic_set &&
+ sd_id128_equal(entry_boot_id, o->entry.boot_id) &&
+ entry_monotonic > le64toh(o->entry.monotonic)) {
+ error(p,
+ "Entry timestamp out of synchronization (%"PRIu64" > %"PRIu64")",
+ entry_monotonic,
+ le64toh(o->entry.monotonic));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ entry_monotonic = le64toh(o->entry.monotonic);
+ entry_boot_id = o->entry.boot_id;
+ entry_monotonic_set = true;
+
+ if (!entry_realtime_set &&
+ le64toh(o->entry.realtime) != le64toh(f->header->head_entry_realtime)) {
+ error(p,
+ "Head entry realtime timestamp incorrect (%"PRIu64" != %"PRIu64")",
+ le64toh(o->entry.realtime),
+ le64toh(f->header->head_entry_realtime));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ entry_realtime = le64toh(o->entry.realtime);
+ entry_realtime_set = true;
+
+ max_entry_realtime = MAX(max_entry_realtime, le64toh(o->entry.realtime));
+ min_entry_realtime = MIN(min_entry_realtime, le64toh(o->entry.realtime));
+
+ n_entries++;
+ break;
+
+ case OBJECT_DATA_HASH_TABLE:
+ r = verify_hash_table(o, p, &n_data_hash_tables,
+ le64toh(f->header->data_hash_table_offset),
+ le64toh(f->header->data_hash_table_size));
+ if (r < 0)
+ goto fail;
+ break;
+
+ case OBJECT_FIELD_HASH_TABLE:
+ r = verify_hash_table(o, p, &n_field_hash_tables,
+ le64toh(f->header->field_hash_table_offset),
+ le64toh(f->header->field_hash_table_size));
+ if (r < 0)
+ goto fail;
+
+ break;
+
+ case OBJECT_ENTRY_ARRAY:
+ r = write_uint64(entry_array_fp, p);
+ if (r < 0)
+ goto fail;
+
+ if (p == le64toh(f->header->entry_array_offset)) {
+ if (found_main_entry_array) {
+ error(p, "More than one main entry array");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ found_main_entry_array = true;
+ }
+
+ n_entry_arrays++;
+ break;
+
+ case OBJECT_TAG:
+ if (!JOURNAL_HEADER_SEALED(f->header)) {
+ error(p, "Tag object in file without sealing");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (le64toh(o->tag.seqnum) != n_tags + 1) {
+ error(p,
+ "Tag sequence number out of synchronization (%"PRIu64" != %"PRIu64")",
+ le64toh(o->tag.seqnum),
+ n_tags + 1);
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_SEALED_CONTINUOUS(f->header)) {
+ if (!(n_tags == 0 || (n_tags == 1 && le64toh(o->tag.epoch) == last_epoch)
+ || le64toh(o->tag.epoch) == last_epoch + 1)) {
+ error(p,
+ "Epoch sequence not continuous (%"PRIu64" vs %"PRIu64")",
+ le64toh(o->tag.epoch),
+ last_epoch);
+ r = -EBADMSG;
+ goto fail;
+ }
+ } else {
+ if (le64toh(o->tag.epoch) < last_epoch) {
+ error(p,
+ "Epoch sequence out of synchronization (%"PRIu64" < %"PRIu64")",
+ le64toh(o->tag.epoch),
+ last_epoch);
+ r = -EBADMSG;
+ goto fail;
+ }
+ }
+
+#if HAVE_GCRYPT
+ if (JOURNAL_HEADER_SEALED(f->header)) {
+ uint64_t q, rt, rt_end;
+
+ debug(p, "Checking tag %"PRIu64"...", le64toh(o->tag.seqnum));
+
+ rt = f->fss_start_usec + le64toh(o->tag.epoch) * f->fss_interval_usec;
+ rt_end = usec_add(rt, f->fss_interval_usec);
+ if (entry_realtime_set && entry_realtime >= rt_end) {
+ error(p,
+ "tag/entry realtime timestamp out of synchronization (%"PRIu64" >= %"PRIu64")",
+ entry_realtime,
+ rt + f->fss_interval_usec);
+ r = -EBADMSG;
+ goto fail;
+ }
+ if (max_entry_realtime >= rt_end) {
+ error(p,
+ "Entry realtime (%"PRIu64", %s) is too late with respect to tag (%"PRIu64", %s)",
+ max_entry_realtime, FORMAT_TIMESTAMP(max_entry_realtime),
+ rt_end, FORMAT_TIMESTAMP(rt_end));
+ r = -EBADMSG;
+ goto fail;
+ }
+ if (min_entry_realtime < rt) {
+ error(p,
+ "Entry realtime (%"PRIu64", %s) is too early with respect to tag (%"PRIu64", %s)",
+ min_entry_realtime, FORMAT_TIMESTAMP(min_entry_realtime),
+ rt, FORMAT_TIMESTAMP(rt));
+ r = -EBADMSG;
+ goto fail;
+ }
+ min_entry_realtime = USEC_INFINITY;
+
+ /* OK, now we know the epoch. So let's now set
+ * it, and calculate the HMAC for everything
+ * since the last tag. */
+ r = journal_file_fsprg_seek(f, le64toh(o->tag.epoch));
+ if (r < 0)
+ goto fail;
+
+ r = journal_file_hmac_start(f);
+ if (r < 0)
+ goto fail;
+
+ if (last_tag == 0) {
+ r = journal_file_hmac_put_header(f);
+ if (r < 0)
+ goto fail;
+
+ q = le64toh(f->header->header_size);
+ } else
+ q = last_tag;
+
+ while (q <= p) {
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, q, &o);
+ if (r < 0)
+ goto fail;
+
+ r = journal_file_hmac_put_object(f, OBJECT_UNUSED, o, q);
+ if (r < 0)
+ goto fail;
+
+ q = q + ALIGN64(le64toh(o->object.size));
+ }
+
+ /* Position might have changed, let's reposition things */
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
+ if (r < 0)
+ goto fail;
+
+ if (memcmp(o->tag.tag, gcry_md_read(f->hmac, 0), TAG_LENGTH) != 0) {
+ error(p, "Tag failed verification");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ f->hmac_running = false;
+ last_tag_realtime = rt;
+ }
+
+ last_tag = p + ALIGN64(le64toh(o->object.size));
+#endif
+
+ last_epoch = le64toh(o->tag.epoch);
+
+ n_tags++;
+ break;
+ }
+
+ if (p == le64toh(f->header->tail_object_offset)) {
+ found_last = true;
+ break;
+ }
+
+ p = p + ALIGN64(le64toh(o->object.size));
+        }
+
+ if (!found_last && le64toh(f->header->tail_object_offset) != 0) {
+ error(le64toh(f->header->tail_object_offset),
+ "Tail object pointer dead (%"PRIu64" != 0)",
+ le64toh(f->header->tail_object_offset));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (n_objects != le64toh(f->header->n_objects)) {
+ error(offsetof(Header, n_objects),
+ "Object number mismatch (%"PRIu64" != %"PRIu64")",
+ n_objects,
+ le64toh(f->header->n_objects));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (n_entries != le64toh(f->header->n_entries)) {
+ error(offsetof(Header, n_entries),
+ "Entry number mismatch (%"PRIu64" != %"PRIu64")",
+ n_entries,
+ le64toh(f->header->n_entries));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
+ n_data != le64toh(f->header->n_data)) {
+ error(offsetof(Header, n_data),
+ "Data number mismatch (%"PRIu64" != %"PRIu64")",
+ n_data,
+ le64toh(f->header->n_data));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
+ n_fields != le64toh(f->header->n_fields)) {
+ error(offsetof(Header, n_fields),
+ "Field number mismatch (%"PRIu64" != %"PRIu64")",
+ n_fields,
+ le64toh(f->header->n_fields));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_tags) &&
+ n_tags != le64toh(f->header->n_tags)) {
+ error(offsetof(Header, n_tags),
+ "Tag number mismatch (%"PRIu64" != %"PRIu64")",
+ n_tags,
+ le64toh(f->header->n_tags));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays) &&
+ n_entry_arrays != le64toh(f->header->n_entry_arrays)) {
+ error(offsetof(Header, n_entry_arrays),
+ "Entry array number mismatch (%"PRIu64" != %"PRIu64")",
+ n_entry_arrays,
+ le64toh(f->header->n_entry_arrays));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (!found_main_entry_array && le64toh(f->header->entry_array_offset) != 0) {
+ error(0, "Missing main entry array");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (entry_seqnum_set &&
+ entry_seqnum != le64toh(f->header->tail_entry_seqnum)) {
+ error(offsetof(Header, tail_entry_seqnum),
+ "Tail entry sequence number incorrect (%"PRIu64" != %"PRIu64")",
+ entry_seqnum,
+ le64toh(f->header->tail_entry_seqnum));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (entry_monotonic_set &&
+ (sd_id128_equal(entry_boot_id, f->header->tail_entry_boot_id) &&
+ JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f->header) &&
+ entry_monotonic != le64toh(f->header->tail_entry_monotonic))) {
+ error(0,
+ "Invalid tail monotonic timestamp (%"PRIu64" != %"PRIu64")",
+ entry_monotonic,
+ le64toh(f->header->tail_entry_monotonic));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (entry_realtime_set && entry_realtime != le64toh(f->header->tail_entry_realtime)) {
+ error(0,
+ "Invalid tail realtime timestamp (%"PRIu64" != %"PRIu64")",
+ entry_realtime,
+ le64toh(f->header->tail_entry_realtime));
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (fflush(data_fp) != 0) {
+ r = log_error_errno(errno, "Failed to flush data file stream: %m");
+ goto fail;
+ }
+
+ if (fflush(entry_fp) != 0) {
+ r = log_error_errno(errno, "Failed to flush entry file stream: %m");
+ goto fail;
+ }
+
+ if (fflush(entry_array_fp) != 0) {
+ r = log_error_errno(errno, "Failed to flush entry array file stream: %m");
+ goto fail;
+ }
+
+ /* Second iteration: we follow all objects referenced from the
+ * two entry points: the object hash table and the entry
+ * array. We also check that everything referenced (directly
+ * or indirectly) in the data hash table also exists in the
+ * entry array, and vice versa. Note that we do not care for
+ * unreferenced objects. We only care that everything that is
+ * referenced is consistent. */
+
+ r = verify_entry_array(f,
+ cache_data_fd, n_data,
+ cache_entry_fd, n_entries,
+ cache_entry_array_fd, n_entry_arrays,
+ &last_usec,
+ show_progress);
+ if (r < 0)
+ goto fail;
+
+ r = verify_data_hash_table(f,
+ cache_data_fd, n_data,
+ cache_entry_fd, n_entries,
+ cache_entry_array_fd, n_entry_arrays,
+ &last_usec,
+ show_progress);
+ if (r < 0)
+ goto fail;
+
+ if (show_progress)
+ flush_progress();
+
+ mmap_cache_fd_free(cache_data_fd);
+ mmap_cache_fd_free(cache_entry_fd);
+ mmap_cache_fd_free(cache_entry_array_fd);
+
+ if (first_contained)
+ *first_contained = le64toh(f->header->head_entry_realtime);
+#if HAVE_GCRYPT
+ if (last_validated)
+ *last_validated = last_tag_realtime + f->fss_interval_usec;
+#endif
+ if (last_contained)
+ *last_contained = le64toh(f->header->tail_entry_realtime);
+
+ return 0;
+
+fail:
+ if (show_progress)
+ flush_progress();
+
+ log_error("File corruption detected at %s:%"PRIu64" (of %"PRIu64" bytes, %"PRIu64"%%).",
+ f->path,
+ p,
+ (uint64_t) f->last_stat.st_size,
+ 100U * p / (uint64_t) f->last_stat.st_size);
+
+ if (cache_data_fd)
+ mmap_cache_fd_free(cache_data_fd);
+
+ if (cache_entry_fd)
+ mmap_cache_fd_free(cache_entry_fd);
+
+ if (cache_entry_array_fd)
+ mmap_cache_fd_free(cache_entry_array_fd);
+
+ return r;
+}
diff --git a/src/libsystemd/sd-journal/journal-verify.h b/src/libsystemd/sd-journal/journal-verify.h
new file mode 100644
index 0000000..5790330
--- /dev/null
+++ b/src/libsystemd/sd-journal/journal-verify.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include "journal-file.h"
+
+int journal_file_verify(JournalFile *f, const char *key, usec_t *first_contained, usec_t *last_validated, usec_t *last_contained, bool show_progress);
diff --git a/src/libsystemd/sd-journal/lookup3.c b/src/libsystemd/sd-journal/lookup3.c
new file mode 100644
index 0000000..c2a6406
--- /dev/null
+++ b/src/libsystemd/sd-journal/lookup3.c
@@ -0,0 +1,1002 @@
+/* SPDX-License-Identifier: LicenseRef-lookup3-public-domain */
+/* Slightly modified by Lennart Poettering, to avoid name clashes, and
+ * unexport a few functions. */
+
+#include "lookup3.h"
+
+#if HAVE_VALGRIND_VALGRIND_H
+# include <valgrind/valgrind.h>
+#else
+# define RUNNING_ON_VALGRIND 0
+#endif
+
+/*
+-------------------------------------------------------------------------------
+lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+
+These are functions for producing 32-bit hashes for hash table lookup.
+hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+are externally useful functions. Routines to test the hash are included
+if SELF_TEST is defined. You can use this free for any purpose. It's in
+the public domain. It has no warranty.
+
+You probably want to use hashlittle(). hashlittle() and hashbig()
+hash byte arrays. hashlittle() is faster than hashbig() on
+little-endian machines. Intel and AMD are little-endian machines.
+On second thought, you probably want hashlittle2(), which is identical to
+hashlittle() except it returns two 32-bit hashes for the price of one.
+You could implement hashbig2() if you wanted but I haven't bothered here.
+
+If you want to find a hash of, say, exactly 7 integers, do
+ a = i1; b = i2; c = i3;
+ mix(a,b,c);
+ a += i4; b += i5; c += i6;
+ mix(a,b,c);
+ a += i7;
+ final(a,b,c);
+then use c as the hash value. If you have a variable length array of
+4-byte integers to hash, use hashword(). If you have a byte array (like
+a character string), use hashlittle(). If you have several byte arrays, or
+a mix of things, see the comments above hashlittle().
+
+Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
+then mix those integers. This is fast (you can do a lot more thorough
+mixing with 12*3 instructions on 3 integers than you can with 3 instructions
+on 1 byte), but shoehorning those bytes into integers efficiently is messy.
+-------------------------------------------------------------------------------
+*/
+/* #define SELF_TEST 1 */
+
+#include <stdint.h> /* defines uint32_t etc */
+#include <stdio.h> /* defines printf for tests */
+#include <sys/param.h> /* attempt to define endianness */
+#include <time.h> /* defines time_t for timings in the test */
+#ifdef linux
+# include <endian.h> /* attempt to define endianness */
+#endif
+
+#if __GNUC__ >= 7
+_Pragma("GCC diagnostic ignored \"-Wimplicit-fallthrough\"")
+#endif
+
+/*
+ * My best guess at if you are big-endian or little-endian. This may
+ * need adjustment.
+ */
+#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
+ __BYTE_ORDER == __LITTLE_ENDIAN) || \
+ (defined(i386) || defined(__i386__) || defined(__i486__) || \
+ defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
+# define HASH_LITTLE_ENDIAN 1
+# define HASH_BIG_ENDIAN 0
+#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
+ __BYTE_ORDER == __BIG_ENDIAN) || \
+ (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 1
+#else
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 0
+#endif
+
+#define hashsize(n) ((uint32_t)1<<(n))
+#define hashmask(n) (hashsize(n)-1)
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+
+/*
+-------------------------------------------------------------------------------
+mix -- mix 3 32-bit values reversibly.
+
+This is reversible, so any information in (a,b,c) before mix() is
+still in (a,b,c) after mix().
+
+If four pairs of (a,b,c) inputs are run through mix(), or through
+mix() in reverse, there are at least 32 bits of the output that
+are sometimes the same for one pair and different for another pair.
+This was tested for:
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
+satisfy this are
+ 4 6 8 16 19 4
+ 9 15 3 18 27 15
+ 14 9 3 7 17 3
+Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
+for "differ" defined as + with a one-bit base and a two-bit delta. I
+used http://burtleburtle.net/bob/hash/avalanche.html to choose
+the operations, constants, and arrangements of the variables.
+
+This does not achieve avalanche. There are input bits of (a,b,c)
+that fail to affect some output bits of (a,b,c), especially of a. The
+most thoroughly mixed value is c, but it doesn't really even achieve
+avalanche in c.
+
+This allows some parallelism. Read-after-writes are good at doubling
+the number of bits affected, so the goal of mixing pulls in the opposite
+direction as the goal of parallelism. I did what I could. Rotates
+seem to cost as much as shifts on every machine I could lay my hands
+on, and rotates are much kinder to the top and bottom bits, so I used
+rotates.
+-------------------------------------------------------------------------------
+*/
+#define mix(a,b,c) \
+{ \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+}
+
+/*
+-------------------------------------------------------------------------------
+final -- final mixing of 3 32-bit values (a,b,c) into c
+
+Pairs of (a,b,c) values differing in only a few bits will usually
+produce values of c that look totally different. This was tested for
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+ is commonly produced by subtraction) look like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+These constants passed:
+ 14 11 25 16 4 14 24
+ 12 14 25 16 4 14 24
+and these came close:
+ 4 8 15 26 3 22 24
+ 10 8 15 26 3 22 24
+ 11 8 15 26 3 22 24
+-------------------------------------------------------------------------------
+*/
+#define final(a,b,c) \
+{ \
+ c ^= b; c -= rot(b,14); \
+ a ^= c; a -= rot(c,11); \
+ b ^= a; b -= rot(a,25); \
+ c ^= b; c -= rot(b,16); \
+ a ^= c; a -= rot(c,4); \
+ b ^= a; b -= rot(a,14); \
+ c ^= b; c -= rot(b,24); \
+}
+
+/*
+--------------------------------------------------------------------
+ This works on all machines. To be useful, it requires
+ -- that the key be an array of uint32_t's, and
+ -- that the length be the number of uint32_t's in the key
+
+ The function hashword() is identical to hashlittle() on little-endian
+ machines, and identical to hashbig() on big-endian machines,
+ except that the length has to be measured in uint32_ts rather than in
+ bytes. hashlittle() is more complicated than hashword() only because
+ hashlittle() has to dance around fitting the key bytes into registers.
+--------------------------------------------------------------------
+*/
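+/* Usage sketch (illustrative only): the key must be an array of uint32_t and
+ * the length is counted in uint32_t units, not bytes, e.g.:
+ *
+ *     uint32_t k[3] = { 1, 2, 3 };
+ *     uint32_t h = jenkins_hashword(k, 3, 0);
+ */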
+uint32_t jenkins_hashword(
+const uint32_t *k, /* the key, an array of uint32_t values */
+size_t length, /* the length of the key, in uint32_ts */
+uint32_t initval) /* the previous hash, or an arbitrary value */
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval;
+
+ /*------------------------------------------------- handle most of the key */
+ while (length > 3)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*------------------------------------------- handle the last 3 uint32_t's */
+ switch(length) /* all the case statements fall through */
+ {
+ case 3 : c+=k[2];
+ case 2 : b+=k[1];
+ case 1 : a+=k[0];
+ final(a,b,c);
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*------------------------------------------------------ report the result */
+ return c;
+}
+
+/*
+--------------------------------------------------------------------
+hashword2() -- same as hashword(), but take two seeds and return two
+32-bit values. pc and pb must both be nonnull, and *pc and *pb must
+both be initialized with seeds. If you pass in (*pb)==0, the output
+(*pc) will be the same as the return value from hashword().
+--------------------------------------------------------------------
+*/
+void jenkins_hashword2 (
+const uint32_t *k, /* the key, an array of uint32_t values */
+size_t length, /* the length of the key, in uint32_ts */
+uint32_t *pc, /* IN: seed OUT: primary hash value */
+uint32_t *pb) /* IN: more seed OUT: secondary hash value */
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)(length<<2)) + *pc;
+ c += *pb;
+
+ /*------------------------------------------------- handle most of the key */
+ while (length > 3)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*------------------------------------------- handle the last 3 uint32_t's */
+ switch(length) /* all the case statements fall through */
+ {
+ case 3 : c+=k[2];
+ case 2 : b+=k[1];
+ case 1 : a+=k[0];
+ final(a,b,c);
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*------------------------------------------------------ report the result */
+ *pc=c; *pb=b;
+}
+
+/*
+-------------------------------------------------------------------------------
+hashlittle() -- hash a variable-length key into a 32-bit value
+ k : the key (the unaligned variable-length array of bytes)
+ length : the length of the key, counting by bytes
+ initval : can be any 4-byte value
+Returns a 32-bit value. Every bit of the key affects every bit of
+the return value. Two keys differing by one or two bits will have
+totally different hash values.
+
+The best hash table sizes are powers of 2. There is no need to do
+mod a prime (mod is sooo slow!). If you need less than 32 bits,
+use a bitmask. For example, if you need only 10 bits, do
+ h = (h & hashmask(10));
+In which case, the hash table should have hashsize(10) elements.
+
+If you are hashing n strings (uint8_t **)k, do it like this:
+ for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
+
+By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+code any way you wish, private, educational, or commercial. It's free.
+
+Use for hash table lookup, or anything where one collision in 2^^32 is
+acceptable. Do NOT use for cryptographic purposes.
+-------------------------------------------------------------------------------
+*/
+
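+/* Usage sketch (illustrative only), following the advice above: mask the
+ * result down to a power-of-two table size, here 2^10 buckets, assuming
+ * "key" is a NUL-terminated string:
+ *
+ *     uint32_t h = jenkins_hashlittle(key, strlen(key), 0);
+ *     size_t bucket = h & hashmask(10);
+ */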
+uint32_t jenkins_hashlittle( const void *key, size_t length, uint32_t initval)
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]&0xffffff" actually reads beyond the end of the string, but
+ * then masks off the part it's not allowed to read. Because the
+ * string is aligned, the masked-off tail is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But valgrind will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+#define VALGRIND_LIKE (_unlikely_(HAS_FEATURE_ADDRESS_SANITIZER || \
+ HAS_FEATURE_MEMORY_SANITIZER || \
+ RUNNING_ON_VALGRIND))
+
+ if (!VALGRIND_LIKE) {
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff; break;
+ case 2 : a+=k[0]&0xffff; break;
+ case 1 : a+=k[0]&0xff; break;
+ case 0 : return c; /* zero length strings require no mixing */
+ }
+ } else {
+ const uint8_t *k8 = (const uint8_t *) k;
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : return c;
+ }
+ }
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return c; /* zero length requires no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ case 11: c+=((uint32_t)k[10])<<16;
+ case 10: c+=((uint32_t)k[9])<<8;
+ case 9 : c+=k[8];
+ case 8 : b+=((uint32_t)k[7])<<24;
+ case 7 : b+=((uint32_t)k[6])<<16;
+ case 6 : b+=((uint32_t)k[5])<<8;
+ case 5 : b+=k[4];
+ case 4 : a+=((uint32_t)k[3])<<24;
+ case 3 : a+=((uint32_t)k[2])<<16;
+ case 2 : a+=((uint32_t)k[1])<<8;
+ case 1 : a+=k[0];
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ return c;
+}
+
+/*
+ * hashlittle2: return 2 32-bit hash values
+ *
+ * This is identical to hashlittle(), except it returns two 32-bit hash
+ * values instead of just one. This is good enough for hash table
+ * lookup with 2^^64 buckets, or if you want a second hash if you're not
+ * happy with the first, or if you want a probably-unique 64-bit ID for
+ * the key. *pc is better mixed than *pb, so use *pc first. If you want
+ * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)".
+ */
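+/* Usage sketch (illustrative only): combining both outputs into one 64-bit
+ * value as suggested above, with zero seeds:
+ *
+ *     uint32_t pc = 0, pb = 0;
+ *     jenkins_hashlittle2(data, size, &pc, &pb);
+ *     uint64_t h = pc + (((uint64_t) pb) << 32);
+ */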
+void jenkins_hashlittle2(
+ const void *key, /* the key to hash */
+ size_t length, /* length of the key */
+ uint32_t *pc, /* IN: primary initval, OUT: primary hash */
+ uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + *pc;
+ c += *pb;
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]&0xffffff" actually reads beyond the end of the string, but
+ * then masks off the part it's not allowed to read. Because the
+ * string is aligned, the masked-off tail is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But valgrind will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+ if (!VALGRIND_LIKE) {
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff; break;
+ case 2 : a+=k[0]&0xffff; break;
+ case 1 : a+=k[0]&0xff; break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+ } else {
+ const uint8_t *k8 = (const uint8_t *)k;
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+ }
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ case 11: c+=((uint32_t)k[10])<<16;
+ case 10: c+=((uint32_t)k[9])<<8;
+ case 9 : c+=k[8];
+ case 8 : b+=((uint32_t)k[7])<<24;
+ case 7 : b+=((uint32_t)k[6])<<16;
+ case 6 : b+=((uint32_t)k[5])<<8;
+ case 5 : b+=k[4];
+ case 4 : a+=((uint32_t)k[3])<<24;
+ case 3 : a+=((uint32_t)k[2])<<16;
+ case 2 : a+=((uint32_t)k[1])<<8;
+ case 1 : a+=k[0];
+ break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+ }
+
+ final(a,b,c);
+ *pc=c; *pb=b;
+}
+
+/*
+ * hashbig():
+ * This is the same as hashword() on big-endian machines. It is different
+ * from hashlittle() on all machines. hashbig() takes advantage of
+ * big-endian byte ordering.
+ */
+uint32_t jenkins_hashbig( const void *key, size_t length, uint32_t initval)
+{
+ uint32_t a,b,c;
+ union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]<<8" actually reads beyond the end of the string, but
+ * then shifts out the part it's not allowed to read. Because the
+ * string is aligned, the illegal read is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But valgrind will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+
+ if (!VALGRIND_LIKE) {
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff00; break;
+ case 2 : a+=k[0]&0xffff0000; break;
+ case 1 : a+=k[0]&0xff000000; break;
+ case 0 : return c; /* zero length strings require no mixing */
+ }
+ } else {
+ const uint8_t *k8 = (const uint8_t *)k;
+
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<8; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<16; /* fall through */
+ case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */
+ case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */
+ case 1 : a+=((uint32_t)k8[0])<<24; break;
+ case 0 : return c;
+ }
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += ((uint32_t)k[0])<<24;
+ a += ((uint32_t)k[1])<<16;
+ a += ((uint32_t)k[2])<<8;
+ a += ((uint32_t)k[3]);
+ b += ((uint32_t)k[4])<<24;
+ b += ((uint32_t)k[5])<<16;
+ b += ((uint32_t)k[6])<<8;
+ b += ((uint32_t)k[7]);
+ c += ((uint32_t)k[8])<<24;
+ c += ((uint32_t)k[9])<<16;
+ c += ((uint32_t)k[10])<<8;
+ c += ((uint32_t)k[11]);
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=k[11];
+ case 11: c+=((uint32_t)k[10])<<8;
+ case 10: c+=((uint32_t)k[9])<<16;
+ case 9 : c+=((uint32_t)k[8])<<24;
+ case 8 : b+=k[7];
+ case 7 : b+=((uint32_t)k[6])<<8;
+ case 6 : b+=((uint32_t)k[5])<<16;
+ case 5 : b+=((uint32_t)k[4])<<24;
+ case 4 : a+=k[3];
+ case 3 : a+=((uint32_t)k[2])<<8;
+ case 2 : a+=((uint32_t)k[1])<<16;
+ case 1 : a+=((uint32_t)k[0])<<24;
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ return c;
+}
+
+#ifdef SELF_TEST
+
+/* used for timings */
+void driver1()
+{
+ uint8_t buf[256];
+ uint32_t i;
+ uint32_t h=0;
+ time_t a,z;
+
+ time(&a);
+ for (i=0; i<256; ++i) buf[i] = 'x';
+ for (i=0; i<1; ++i)
+ {
+ h = hashlittle(&buf[0],1,h);
+ }
+ time(&z);
+ if (z-a > 0) printf("time %d %.8x\n", z-a, h);
+}
+
+/* check that every input bit changes every output bit half the time */
+#define HASHSTATE 1
+#define HASHLEN 1
+#define MAXPAIR 60
+#define MAXLEN 70
+void driver2()
+{
+ uint8_t qa[MAXLEN+1], qb[MAXLEN+2], *a = &qa[0], *b = &qb[1];
+ uint32_t c[HASHSTATE], d[HASHSTATE], i=0, j=0, k, l, m=0, z;
+ uint32_t e[HASHSTATE],f[HASHSTATE],g[HASHSTATE],h[HASHSTATE];
+ uint32_t x[HASHSTATE],y[HASHSTATE];
+ uint32_t hlen;
+
+ printf("No more than %d trials should ever be needed \n",MAXPAIR/2);
+ for (hlen=0; hlen < MAXLEN; ++hlen)
+ {
+ z=0;
+ for (i=0; i<hlen; ++i) /*----------------------- for each input byte, */
+ {
+ for (j=0; j<8; ++j) /*------------------------ for each input bit, */
+ {
+ for (m=1; m<8; ++m) /*------------- for several possible initvals, */
+ {
+ for (l=0; l<HASHSTATE; ++l)
+ e[l]=f[l]=g[l]=h[l]=x[l]=y[l]=~((uint32_t)0);
+
+ /*---- check that every output bit is affected by that input bit */
+ for (k=0; k<MAXPAIR; k+=2)
+ {
+ uint32_t finished=1;
+ /* keys have one bit different */
+ for (l=0; l<hlen+1; ++l) {a[l] = b[l] = (uint8_t)0;}
+ /* have a and b be two keys differing in only one bit */
+ a[i] ^= (k<<j);
+ a[i] ^= (k>>(8-j));
+ c[0] = hashlittle(a, hlen, m);
+ b[i] ^= ((k+1)<<j);
+ b[i] ^= ((k+1)>>(8-j));
+ d[0] = hashlittle(b, hlen, m);
+ /* check every bit is 1, 0, set, and not set at least once */
+ for (l=0; l<HASHSTATE; ++l)
+ {
+ e[l] &= (c[l]^d[l]);
+ f[l] &= ~(c[l]^d[l]);
+ g[l] &= c[l];
+ h[l] &= ~c[l];
+ x[l] &= d[l];
+ y[l] &= ~d[l];
+ if (e[l]|f[l]|g[l]|h[l]|x[l]|y[l]) finished=0;
+ }
+ if (finished) break;
+ }
+ if (k>z) z=k;
+ if (k==MAXPAIR)
+ {
+ printf("Some bit didn't change: ");
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x ",
+ e[0],f[0],g[0],h[0],x[0],y[0]);
+ printf("i %d j %d m %d len %d\n", i, j, m, hlen);
+ }
+ if (z==MAXPAIR) goto done;
+ }
+ }
+ }
+ done:
+ if (z < MAXPAIR)
+ {
+ printf("Mix success %2d bytes %2d initvals ",i,m);
+ printf("required %d trials\n", z/2);
+ }
+ }
+ printf("\n");
+}
+
+/* Check for reading beyond the end of the buffer and alignment problems */
+void driver3()
+{
+ uint8_t buf[MAXLEN+20], *b;
+ uint32_t len;
+ uint8_t q[] = "This is the time for all good men to come to the aid of their country...";
+ uint32_t h;
+ uint8_t qq[] = "xThis is the time for all good men to come to the aid of their country...";
+ uint32_t i;
+ uint8_t qqq[] = "xxThis is the time for all good men to come to the aid of their country...";
+ uint32_t j;
+ uint8_t qqqq[] = "xxxThis is the time for all good men to come to the aid of their country...";
+ uint32_t ref,x,y;
+ uint8_t *p;
+
+ printf("Endianness. These lines should all be the same (for values filled in):\n");
+ printf("%.8x %.8x %.8x\n",
+ hashword((const uint32_t *)q, (sizeof(q)-1)/4, 13),
+ hashword((const uint32_t *)q, (sizeof(q)-5)/4, 13),
+ hashword((const uint32_t *)q, (sizeof(q)-9)/4, 13));
+ p = q;
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qq[1];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qqq[2];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qqqq[3];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ printf("\n");
+
+ /* check that hashlittle2 and hashlittle produce the same results */
+ i=47; j=0;
+ hashlittle2(q, sizeof(q), &i, &j);
+ if (hashlittle(q, sizeof(q), 47) != i)
+ printf("hashlittle2 and hashlittle mismatch\n");
+
+ /* check that hashword2 and hashword produce the same results */
+ len = 0xdeadbeef;
+ i=47, j=0;
+ hashword2(&len, 1, &i, &j);
+ if (hashword(&len, 1, 47) != i)
+ printf("hashword2 and hashword mismatch %x %x\n",
+ i, hashword(&len, 1, 47));
+
+ /* check hashlittle doesn't read before or after the ends of the string */
+ for (h=0, b=buf+1; h<8; ++h, ++b)
+ {
+ for (i=0; i<MAXLEN; ++i)
+ {
+ len = i;
+ for (j=0; j<i; ++j) *(b+j)=0;
+
+ /* these should all be equal */
+ ref = hashlittle(b, len, (uint32_t)1);
+ *(b+i)=(uint8_t)~0;
+ *(b-1)=(uint8_t)~0;
+ x = hashlittle(b, len, (uint32_t)1);
+ y = hashlittle(b, len, (uint32_t)1);
+ if ((ref != x) || (ref != y))
+ {
+ printf("alignment error: %.8x %.8x %.8x %d %d\n",ref,x,y,
+ h, i);
+ }
+ }
+ }
+}
+
+/* check for problems with nulls */
+void driver4()
+{
+ uint8_t buf[1];
+ uint32_t h,i,state[HASHSTATE];
+
+ buf[0] = ~0;
+ for (i=0; i<HASHSTATE; ++i) state[i] = 1;
+ printf("These should all be different\n");
+ for (i=0, h=0; i<8; ++i)
+ {
+ h = hashlittle(buf, 0, h);
+ printf("%2ld 0-byte strings, hash is %.8x\n", i, h);
+ }
+}
+
+void driver5()
+{
+ uint32_t b,c;
+ b=0, c=0, hashlittle2("", 0, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* deadbeef deadbeef */
+ b=0xdeadbeef, c=0, hashlittle2("", 0, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* bd5b7dde deadbeef */
+ b=0xdeadbeef, c=0xdeadbeef, hashlittle2("", 0, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* 9c093ccd bd5b7dde */
+ b=0, c=0, hashlittle2("Four score and seven years ago", 30, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* 17770551 ce7226e6 */
+ b=1, c=0, hashlittle2("Four score and seven years ago", 30, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* e3607cae bd371de4 */
+ b=0, c=1, hashlittle2("Four score and seven years ago", 30, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* cd628161 6cbea4b3 */
+ c = hashlittle("Four score and seven years ago", 30, 0);
+ printf("hash is %.8lx\n", c); /* 17770551 */
+ c = hashlittle("Four score and seven years ago", 30, 1);
+ printf("hash is %.8lx\n", c); /* cd628161 */
+}
+
+int main()
+{
+ driver1(); /* test that the key is hashed: used for timings */
+ driver2(); /* test that whole key is hashed thoroughly */
+ driver3(); /* test that nothing but the key is hashed */
+ driver4(); /* test hashing multiple buffers (all buffers are null) */
+ driver5(); /* test the hash against known vectors */
+ return 1;
+}
+
+#endif /* SELF_TEST */
diff --git a/src/libsystemd/sd-journal/lookup3.h b/src/libsystemd/sd-journal/lookup3.h
new file mode 100644
index 0000000..04e493e
--- /dev/null
+++ b/src/libsystemd/sd-journal/lookup3.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: LicenseRef-lookup3-public-domain */
+#pragma once
+
+#include <inttypes.h>
+#include <sys/types.h>
+
+#include "macro.h"
+
+uint32_t jenkins_hashword(const uint32_t *k, size_t length, uint32_t initval) _pure_;
+void jenkins_hashword2(const uint32_t *k, size_t length, uint32_t *pc, uint32_t *pb);
+
+uint32_t jenkins_hashlittle(const void *key, size_t length, uint32_t initval) _pure_;
+void jenkins_hashlittle2(const void *key, size_t length, uint32_t *pc, uint32_t *pb);
+
+uint32_t jenkins_hashbig(const void *key, size_t length, uint32_t initval) _pure_;
+
+static inline uint64_t jenkins_hash64(const void *data, size_t length) {
+ uint32_t a = 0, b = 0;
+
+ jenkins_hashlittle2(data, length, &a, &b);
+
+ return ((uint64_t) a << 32ULL) | (uint64_t) b;
+}
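+
+/* Illustrative only, not part of the upstream header: a minimal sketch of how the helper above could be
+ * used; the function name and the example payload are invented for the illustration. */
+#if 0
+static uint64_t example_hash_message(void) {
+        static const char payload[] = "MESSAGE=hello";
+
+        /* hashlittle2() fills two 32-bit values, which jenkins_hash64() packs into a single 64-bit hash. */
+        return jenkins_hash64(payload, sizeof(payload) - 1);
+}
+#endif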
diff --git a/src/libsystemd/sd-journal/mmap-cache.c b/src/libsystemd/sd-journal/mmap-cache.c
new file mode 100644
index 0000000..973ade6
--- /dev/null
+++ b/src/libsystemd/sd-journal/mmap-cache.c
@@ -0,0 +1,562 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+
+#include "alloc-util.h"
+#include "errno-util.h"
+#include "fd-util.h"
+#include "hashmap.h"
+#include "list.h"
+#include "log.h"
+#include "macro.h"
+#include "memory-util.h"
+#include "mmap-cache.h"
+#include "sigbus.h"
+
+typedef struct Window Window;
+
+typedef enum WindowFlags {
+ WINDOW_KEEP_ALWAYS = 1u << (_MMAP_CACHE_CATEGORY_MAX + 0),
+ WINDOW_IN_UNUSED = 1u << (_MMAP_CACHE_CATEGORY_MAX + 1),
+ WINDOW_INVALIDATED = 1u << (_MMAP_CACHE_CATEGORY_MAX + 2),
+
+        _WINDOW_USED_MASK = WINDOW_IN_UNUSED - 1, /* The mask contains all bits that indicate the window
+                                                    * is currently in use. Covers all the object types
+                                                    * and the additional WINDOW_KEEP_ALWAYS flag. */
+} WindowFlags;
+
+#define WINDOW_IS_UNUSED(w) (((w)->flags & _WINDOW_USED_MASK) == 0)
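+
+/* Illustrative layout sketch, not from the original source: with, say, ten categories, bits 0..9 of
+ * 'flags' record which categories currently reference the window, bit 10 is WINDOW_KEEP_ALWAYS, bit 11
+ * WINDOW_IN_UNUSED and bit 12 WINDOW_INVALIDATED. _WINDOW_USED_MASK = WINDOW_IN_UNUSED - 1 hence covers
+ * exactly the per-category bits plus WINDOW_KEEP_ALWAYS, which is what WINDOW_IS_UNUSED() tests. */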
+
+struct Window {
+ MMapFileDescriptor *fd;
+
+ WindowFlags flags;
+
+ void *ptr;
+ uint64_t offset;
+ size_t size;
+
+ LIST_FIELDS(Window, windows);
+ LIST_FIELDS(Window, unused);
+};
+
+struct MMapFileDescriptor {
+ MMapCache *cache;
+
+ int fd;
+ int prot;
+ bool sigbus;
+
+ LIST_HEAD(Window, windows);
+};
+
+struct MMapCache {
+ unsigned n_ref;
+ unsigned n_windows;
+
+ unsigned n_category_cache_hit;
+ unsigned n_window_list_hit;
+ unsigned n_missed;
+
+ Hashmap *fds;
+
+ LIST_HEAD(Window, unused);
+ Window *last_unused;
+
+ Window *windows_by_category[_MMAP_CACHE_CATEGORY_MAX];
+};
+
+#define WINDOWS_MIN 64
+
+#if ENABLE_DEBUG_MMAP_CACHE
+/* Tiny windows increase mmap activity and the chance of exposing unsafe use. */
+# define WINDOW_SIZE (page_size())
+#else
+# define WINDOW_SIZE ((size_t) (UINT64_C(8) * UINT64_C(1024) * UINT64_C(1024)))
+#endif
+
+MMapCache* mmap_cache_new(void) {
+ MMapCache *m;
+
+ m = new(MMapCache, 1);
+ if (!m)
+ return NULL;
+
+ *m = (MMapCache) {
+ .n_ref = 1,
+ };
+
+ return m;
+}
+
+static Window* window_unlink(Window *w) {
+ assert(w);
+
+ MMapCache *m = mmap_cache_fd_cache(w->fd);
+
+ if (w->ptr)
+ munmap(w->ptr, w->size);
+
+ if (FLAGS_SET(w->flags, WINDOW_IN_UNUSED)) {
+ if (m->last_unused == w)
+ m->last_unused = w->unused_prev;
+ LIST_REMOVE(unused, m->unused, w);
+ }
+
+ for (unsigned i = 0; i < _MMAP_CACHE_CATEGORY_MAX; i++)
+ if (FLAGS_SET(w->flags, 1u << i))
+ assert_se(TAKE_PTR(m->windows_by_category[i]) == w);
+
+ return LIST_REMOVE(windows, w->fd->windows, w);
+}
+
+static void window_invalidate(Window *w) {
+ assert(w);
+ assert(w->fd);
+
+ if (FLAGS_SET(w->flags, WINDOW_INVALIDATED))
+ return;
+
+ /* Replace the window with anonymous pages. This is useful when we hit a SIGBUS and want to make sure
+ * the file cannot trigger any further SIGBUS, possibly overrunning the sigbus queue. */
+
+ assert_se(mmap(w->ptr, w->size, w->fd->prot, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0) == w->ptr);
+ w->flags |= WINDOW_INVALIDATED;
+}
+
+static Window* window_free(Window *w) {
+ if (!w)
+ return NULL;
+
+ window_unlink(w);
+ w->fd->cache->n_windows--;
+
+ return mfree(w);
+}
+
+static bool window_matches(Window *w, MMapFileDescriptor *f, uint64_t offset, size_t size) {
+ assert(size > 0);
+
+ return
+ w &&
+ f == w->fd &&
+ offset >= w->offset &&
+ offset + size <= w->offset + w->size;
+}
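+
+/* Example with illustrative numbers: a window at offset 4096 with size 8192 satisfies a request for
+ * offset 6000 and size 100, since 6000 >= 4096 and 6000 + 100 <= 4096 + 8192. */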
+
+static bool window_matches_by_addr(Window *w, MMapFileDescriptor *f, void *addr, size_t size) {
+ assert(size > 0);
+
+ return
+ w &&
+ f == w->fd &&
+ (uint8_t*) addr >= (uint8_t*) w->ptr &&
+ (uint8_t*) addr + size <= (uint8_t*) w->ptr + w->size;
+}
+
+static Window* window_add(MMapFileDescriptor *f, uint64_t offset, size_t size, void *ptr) {
+ MMapCache *m = mmap_cache_fd_cache(f);
+ Window *w;
+
+ if (!m->last_unused || m->n_windows <= WINDOWS_MIN) {
+ /* Allocate a new window */
+ w = new(Window, 1);
+ if (!w)
+ return NULL;
+ m->n_windows++;
+ } else
+ /* Reuse an existing one */
+ w = window_unlink(m->last_unused);
+
+ *w = (Window) {
+ .fd = f,
+ .offset = offset,
+ .size = size,
+ .ptr = ptr,
+ };
+
+ return LIST_PREPEND(windows, f->windows, w);
+}
+
+static void category_detach_window(MMapCache *m, MMapCacheCategory c) {
+ Window *w;
+
+ assert(m);
+ assert(c >= 0 && c < _MMAP_CACHE_CATEGORY_MAX);
+
+ w = TAKE_PTR(m->windows_by_category[c]);
+ if (!w)
+ return; /* Nothing attached. */
+
+ assert(FLAGS_SET(w->flags, 1u << c));
+ w->flags &= ~(1u << c);
+
+ if (WINDOW_IS_UNUSED(w)) {
+ /* Not used anymore? */
+#if ENABLE_DEBUG_MMAP_CACHE
+ /* Unmap unused windows immediately to expose use-after-unmap by SIGSEGV. */
+ window_free(w);
+#else
+ LIST_PREPEND(unused, m->unused, w);
+ if (!m->last_unused)
+ m->last_unused = w;
+ w->flags |= WINDOW_IN_UNUSED;
+#endif
+ }
+}
+
+static void category_attach_window(MMapCache *m, MMapCacheCategory c, Window *w) {
+ assert(m);
+ assert(c >= 0 && c < _MMAP_CACHE_CATEGORY_MAX);
+ assert(w);
+
+ if (m->windows_by_category[c] == w)
+ return; /* Already attached. */
+
+ category_detach_window(m, c);
+
+ if (FLAGS_SET(w->flags, WINDOW_IN_UNUSED)) {
+ /* Used again? */
+ if (m->last_unused == w)
+ m->last_unused = w->unused_prev;
+ LIST_REMOVE(unused, m->unused, w);
+ w->flags &= ~WINDOW_IN_UNUSED;
+ }
+
+ m->windows_by_category[c] = w;
+ w->flags |= (1u << c);
+}
+
+static MMapCache* mmap_cache_free(MMapCache *m) {
+ if (!m)
+ return NULL;
+
+ /* All windows are owned by fds, and each fd takes a reference of MMapCache. So, when this is called,
+ * all fds are already freed, and hence there is no window. */
+
+ assert(hashmap_isempty(m->fds));
+ hashmap_free(m->fds);
+
+ assert(!m->unused);
+ assert(m->n_windows == 0);
+
+ return mfree(m);
+}
+
+DEFINE_TRIVIAL_REF_UNREF_FUNC(MMapCache, mmap_cache, mmap_cache_free);
+
+static int mmap_try_harder(MMapFileDescriptor *f, void *addr, int flags, uint64_t offset, size_t size, void **ret) {
+ MMapCache *m = mmap_cache_fd_cache(f);
+
+ assert(ret);
+
+ for (;;) {
+ void *ptr;
+
+ ptr = mmap(addr, size, f->prot, flags, f->fd, offset);
+ if (ptr != MAP_FAILED) {
+ *ret = ptr;
+ return 0;
+ }
+ if (errno != ENOMEM)
+ return negative_errno();
+
+                /* If mmap() failed with ENOMEM, try again after making room by freeing an unused window. */
+
+ if (!m->last_unused)
+ return -ENOMEM; /* no free window, propagate the original error. */
+
+ window_free(m->last_unused);
+ }
+}
+
+static int add_mmap(
+ MMapFileDescriptor *f,
+ uint64_t offset,
+ size_t size,
+ struct stat *st,
+ Window **ret) {
+
+ Window *w;
+ void *d;
+ int r;
+
+ assert(f);
+ assert(size > 0);
+ assert(ret);
+
+ /* overflow check */
+ if (size > SIZE_MAX - PAGE_OFFSET_U64(offset))
+ return -EADDRNOTAVAIL;
+
+ size = PAGE_ALIGN(size + PAGE_OFFSET_U64(offset));
+ offset = PAGE_ALIGN_DOWN_U64(offset);
+
+ if (size < WINDOW_SIZE) {
+ uint64_t delta;
+
+ delta = PAGE_ALIGN((WINDOW_SIZE - size) / 2);
+ offset = LESS_BY(offset, delta);
+ size = WINDOW_SIZE;
+ }
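+
+        /* Worked example with illustrative numbers, assuming 4 KiB pages and the default 8 MiB
+         * WINDOW_SIZE: a request for offset 5000 and size 100 was aligned above to offset 4096 and size
+         * 4096. Here delta = PAGE_ALIGN((8 MiB - 4096) / 2) = 4 MiB, so LESS_BY() saturates the offset to
+         * 0 and the mapping becomes a full 8 MiB window starting at the beginning of the file. */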
+
+ if (st) {
+                /* Memory maps that are larger than the files underneath have undefined behavior. Hence,
+ * clamp things to the file size if we know it */
+
+ if (offset >= (uint64_t) st->st_size)
+ return -EADDRNOTAVAIL;
+
+ if (size > (uint64_t) st->st_size - offset)
+ size = PAGE_ALIGN((uint64_t) st->st_size - offset);
+ }
+
+ if (size >= SIZE_MAX)
+ return -EADDRNOTAVAIL;
+
+ r = mmap_try_harder(f, NULL, MAP_SHARED, offset, size, &d);
+ if (r < 0)
+ return r;
+
+ w = window_add(f, offset, size, d);
+ if (!w) {
+ (void) munmap(d, size);
+ return -ENOMEM;
+ }
+
+ *ret = w;
+ return 0;
+}
+
+int mmap_cache_fd_get(
+ MMapFileDescriptor *f,
+ MMapCacheCategory c,
+ bool keep_always,
+ uint64_t offset,
+ size_t size,
+ struct stat *st,
+ void **ret) {
+
+ MMapCache *m = mmap_cache_fd_cache(f);
+ Window *w;
+ int r;
+
+ assert(size > 0);
+ assert(c >= 0 && c < _MMAP_CACHE_CATEGORY_MAX);
+ assert(ret);
+
+ if (f->sigbus)
+ return -EIO;
+
+ /* Check whether the current category is the right one already */
+ if (window_matches(m->windows_by_category[c], f, offset, size)) {
+ m->n_category_cache_hit++;
+ w = m->windows_by_category[c];
+ goto found;
+ }
+
+ /* Drop the reference to the window, since it's unnecessary now */
+ category_detach_window(m, c);
+
+ /* Search for a matching mmap */
+ LIST_FOREACH(windows, i, f->windows)
+ if (window_matches(i, f, offset, size)) {
+ m->n_window_list_hit++;
+ w = i;
+ goto found;
+ }
+
+ m->n_missed++;
+
+ /* Create a new mmap */
+ r = add_mmap(f, offset, size, st, &w);
+ if (r < 0)
+ return r;
+
+found:
+ if (keep_always)
+ w->flags |= WINDOW_KEEP_ALWAYS;
+
+ category_attach_window(m, c, w);
+ *ret = (uint8_t*) w->ptr + (offset - w->offset);
+ return 0;
+}
+
+int mmap_cache_fd_pin(
+ MMapFileDescriptor *f,
+ MMapCacheCategory c,
+ void *addr,
+ size_t size) {
+
+ MMapCache *m = mmap_cache_fd_cache(f);
+ Window *w;
+
+ assert(addr);
+ assert(c >= 0 && c < _MMAP_CACHE_CATEGORY_MAX);
+ assert(size > 0);
+
+ if (f->sigbus)
+ return -EIO;
+
+ /* Check if the current category is the right one. */
+ if (window_matches_by_addr(m->windows_by_category[c], f, addr, size)) {
+ m->n_category_cache_hit++;
+ w = m->windows_by_category[c];
+ goto found;
+ }
+
+ /* Search for a matching mmap. */
+ LIST_FOREACH(windows, i, f->windows)
+ if (window_matches_by_addr(i, f, addr, size)) {
+ m->n_window_list_hit++;
+ w = i;
+ goto found;
+ }
+
+ m->n_missed++;
+ return -EADDRNOTAVAIL; /* Not found. */
+
+found:
+ if (FLAGS_SET(w->flags, WINDOW_KEEP_ALWAYS))
+                return 0; /* The window will never be unmapped. */
+
+ /* Attach the window to the 'pinning' category. */
+ category_attach_window(m, MMAP_CACHE_CATEGORY_PIN, w);
+ return 1;
+}
+
+void mmap_cache_stats_log_debug(MMapCache *m) {
+ assert(m);
+
+ log_debug("mmap cache statistics: %u category cache hit, %u window list hit, %u miss",
+ m->n_category_cache_hit, m->n_window_list_hit, m->n_missed);
+}
+
+static void mmap_cache_process_sigbus(MMapCache *m) {
+ bool found = false;
+ MMapFileDescriptor *f;
+ int r;
+
+ assert(m);
+
+ /* Iterate through all triggered pages and mark their files as invalidated. */
+ for (;;) {
+ bool ours;
+ void *addr;
+
+ r = sigbus_pop(&addr);
+ if (_likely_(r == 0))
+ break;
+ if (r < 0) {
+ log_error_errno(r, "SIGBUS handling failed: %m");
+ abort();
+ }
+
+ ours = false;
+ HASHMAP_FOREACH(f, m->fds) {
+ LIST_FOREACH(windows, w, f->windows)
+ if (window_matches_by_addr(w, f, addr, 1)) {
+ found = ours = f->sigbus = true;
+ break;
+ }
+
+ if (ours)
+ break;
+ }
+
+ /* Didn't find a matching window, give up. */
+ if (!ours) {
+ log_error("Unknown SIGBUS page, aborting.");
+ abort();
+ }
+ }
+
+ /* The list of triggered pages is now empty. Now, let's remap all windows of the triggered file to
+ * anonymous maps, so that no page of the file in question is triggered again, so that we can be sure
+ * not to hit the queue size limit. */
+ if (_likely_(!found))
+ return;
+
+ HASHMAP_FOREACH(f, m->fds) {
+ if (!f->sigbus)
+ continue;
+
+ LIST_FOREACH(windows, w, f->windows)
+ window_invalidate(w);
+ }
+}
+
+bool mmap_cache_fd_got_sigbus(MMapFileDescriptor *f) {
+ assert(f);
+
+ mmap_cache_process_sigbus(f->cache);
+
+ return f->sigbus;
+}
+
+int mmap_cache_add_fd(MMapCache *m, int fd, int prot, MMapFileDescriptor **ret) {
+ _cleanup_free_ MMapFileDescriptor *f = NULL;
+ MMapFileDescriptor *existing;
+ int r;
+
+ assert(m);
+ assert(fd >= 0);
+
+ existing = hashmap_get(m->fds, FD_TO_PTR(fd));
+ if (existing) {
+ if (existing->prot != prot)
+ return -EEXIST;
+ if (ret)
+ *ret = existing;
+ return 0;
+ }
+
+ f = new(MMapFileDescriptor, 1);
+ if (!f)
+ return -ENOMEM;
+
+ *f = (MMapFileDescriptor) {
+ .fd = fd,
+ .prot = prot,
+ };
+
+ r = hashmap_ensure_put(&m->fds, NULL, FD_TO_PTR(fd), f);
+ if (r < 0)
+ return r;
+ assert(r > 0);
+
+ f->cache = mmap_cache_ref(m);
+
+ if (ret)
+ *ret = f;
+
+ TAKE_PTR(f);
+ return 1;
+}
+
+MMapFileDescriptor* mmap_cache_fd_free(MMapFileDescriptor *f) {
+ if (!f)
+ return NULL;
+
+ /* Make sure that any queued SIGBUS are first dispatched, so that we don't end up with a SIGBUS entry
+ * we cannot relate to any existing memory map. */
+
+ mmap_cache_process_sigbus(f->cache);
+
+ while (f->windows)
+ window_free(f->windows);
+
+ assert_se(hashmap_remove(f->cache->fds, FD_TO_PTR(f->fd)) == f);
+
+ /* Unref the cache at the end. Otherwise, the assertions in mmap_cache_free() may be triggered. */
+ f->cache = mmap_cache_unref(f->cache);
+
+ return mfree(f);
+}
+
+MMapCache* mmap_cache_fd_cache(MMapFileDescriptor *f) {
+ assert(f);
+ return ASSERT_PTR(f->cache);
+}
diff --git a/src/libsystemd/sd-journal/mmap-cache.h b/src/libsystemd/sd-journal/mmap-cache.h
new file mode 100644
index 0000000..1fbc236
--- /dev/null
+++ b/src/libsystemd/sd-journal/mmap-cache.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+#pragma once
+
+#include <errno.h>
+#include <stdbool.h>
+#include <sys/stat.h>
+
+#include "journal-def.h"
+
+typedef struct MMapCache MMapCache;
+typedef struct MMapFileDescriptor MMapFileDescriptor;
+
+typedef enum MMapCacheCategory {
+ MMAP_CACHE_CATEGORY_ANY = OBJECT_UNUSED,
+ MMAP_CACHE_CATEGORY_DATA = OBJECT_DATA,
+ MMAP_CACHE_CATEGORY_FIELD = OBJECT_FIELD,
+ MMAP_CACHE_CATEGORY_ENTRY = OBJECT_ENTRY,
+ MMAP_CACHE_CATEGORY_DATA_HASH_TABLE = OBJECT_DATA_HASH_TABLE,
+ MMAP_CACHE_CATEGORY_FIELD_HASH_TABLE = OBJECT_FIELD_HASH_TABLE,
+ MMAP_CACHE_CATEGORY_ENTRY_ARRAY = OBJECT_ENTRY_ARRAY,
+ MMAP_CACHE_CATEGORY_TAG = OBJECT_TAG,
+ MMAP_CACHE_CATEGORY_HEADER, /* for reading file header */
+        MMAP_CACHE_CATEGORY_PIN,              /* for temporarily pinning an object */
+ _MMAP_CACHE_CATEGORY_MAX,
+ _MMAP_CACHE_CATEGORY_INVALID = -EINVAL,
+} MMapCacheCategory;
+
+assert_cc((int) _OBJECT_TYPE_MAX < (int) _MMAP_CACHE_CATEGORY_MAX);
+
+static inline MMapCacheCategory type_to_category(ObjectType type) {
+ return type >= 0 && type < _OBJECT_TYPE_MAX ? (MMapCacheCategory) type : MMAP_CACHE_CATEGORY_ANY;
+}
+
+MMapCache* mmap_cache_new(void);
+MMapCache* mmap_cache_ref(MMapCache *m);
+MMapCache* mmap_cache_unref(MMapCache *m);
+DEFINE_TRIVIAL_CLEANUP_FUNC(MMapCache*, mmap_cache_unref);
+
+int mmap_cache_fd_get(
+ MMapFileDescriptor *f,
+ MMapCacheCategory c,
+ bool keep_always,
+ uint64_t offset,
+ size_t size,
+ struct stat *st,
+ void **ret);
+
+int mmap_cache_fd_pin(
+ MMapFileDescriptor *f,
+ MMapCacheCategory c,
+ void *addr,
+ size_t size);
+
+int mmap_cache_add_fd(MMapCache *m, int fd, int prot, MMapFileDescriptor **ret);
+MMapCache* mmap_cache_fd_cache(MMapFileDescriptor *f);
+MMapFileDescriptor* mmap_cache_fd_free(MMapFileDescriptor *f);
+
+void mmap_cache_stats_log_debug(MMapCache *m);
+
+bool mmap_cache_fd_got_sigbus(MMapFileDescriptor *f);
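+
+/* Illustrative only, not part of the upstream header: a minimal sketch of the typical call sequence. The
+ * function name and the 8-byte peek are invented for the example, and PROT_READ additionally assumes
+ * <sys/mman.h>. */
+#if 0
+static int example_peek_first_bytes(int fd, uint8_t ret[8]) {
+        MMapCache *cache;
+        MMapFileDescriptor *f;
+        void *p;
+        int r;
+
+        cache = mmap_cache_new();
+        if (!cache)
+                return -ENOMEM;
+
+        /* Register an already opened, readable fd with the cache. */
+        r = mmap_cache_add_fd(cache, fd, PROT_READ, &f);
+        if (r < 0) {
+                mmap_cache_unref(cache);
+                return r;
+        }
+
+        /* Map the beginning of the file through the HEADER category and copy out the first 8 bytes. */
+        r = mmap_cache_fd_get(f, MMAP_CACHE_CATEGORY_HEADER, /* keep_always= */ false, 0, 8, NULL, &p);
+        if (r >= 0)
+                for (size_t i = 0; i < 8; i++)
+                        ret[i] = ((const uint8_t*) p)[i];
+
+        /* The fd object holds a reference to the cache; free it first, then drop our own reference. */
+        mmap_cache_fd_free(f);
+        mmap_cache_unref(cache);
+        return r;
+}
+#endif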
diff --git a/src/libsystemd/sd-journal/sd-journal.c b/src/libsystemd/sd-journal/sd-journal.c
new file mode 100644
index 0000000..6b9ff0a
--- /dev/null
+++ b/src/libsystemd/sd-journal/sd-journal.c
@@ -0,0 +1,3528 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <linux/magic.h>
+#include <poll.h>
+#include <stddef.h>
+#include <sys/inotify.h>
+#include <sys/vfs.h>
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "catalog.h"
+#include "compress.h"
+#include "dirent-util.h"
+#include "env-file.h"
+#include "escape.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "format-util.h"
+#include "fs-util.h"
+#include "hashmap.h"
+#include "hostname-util.h"
+#include "id128-util.h"
+#include "inotify-util.h"
+#include "io-util.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "journal-internal.h"
+#include "list.h"
+#include "lookup3.h"
+#include "nulstr-util.h"
+#include "origin-id.h"
+#include "path-util.h"
+#include "prioq.h"
+#include "process-util.h"
+#include "replace-var.h"
+#include "stat-util.h"
+#include "stdio-util.h"
+#include "string-util.h"
+#include "strv.h"
+#include "syslog-util.h"
+#include "uid-alloc-range.h"
+
+#define JOURNAL_FILES_RECHECK_USEC (2 * USEC_PER_SEC)
+
+/* The maximum size of variable values we'll expand in catalog entries. We bind this to PATH_MAX for now, as
+ * we want to be able to show all officially valid paths at least */
+#define REPLACE_VAR_MAX PATH_MAX
+
+#define DEFAULT_DATA_THRESHOLD (64*1024)
+
+DEFINE_PRIVATE_ORIGIN_ID_HELPERS(sd_journal, journal);
+
+static void remove_file_real(sd_journal *j, JournalFile *f);
+static int journal_file_read_tail_timestamp(sd_journal *j, JournalFile *f);
+static void journal_file_unlink_newest_by_boot_id(sd_journal *j, JournalFile *f);
+
+static int journal_put_error(sd_journal *j, int r, const char *path) {
+ _cleanup_free_ char *copy = NULL;
+ int k;
+
+ /* Memorize an error we encountered, and store which
+ * file/directory it was generated from. Note that we store
+ * only *one* path per error code, as the error code is the
+ * key into the hashmap, and the path is the value. This means
+         * we keep track of all error kinds, but not of all error
+ * locations. This has the benefit that the hashmap cannot
+ * grow beyond bounds.
+ *
+ * We return an error here only if we didn't manage to
+ * memorize the real error. */
+
+ if (r >= 0)
+ return r;
+
+ if (path) {
+ copy = strdup(path);
+ if (!copy)
+ return -ENOMEM;
+ }
+
+ k = hashmap_ensure_put(&j->errors, NULL, INT_TO_PTR(r), copy);
+ if (k < 0) {
+ if (k == -EEXIST)
+ return 0;
+
+ return k;
+ }
+
+ TAKE_PTR(copy);
+ return 0;
+}
+
+static void detach_location(sd_journal *j) {
+ JournalFile *f;
+
+ assert(j);
+
+ j->current_file = NULL;
+ j->current_field = 0;
+
+ ORDERED_HASHMAP_FOREACH(f, j->files)
+ journal_file_reset_location(f);
+}
+
+static void init_location(Location *l, LocationType type, JournalFile *f, Object *o) {
+ assert(l);
+ assert(IN_SET(type, LOCATION_DISCRETE, LOCATION_SEEK));
+ assert(f);
+
+ *l = (Location) {
+ .type = type,
+ .seqnum = le64toh(o->entry.seqnum),
+ .seqnum_id = f->header->seqnum_id,
+ .realtime = le64toh(o->entry.realtime),
+ .monotonic = le64toh(o->entry.monotonic),
+ .boot_id = o->entry.boot_id,
+ .xor_hash = le64toh(o->entry.xor_hash),
+ .seqnum_set = true,
+ .realtime_set = true,
+ .monotonic_set = true,
+ .xor_hash_set = true,
+ };
+}
+
+static void set_location(sd_journal *j, JournalFile *f, Object *o) {
+ assert(j);
+ assert(f);
+ assert(o);
+
+ init_location(&j->current_location, LOCATION_DISCRETE, f, o);
+
+ j->current_file = f;
+ j->current_field = 0;
+
+ /* Let f know its candidate entry was picked. */
+ assert(f->location_type == LOCATION_SEEK);
+ f->location_type = LOCATION_DISCRETE;
+}
+
+static int match_is_valid(const void *data, size_t size) {
+ const char *b = ASSERT_PTR(data);
+
+ if (size < 2)
+ return false;
+
+ if (((char*) data)[0] == '_' && ((char*) data)[1] == '_')
+ return false;
+
+ for (const char *p = b; p < b + size; p++) {
+
+ if (*p == '=')
+ return p > b;
+
+ if (*p == '_')
+ continue;
+
+ if (*p >= 'A' && *p <= 'Z')
+ continue;
+
+ if (ascii_isdigit(*p))
+ continue;
+
+ return false;
+ }
+
+ return false;
+}
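+
+/* For example (illustrative strings only): "_PID=4711" and "MESSAGE=foo" are accepted, while "message=foo"
+ * (lowercase field name), "FOO" (no '='), and "__CURSOR=x" (reserved "__" prefix) are all rejected. */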
+
+static bool same_field(const void *_a, size_t s, const void *_b, size_t t) {
+ const uint8_t *a = _a, *b = _b;
+
+ for (size_t j = 0; j < s && j < t; j++) {
+
+ if (a[j] != b[j])
+ return false;
+
+ if (a[j] == '=')
+ return true;
+ }
+
+ assert_not_reached();
+}
+
+static Match *match_new(Match *p, MatchType t) {
+ Match *m;
+
+ m = new(Match, 1);
+ if (!m)
+ return NULL;
+
+ *m = (Match) {
+ .type = t,
+ .parent = p,
+ };
+
+ if (p)
+ LIST_PREPEND(matches, p->matches, m);
+
+ return m;
+}
+
+static Match *match_free(Match *m) {
+ assert(m);
+
+ while (m->matches)
+ match_free(m->matches);
+
+ if (m->parent)
+ LIST_REMOVE(matches, m->parent->matches, m);
+
+ free(m->data);
+ return mfree(m);
+}
+
+static Match *match_free_if_empty(Match *m) {
+ if (!m || m->matches)
+ return m;
+
+ return match_free(m);
+}
+
+_public_ int sd_journal_add_match(sd_journal *j, const void *data, size_t size) {
+ Match *add_here = NULL, *m = NULL;
+ uint64_t hash;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(data, -EINVAL);
+
+ if (size == 0)
+ size = strlen(data);
+
+ if (!match_is_valid(data, size))
+ return -EINVAL;
+
+ /* level 0: AND term
+ * level 1: OR terms
+ * level 2: AND terms
+ * level 3: OR terms
+ * level 4: concrete matches */
+
+ if (!j->level0) {
+ j->level0 = match_new(NULL, MATCH_AND_TERM);
+ if (!j->level0)
+ return -ENOMEM;
+ }
+
+ if (!j->level1) {
+ j->level1 = match_new(j->level0, MATCH_OR_TERM);
+ if (!j->level1)
+ return -ENOMEM;
+ }
+
+ if (!j->level2) {
+ j->level2 = match_new(j->level1, MATCH_AND_TERM);
+ if (!j->level2)
+ return -ENOMEM;
+ }
+
+ assert(j->level0->type == MATCH_AND_TERM);
+ assert(j->level1->type == MATCH_OR_TERM);
+ assert(j->level2->type == MATCH_AND_TERM);
+
+ /* Old-style Jenkins (unkeyed) hashing only here. We do not cover new-style siphash (keyed) hashing
+ * here, since it's different for each file, and thus can't be pre-calculated in the Match object. */
+ hash = jenkins_hash64(data, size);
+
+ LIST_FOREACH(matches, l3, j->level2->matches) {
+ assert(l3->type == MATCH_OR_TERM);
+
+ LIST_FOREACH(matches, l4, l3->matches) {
+ assert(l4->type == MATCH_DISCRETE);
+
+ /* Exactly the same match already? Then ignore
+ * this addition */
+ if (l4->hash == hash &&
+ l4->size == size &&
+ memcmp(l4->data, data, size) == 0)
+ return 0;
+
+ /* Same field? Then let's add this to this OR term */
+ if (same_field(data, size, l4->data, l4->size)) {
+ add_here = l3;
+ break;
+ }
+ }
+
+ if (add_here)
+ break;
+ }
+
+ if (!add_here) {
+ add_here = match_new(j->level2, MATCH_OR_TERM);
+ if (!add_here)
+ goto fail;
+ }
+
+ m = match_new(add_here, MATCH_DISCRETE);
+ if (!m)
+ goto fail;
+
+ m->hash = hash;
+ m->size = size;
+ m->data = memdup(data, size);
+ if (!m->data)
+ goto fail;
+
+ detach_location(j);
+
+ return 0;
+
+fail:
+ match_free(m);
+ match_free_if_empty(add_here);
+ j->level2 = match_free_if_empty(j->level2);
+ j->level1 = match_free_if_empty(j->level1);
+ j->level0 = match_free_if_empty(j->level0);
+
+ return -ENOMEM;
+}
+
+_public_ int sd_journal_add_conjunction(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ if (!j->level0)
+ return 0;
+
+ if (!j->level1)
+ return 0;
+
+ if (!j->level1->matches)
+ return 0;
+
+ j->level1 = NULL;
+ j->level2 = NULL;
+
+ return 0;
+}
+
+_public_ int sd_journal_add_disjunction(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ if (!j->level0)
+ return 0;
+
+ if (!j->level1)
+ return 0;
+
+ if (!j->level2)
+ return 0;
+
+ if (!j->level2->matches)
+ return 0;
+
+ j->level2 = NULL;
+ return 0;
+}
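+
+/* Illustrative usage sketch, mirroring the classic example from the sd_journal_add_match(3) man page (the
+ * concrete match strings are just examples): matches for different fields are ANDed, matches for the same
+ * field are ORed, and sd_journal_add_disjunction() opens a new alternative:
+ *
+ *     sd_journal_add_match(j, "_SYSTEMD_UNIT=avahi-daemon.service", 0);
+ *     sd_journal_add_match(j, "_PID=28097", 0);
+ *     sd_journal_add_disjunction(j);
+ *     sd_journal_add_match(j, "_SYSTEMD_UNIT=dbus.service", 0);
+ *
+ * selects entries matching (_SYSTEMD_UNIT=avahi-daemon.service AND _PID=28097) OR
+ * _SYSTEMD_UNIT=dbus.service. */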
+
+static char *match_make_string(Match *m) {
+ _cleanup_free_ char *p = NULL;
+ bool enclose = false;
+
+ if (!m)
+ return strdup("none");
+
+ if (m->type == MATCH_DISCRETE)
+ return cescape_length(m->data, m->size);
+
+ LIST_FOREACH(matches, i, m->matches) {
+ _cleanup_free_ char *t = NULL;
+
+ t = match_make_string(i);
+ if (!t)
+ return NULL;
+
+ if (p) {
+ if (!strextend(&p, m->type == MATCH_OR_TERM ? " OR " : " AND ", t))
+ return NULL;
+
+ enclose = true;
+ } else
+ p = TAKE_PTR(t);
+ }
+
+ if (enclose)
+ return strjoin("(", p, ")");
+
+ return TAKE_PTR(p);
+}
+
+char *journal_make_match_string(sd_journal *j) {
+ assert(j);
+
+ return match_make_string(j->level0);
+}
+
+_public_ void sd_journal_flush_matches(sd_journal *j) {
+ if (!j || journal_origin_changed(j))
+ return;
+
+ if (j->level0)
+ match_free(j->level0);
+
+ j->level0 = j->level1 = j->level2 = NULL;
+
+ detach_location(j);
+}
+
+static int journal_file_find_newest_for_boot_id(
+ sd_journal *j,
+ sd_id128_t id,
+ JournalFile **ret) {
+
+ JournalFile *prev = NULL;
+ int r;
+
+ assert(j);
+ assert(ret);
+
+ /* Before we use it, let's refresh the timestamp from the header, and reshuffle our prioq
+ * accordingly. We do this only a bunch of times, to not be caught in some update loop. */
+ for (unsigned n_tries = 0;; n_tries++) {
+ JournalFile *f;
+ Prioq *q;
+
+ q = hashmap_get(j->newest_by_boot_id, &id);
+ if (!q)
+ return log_debug_errno(SYNTHETIC_ERRNO(ENODATA),
+ "Requested delta for boot ID %s, but we have no information about that boot ID.", SD_ID128_TO_STRING(id));
+
+ assert_se(f = prioq_peek(q)); /* we delete hashmap entries once the prioq is empty, so this must hold */
+
+ if (f == prev || n_tries >= 5) {
+ /* This was already the best answer in the previous run, or we tried too often, use it */
+ *ret = f;
+ return 0;
+ }
+
+ prev = f;
+
+ /* Let's read the journal file's current timestamp once, before we return it, maybe it has changed. */
+ r = journal_file_read_tail_timestamp(j, f);
+ if (r < 0)
+ return log_debug_errno(r, "Failed to read tail timestamp while trying to find newest journal file for boot ID %s.", SD_ID128_TO_STRING(id));
+
+ /* Refreshing the timestamp we read might have reshuffled the prioq, hence let's check the
+ * prioq again and only use the information once we reached an equilibrium or hit a limit */
+ }
+}
+
+static int compare_boot_ids(sd_journal *j, sd_id128_t a, sd_id128_t b) {
+ JournalFile *x, *y;
+
+ assert(j);
+
+ /* Try to find the newest open journal file for the two boot ids */
+ if (journal_file_find_newest_for_boot_id(j, a, &x) < 0 ||
+ journal_file_find_newest_for_boot_id(j, b, &y) < 0)
+ return 0;
+
+ /* Only compare the boot id timestamps if they originate from the same machine. If they are from
+         * different machines, then the timestamps of the boot IDs might be as off as the timestamps on the
+ * entries and hence not useful for comparing. */
+ if (!sd_id128_equal(x->newest_machine_id, y->newest_machine_id))
+ return 0;
+
+ return CMP(x->newest_realtime_usec, y->newest_realtime_usec);
+}
+
+static int compare_with_location(
+ sd_journal *j,
+ const JournalFile *f,
+ const Location *l,
+ const JournalFile *current_file) {
+ int r;
+
+ assert(j);
+ assert(f);
+ assert(l);
+ assert(f->location_type == LOCATION_SEEK);
+ assert(IN_SET(l->type, LOCATION_DISCRETE, LOCATION_SEEK));
+
+ if (l->monotonic_set &&
+ sd_id128_equal(f->current_boot_id, l->boot_id) &&
+ l->realtime_set &&
+ f->current_realtime == l->realtime &&
+ l->xor_hash_set &&
+ f->current_xor_hash == l->xor_hash &&
+ l->seqnum_set &&
+ sd_id128_equal(f->header->seqnum_id, l->seqnum_id) &&
+ f->current_seqnum == l->seqnum &&
+ f != current_file)
+ return 0;
+
+ if (l->seqnum_set &&
+ sd_id128_equal(f->header->seqnum_id, l->seqnum_id)) {
+ r = CMP(f->current_seqnum, l->seqnum);
+ if (r != 0)
+ return r;
+ }
+
+ if (l->monotonic_set) {
+ /* If both arguments have the same boot ID, then we can compare the monotonic timestamps. If
+                 * they are distinct, then we might be able to look up the timestamps of those boot IDs (if they
+ * are from the same machine) and order by that. */
+ if (sd_id128_equal(f->current_boot_id, l->boot_id))
+ r = CMP(f->current_monotonic, l->monotonic);
+ else
+ r = compare_boot_ids(j, f->current_boot_id, l->boot_id);
+ if (r != 0)
+ return r;
+ }
+
+ if (l->realtime_set) {
+ r = CMP(f->current_realtime, l->realtime);
+ if (r != 0)
+ return r;
+ }
+
+ if (l->xor_hash_set) {
+ r = CMP(f->current_xor_hash, l->xor_hash);
+ if (r != 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int next_for_match(
+ sd_journal *j,
+ Match *m,
+ JournalFile *f,
+ uint64_t after_offset,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+
+ int r;
+ uint64_t np = 0;
+
+ assert(j);
+ assert(m);
+ assert(f);
+
+ if (m->type == MATCH_DISCRETE) {
+ Object *d;
+ uint64_t hash;
+
+ /* If the keyed hash logic is used, we need to calculate the hash fresh per file. Otherwise
+ * we can use what we pre-calculated. */
+ if (JOURNAL_HEADER_KEYED_HASH(f->header))
+ hash = journal_file_hash_data(f, m->data, m->size);
+ else
+ hash = m->hash;
+
+ r = journal_file_find_data_object_with_hash(f, m->data, m->size, hash, &d, NULL);
+ if (r <= 0)
+ return r;
+
+ return journal_file_move_to_entry_by_offset_for_data(f, d, after_offset, direction, ret, offset);
+
+ } else if (m->type == MATCH_OR_TERM) {
+
+ /* Find the earliest match beyond after_offset */
+
+ LIST_FOREACH(matches, i, m->matches) {
+ uint64_t cp;
+
+ r = next_for_match(j, i, f, after_offset, direction, NULL, &cp);
+ if (r < 0)
+ return r;
+ else if (r > 0) {
+ if (np == 0 || (direction == DIRECTION_DOWN ? cp < np : cp > np))
+ np = cp;
+ }
+ }
+
+ if (np == 0)
+ return 0;
+
+ } else if (m->type == MATCH_AND_TERM) {
+ Match *last_moved;
+
+ /* Always jump to the next matching entry and repeat
+ * this until we find an offset that matches for all
+ * matches. */
+
+ if (!m->matches)
+ return 0;
+
+ r = next_for_match(j, m->matches, f, after_offset, direction, NULL, &np);
+ if (r <= 0)
+ return r;
+
+ assert(direction == DIRECTION_DOWN ? np >= after_offset : np <= after_offset);
+ last_moved = m->matches;
+
+ LIST_LOOP_BUT_ONE(matches, i, m->matches, last_moved) {
+ uint64_t cp;
+
+ r = next_for_match(j, i, f, np, direction, NULL, &cp);
+ if (r <= 0)
+ return r;
+
+ assert(direction == DIRECTION_DOWN ? cp >= np : cp <= np);
+ if (direction == DIRECTION_DOWN ? cp > np : cp < np) {
+ np = cp;
+ last_moved = i;
+ }
+ }
+ }
+
+ assert(np > 0);
+
+ if (ret) {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, np, ret);
+ if (r < 0)
+ return r;
+ }
+
+ if (offset)
+ *offset = np;
+
+ return 1;
+}
+
+static int find_location_for_match(
+ sd_journal *j,
+ Match *m,
+ JournalFile *f,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+
+ int r;
+
+ assert(j);
+ assert(m);
+ assert(f);
+
+ if (m->type == MATCH_DISCRETE) {
+ Object *d;
+ uint64_t dp, hash;
+
+ if (JOURNAL_HEADER_KEYED_HASH(f->header))
+ hash = journal_file_hash_data(f, m->data, m->size);
+ else
+ hash = m->hash;
+
+ r = journal_file_find_data_object_with_hash(f, m->data, m->size, hash, &d, &dp);
+ if (r <= 0)
+ return r;
+
+ /* FIXME: missing: find by monotonic */
+
+ if (j->current_location.type == LOCATION_HEAD)
+ return direction == DIRECTION_DOWN ? journal_file_move_to_entry_for_data(f, d, DIRECTION_DOWN, ret, offset) : 0;
+ if (j->current_location.type == LOCATION_TAIL)
+ return direction == DIRECTION_UP ? journal_file_move_to_entry_for_data(f, d, DIRECTION_UP, ret, offset) : 0;
+ if (j->current_location.seqnum_set && sd_id128_equal(j->current_location.seqnum_id, f->header->seqnum_id))
+ return journal_file_move_to_entry_by_seqnum_for_data(f, d, j->current_location.seqnum, direction, ret, offset);
+ if (j->current_location.monotonic_set) {
+ r = journal_file_move_to_entry_by_monotonic_for_data(f, d, j->current_location.boot_id, j->current_location.monotonic, direction, ret, offset);
+ if (r != 0)
+ return r;
+
+ /* The data object might have been invalidated. */
+ r = journal_file_move_to_object(f, OBJECT_DATA, dp, &d);
+ if (r < 0)
+ return r;
+ }
+ if (j->current_location.realtime_set)
+ return journal_file_move_to_entry_by_realtime_for_data(f, d, j->current_location.realtime, direction, ret, offset);
+
+ return journal_file_move_to_entry_for_data(f, d, direction, ret, offset);
+
+ } else if (m->type == MATCH_OR_TERM) {
+ uint64_t np = 0;
+
+ /* Find the earliest match */
+
+ LIST_FOREACH(matches, i, m->matches) {
+ uint64_t cp;
+
+ r = find_location_for_match(j, i, f, direction, NULL, &cp);
+ if (r < 0)
+ return r;
+ else if (r > 0) {
+ if (np == 0 || (direction == DIRECTION_DOWN ? np > cp : np < cp))
+ np = cp;
+ }
+ }
+
+ if (np == 0)
+ return 0;
+
+ if (ret) {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, np, ret);
+ if (r < 0)
+ return r;
+ }
+
+ if (offset)
+ *offset = np;
+
+ return 1;
+
+ } else {
+ uint64_t np = 0;
+
+ assert(m->type == MATCH_AND_TERM);
+
+ /* First jump to the last match, and then find the
+ * next one where all matches match */
+
+ if (!m->matches)
+ return 0;
+
+ LIST_FOREACH(matches, i, m->matches) {
+ uint64_t cp;
+
+ r = find_location_for_match(j, i, f, direction, NULL, &cp);
+ if (r <= 0)
+ return r;
+
+ if (np == 0 || (direction == DIRECTION_DOWN ? cp > np : cp < np))
+ np = cp;
+ }
+
+ return next_for_match(j, m, f, np, direction, ret, offset);
+ }
+}
+
+static int find_location_with_matches(
+ sd_journal *j,
+ JournalFile *f,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+
+ int r;
+
+ assert(j);
+ assert(f);
+ assert(ret);
+ assert(offset);
+
+ if (!j->level0) {
+ /* No matches is simple */
+
+ if (j->current_location.type == LOCATION_HEAD)
+ return direction == DIRECTION_DOWN ? journal_file_next_entry(f, 0, DIRECTION_DOWN, ret, offset) : 0;
+ if (j->current_location.type == LOCATION_TAIL)
+ return direction == DIRECTION_UP ? journal_file_next_entry(f, 0, DIRECTION_UP, ret, offset) : 0;
+ if (j->current_location.seqnum_set && sd_id128_equal(j->current_location.seqnum_id, f->header->seqnum_id))
+ return journal_file_move_to_entry_by_seqnum(f, j->current_location.seqnum, direction, ret, offset);
+ if (j->current_location.monotonic_set) {
+ r = journal_file_move_to_entry_by_monotonic(f, j->current_location.boot_id, j->current_location.monotonic, direction, ret, offset);
+ if (r != 0)
+ return r;
+ }
+ if (j->current_location.realtime_set)
+ return journal_file_move_to_entry_by_realtime(f, j->current_location.realtime, direction, ret, offset);
+
+ return journal_file_next_entry(f, 0, direction, ret, offset);
+ } else
+ return find_location_for_match(j, j->level0, f, direction, ret, offset);
+}
+
+static int next_with_matches(
+ sd_journal *j,
+ JournalFile *f,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+
+ assert(j);
+ assert(f);
+ assert(ret);
+ assert(offset);
+
+        /* The case of no matches is easy: we simply advance the file
+         * pointer by one. */
+ if (!j->level0)
+ return journal_file_next_entry(f, f->current_offset, direction, ret, offset);
+
+ /* If we have a match then we look for the next matching entry
+ * with an offset at least one step larger */
+ return next_for_match(j, j->level0, f,
+ direction == DIRECTION_DOWN ? f->current_offset + 1
+ : f->current_offset - 1,
+ direction, ret, offset);
+}
+
+static int next_beyond_location(sd_journal *j, JournalFile *f, direction_t direction) {
+ Object *c;
+ uint64_t cp, n_entries;
+ int r;
+
+ assert(j);
+ assert(f);
+
+ (void) journal_file_read_tail_timestamp(j, f);
+
+ n_entries = le64toh(f->header->n_entries);
+
+ /* If we hit EOF before, we don't need to look into this file again
+ * unless direction changed or new entries appeared. */
+ if (f->last_direction == direction &&
+ f->location_type == (direction == DIRECTION_DOWN ? LOCATION_TAIL : LOCATION_HEAD) &&
+ n_entries == f->last_n_entries)
+ return 0;
+
+ f->last_n_entries = n_entries;
+
+ if (f->last_direction == direction && f->current_offset > 0) {
+ /* LOCATION_SEEK here means we did the work in a previous
+ * iteration and the current location already points to a
+ * candidate entry. */
+ if (f->location_type != LOCATION_SEEK) {
+ r = next_with_matches(j, f, direction, &c, &cp);
+ if (r <= 0)
+ return r;
+
+ journal_file_save_location(f, c, cp);
+ }
+ } else {
+ f->last_direction = direction;
+
+ r = find_location_with_matches(j, f, direction, &c, &cp);
+ if (r <= 0)
+ return r;
+
+ journal_file_save_location(f, c, cp);
+ }
+
+ /* OK, we found the spot, now let's advance until an entry
+ * that is actually different from what we were previously
+ * looking at. This is necessary to handle entries which exist
+ * in two (or more) journal files, and which shall all be
+ * suppressed but one. */
+
+ for (;;) {
+ bool found;
+
+ if (j->current_location.type == LOCATION_DISCRETE) {
+ int k;
+
+ k = compare_with_location(j, f, &j->current_location, j->current_file);
+
+ found = direction == DIRECTION_DOWN ? k > 0 : k < 0;
+ } else
+ found = true;
+
+ if (found)
+ return 1;
+
+ r = next_with_matches(j, f, direction, &c, &cp);
+ if (r <= 0)
+ return r;
+
+ journal_file_save_location(f, c, cp);
+ }
+}
+
+static int compare_locations(sd_journal *j, JournalFile *af, JournalFile *bf) {
+ int r;
+
+ assert(j);
+ assert(af);
+ assert(af->header);
+ assert(bf);
+ assert(bf->header);
+ assert(af->location_type == LOCATION_SEEK);
+ assert(bf->location_type == LOCATION_SEEK);
+
+ /* If contents, timestamps and seqnum match, these entries are identical. */
+ if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
+ af->current_monotonic == bf->current_monotonic &&
+ af->current_realtime == bf->current_realtime &&
+ af->current_xor_hash == bf->current_xor_hash &&
+ sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id) &&
+ af->current_seqnum == bf->current_seqnum)
+ return 0;
+
+ if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {
+ /* If this is from the same seqnum source, compare seqnums */
+ r = CMP(af->current_seqnum, bf->current_seqnum);
+ if (r != 0)
+ return r;
+
+ /* Wow! This is weird, different data but the same seqnums? Something is borked, but let's
+ * make the best of it and compare by time. */
+ }
+
+ if (sd_id128_equal(af->current_boot_id, bf->current_boot_id))
+ /* If the boot id matches, compare monotonic time */
+ r = CMP(af->current_monotonic, bf->current_monotonic);
+ else
+ /* If they don't match try to compare boot IDs */
+ r = compare_boot_ids(j, af->current_boot_id, bf->current_boot_id);
+ if (r != 0)
+ return r;
+
+ /* Otherwise, compare UTC time */
+ r = CMP(af->current_realtime, bf->current_realtime);
+ if (r != 0)
+ return r;
+
+ /* Finally, compare by contents */
+ return CMP(af->current_xor_hash, bf->current_xor_hash);
+}
+
+static int real_journal_next(sd_journal *j, direction_t direction) {
+ JournalFile *new_file = NULL;
+ unsigned n_files;
+ const void **files;
+ Object *o;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ r = iterated_cache_get(j->files_cache, NULL, &files, &n_files);
+ if (r < 0)
+ return r;
+
+ for (unsigned i = 0; i < n_files; i++) {
+ JournalFile *f = (JournalFile *)files[i];
+ bool found;
+
+ r = next_beyond_location(j, f, direction);
+ if (r < 0) {
+ log_debug_errno(r, "Can't iterate through %s, ignoring: %m", f->path);
+ remove_file_real(j, f);
+ continue;
+ } else if (r == 0) {
+ f->location_type = direction == DIRECTION_DOWN ? LOCATION_TAIL : LOCATION_HEAD;
+ continue;
+ }
+
+ if (!new_file)
+ found = true;
+ else {
+ int k;
+
+ k = compare_locations(j, f, new_file);
+
+ found = direction == DIRECTION_DOWN ? k < 0 : k > 0;
+ }
+
+ if (found)
+ new_file = f;
+ }
+
+ if (!new_file)
+ return 0;
+
+ r = journal_file_move_to_object(new_file, OBJECT_ENTRY, new_file->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ set_location(j, new_file, o);
+
+ return 1;
+}
+
+_public_ int sd_journal_next(sd_journal *j) {
+ return real_journal_next(j, DIRECTION_DOWN);
+}
+
+_public_ int sd_journal_previous(sd_journal *j) {
+ return real_journal_next(j, DIRECTION_UP);
+}
+
+_public_ int sd_journal_step_one(sd_journal *j, int advanced) {
+ assert_return(j, -EINVAL);
+
+ if (j->current_location.type == LOCATION_HEAD)
+ return sd_journal_next(j);
+ if (j->current_location.type == LOCATION_TAIL)
+ return sd_journal_previous(j);
+ return real_journal_next(j, advanced ? DIRECTION_DOWN : DIRECTION_UP);
+}
+
+static int real_journal_next_skip(sd_journal *j, direction_t direction, uint64_t skip) {
+ int c = 0, r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(skip <= INT_MAX, -ERANGE);
+
+ if (skip == 0) {
+ /* If this is not a discrete skip, then at least
+ * resolve the current location */
+ if (j->current_location.type != LOCATION_DISCRETE) {
+ r = real_journal_next(j, direction);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+ }
+
+ do {
+ r = real_journal_next(j, direction);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ return c;
+
+ skip--;
+ c++;
+ } while (skip > 0);
+
+ return c;
+}
+
+_public_ int sd_journal_next_skip(sd_journal *j, uint64_t skip) {
+ return real_journal_next_skip(j, DIRECTION_DOWN, skip);
+}
+
+_public_ int sd_journal_previous_skip(sd_journal *j, uint64_t skip) {
+ return real_journal_next_skip(j, DIRECTION_UP, skip);
+}
+
+_public_ int sd_journal_get_cursor(sd_journal *j, char **cursor) {
+ Object *o;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(cursor, -EINVAL);
+
+ if (!j->current_file || j->current_file->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(j->current_file, OBJECT_ENTRY, j->current_file->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ if (asprintf(cursor,
+ "s=%s;i=%"PRIx64";b=%s;m=%"PRIx64";t=%"PRIx64";x=%"PRIx64,
+ SD_ID128_TO_STRING(j->current_file->header->seqnum_id), le64toh(o->entry.seqnum),
+ SD_ID128_TO_STRING(o->entry.boot_id), le64toh(o->entry.monotonic),
+ le64toh(o->entry.realtime),
+ le64toh(o->entry.xor_hash)) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
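+
+/* An illustrative cursor (all values invented for the example) hence looks like:
+ *
+ *     s=0123456789abcdef0123456789abcdef;i=2f;b=fedcba9876543210fedcba9876543210;m=1a2b3c;t=5e1f00a1b2c3d;x=89abcdef01234567
+ *
+ * i.e. seqnum ID, seqnum, boot ID, monotonic and realtime timestamps (all in hex) and the entry's XOR hash. */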
+
+_public_ int sd_journal_seek_cursor(sd_journal *j, const char *cursor) {
+ unsigned long long seqnum, monotonic, realtime, xor_hash;
+ bool seqnum_id_set = false,
+ seqnum_set = false,
+ boot_id_set = false,
+ monotonic_set = false,
+ realtime_set = false,
+ xor_hash_set = false;
+ sd_id128_t seqnum_id, boot_id;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(!isempty(cursor), -EINVAL);
+
+ for (const char *p = cursor;;) {
+ _cleanup_free_ char *word = NULL;
+
+ r = extract_first_word(&p, &word, ";", EXTRACT_DONT_COALESCE_SEPARATORS);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ break;
+
+ if (word[0] == '\0' || word[1] != '=')
+ return -EINVAL;
+
+ switch (word[0]) {
+ case 's':
+ seqnum_id_set = true;
+ r = sd_id128_from_string(word + 2, &seqnum_id);
+ if (r < 0)
+ return r;
+ break;
+
+ case 'i':
+ seqnum_set = true;
+ if (sscanf(word + 2, "%llx", &seqnum) != 1)
+ return -EINVAL;
+ break;
+
+ case 'b':
+ boot_id_set = true;
+ r = sd_id128_from_string(word + 2, &boot_id);
+ if (r < 0)
+ return r;
+ break;
+
+ case 'm':
+ monotonic_set = true;
+ if (sscanf(word + 2, "%llx", &monotonic) != 1)
+ return -EINVAL;
+ break;
+
+ case 't':
+ realtime_set = true;
+ if (sscanf(word + 2, "%llx", &realtime) != 1)
+ return -EINVAL;
+ break;
+
+ case 'x':
+ xor_hash_set = true;
+ if (sscanf(word + 2, "%llx", &xor_hash) != 1)
+ return -EINVAL;
+ break;
+ }
+ }
+
+ if ((!seqnum_set || !seqnum_id_set) &&
+ (!monotonic_set || !boot_id_set) &&
+ !realtime_set)
+ return -EINVAL;
+
+ detach_location(j);
+ j->current_location = (Location) {
+ .type = LOCATION_SEEK,
+ };
+
+ if (realtime_set) {
+ j->current_location.realtime = (uint64_t) realtime;
+ j->current_location.realtime_set = true;
+ }
+
+ if (seqnum_set && seqnum_id_set) {
+ j->current_location.seqnum = (uint64_t) seqnum;
+ j->current_location.seqnum_id = seqnum_id;
+ j->current_location.seqnum_set = true;
+ }
+
+ if (monotonic_set && boot_id_set) {
+ j->current_location.monotonic = (uint64_t) monotonic;
+ j->current_location.boot_id = boot_id;
+ j->current_location.monotonic_set = true;
+ }
+
+ if (xor_hash_set) {
+ j->current_location.xor_hash = (uint64_t) xor_hash;
+ j->current_location.xor_hash_set = true;
+ }
+
+ return 0;
+}
+
+_public_ int sd_journal_test_cursor(sd_journal *j, const char *cursor) {
+ int r;
+ Object *o;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(!isempty(cursor), -EINVAL);
+
+ if (!j->current_file || j->current_file->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(j->current_file, OBJECT_ENTRY, j->current_file->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ for (;;) {
+ _cleanup_free_ char *item = NULL;
+ unsigned long long ll;
+ sd_id128_t id;
+ int k = 0;
+
+ r = extract_first_word(&cursor, &item, ";", EXTRACT_DONT_COALESCE_SEPARATORS);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+
+ if (strlen(item) < 2 || item[1] != '=')
+ return -EINVAL;
+
+ switch (item[0]) {
+
+ case 's':
+ k = sd_id128_from_string(item+2, &id);
+ if (k < 0)
+ return k;
+ if (!sd_id128_equal(id, j->current_file->header->seqnum_id))
+ return 0;
+ break;
+
+ case 'i':
+ if (sscanf(item+2, "%llx", &ll) != 1)
+ return -EINVAL;
+ if (ll != le64toh(o->entry.seqnum))
+ return 0;
+ break;
+
+ case 'b':
+ k = sd_id128_from_string(item+2, &id);
+ if (k < 0)
+ return k;
+ if (!sd_id128_equal(id, o->entry.boot_id))
+ return 0;
+ break;
+
+ case 'm':
+ if (sscanf(item+2, "%llx", &ll) != 1)
+ return -EINVAL;
+ if (ll != le64toh(o->entry.monotonic))
+ return 0;
+ break;
+
+ case 't':
+ if (sscanf(item+2, "%llx", &ll) != 1)
+ return -EINVAL;
+ if (ll != le64toh(o->entry.realtime))
+ return 0;
+ break;
+
+ case 'x':
+ if (sscanf(item+2, "%llx", &ll) != 1)
+ return -EINVAL;
+ if (ll != le64toh(o->entry.xor_hash))
+ return 0;
+ break;
+ }
+ }
+
+ return 1;
+}
+
+_public_ int sd_journal_seek_monotonic_usec(sd_journal *j, sd_id128_t boot_id, uint64_t usec) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ detach_location(j);
+
+ j->current_location = (Location) {
+ .type = LOCATION_SEEK,
+ .boot_id = boot_id,
+ .monotonic = usec,
+ .monotonic_set = true,
+ };
+
+ return 0;
+}
+
+_public_ int sd_journal_seek_realtime_usec(sd_journal *j, uint64_t usec) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ detach_location(j);
+
+ j->current_location = (Location) {
+ .type = LOCATION_SEEK,
+ .realtime = usec,
+ .realtime_set = true,
+ };
+
+ return 0;
+}
+
+_public_ int sd_journal_seek_head(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ detach_location(j);
+
+ j->current_location = (Location) {
+ .type = LOCATION_HEAD,
+ };
+
+ return 0;
+}
+
+_public_ int sd_journal_seek_tail(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ detach_location(j);
+
+ j->current_location = (Location) {
+ .type = LOCATION_TAIL,
+ };
+
+ return 0;
+}
+
+static void check_network(sd_journal *j, int fd) {
+ assert(j);
+
+ if (j->on_network)
+ return;
+
+ j->on_network = fd_is_network_fs(fd);
+}
+
+static bool file_has_type_prefix(const char *prefix, const char *filename) {
+ const char *full, *tilded, *atted;
+
+ full = strjoina(prefix, ".journal");
+ tilded = strjoina(full, "~");
+ atted = strjoina(prefix, "@");
+
+ return STR_IN_SET(filename, full, tilded) ||
+ startswith(filename, atted);
+}
+
+static bool file_type_wanted(int flags, const char *filename) {
+ assert(filename);
+
+ if (!ENDSWITH_SET(filename, ".journal", ".journal~"))
+ return false;
+
+ /* no flags set → every type is OK */
+ if (!(flags & (SD_JOURNAL_SYSTEM | SD_JOURNAL_CURRENT_USER)))
+ return true;
+
+ if (FLAGS_SET(flags, SD_JOURNAL_CURRENT_USER)) {
+ char prefix[5 + DECIMAL_STR_MAX(uid_t) + 1];
+
+ xsprintf(prefix, "user-" UID_FMT, getuid());
+
+ if (file_has_type_prefix(prefix, filename))
+ return true;
+
+ /* If SD_JOURNAL_CURRENT_USER is specified and we are invoked under a system UID, then
+ * automatically enable SD_JOURNAL_SYSTEM too, because journald will actually put system user
+ * data into the system journal. */
+
+ if (uid_for_system_journal(getuid()))
+ flags |= SD_JOURNAL_SYSTEM;
+ }
+
+ if (FLAGS_SET(flags, SD_JOURNAL_SYSTEM) && file_has_type_prefix("system", filename))
+ return true;
+
+ return false;
+}
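+
+/* For illustration (assumed, non-exhaustive examples): with SD_JOURNAL_SYSTEM set, "system.journal",
+ * "system.journal~" and rotated files starting with "system@" are wanted; with SD_JOURNAL_CURRENT_USER
+ * and a current UID of 1000, "user-1000.journal", "user-1000.journal~" and files starting with
+ * "user-1000@" qualify as well. Anything not ending in ".journal" or ".journal~" is rejected up front. */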
+
+static bool path_has_prefix(sd_journal *j, const char *path, const char *prefix) {
+ assert(j);
+ assert(path);
+ assert(prefix);
+
+ if (j->toplevel_fd >= 0)
+ return false;
+
+ return path_startswith(path, prefix);
+}
+
+static void track_file_disposition(sd_journal *j, JournalFile *f) {
+ assert(j);
+ assert(f);
+
+ if (!j->has_runtime_files && path_has_prefix(j, f->path, "/run"))
+ j->has_runtime_files = true;
+ else if (!j->has_persistent_files && path_has_prefix(j, f->path, "/var"))
+ j->has_persistent_files = true;
+}
+
+static const char *skip_slash(const char *p) {
+
+ if (!p)
+ return NULL;
+
+ while (*p == '/')
+ p++;
+
+ return p;
+}
+
+static int add_any_file(
+ sd_journal *j,
+ int fd,
+ const char *path) {
+
+ _cleanup_close_ int our_fd = -EBADF;
+ JournalFile *f;
+ struct stat st;
+ int r;
+
+ assert(j);
+ assert(fd >= 0 || path);
+
+ if (fd < 0) {
+ assert(path); /* For gcc. */
+ if (j->toplevel_fd >= 0)
+                       /* If there's a top-level fd defined, explicitly make the path relative, since
+                        * otherwise openat() would ignore the first argument for an absolute path. */
+
+ fd = our_fd = openat(j->toplevel_fd, skip_slash(path), O_RDONLY|O_CLOEXEC|O_NONBLOCK);
+ else
+ fd = our_fd = open(path, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
+ if (fd < 0) {
+ r = log_debug_errno(errno, "Failed to open journal file %s: %m", path);
+ goto error;
+ }
+
+ r = fd_nonblock(fd, false);
+ if (r < 0) {
+ r = log_debug_errno(errno, "Failed to turn off O_NONBLOCK for %s: %m", path);
+ goto error;
+ }
+ }
+
+ if (fstat(fd, &st) < 0) {
+ r = log_debug_errno(errno, "Failed to fstat %s: %m", path ?: "fd");
+ goto error;
+ }
+
+ r = stat_verify_regular(&st);
+ if (r < 0) {
+ log_debug_errno(r, "Refusing to open %s: %m", path ?: "fd");
+ goto error;
+ }
+
+ if (path) {
+ f = ordered_hashmap_get(j->files, path);
+ if (f) {
+ if (stat_inode_same(&f->last_stat, &st)) {
+ /* We already track this file, under the same path and with the same
+ * device/inode numbers, it's hence really the same. Mark this file as seen
+ * in this generation. This is used to GC old files in process_q_overflow()
+ * to detect journal files that are still there and discern them from those
+ * which are gone. */
+
+ f->last_seen_generation = j->generation;
+ (void) journal_file_read_tail_timestamp(j, f);
+ return 0;
+ }
+
+                       /* So we tracked a file under this name, but it has a different inode/device. In that
+                        * case it got replaced (probably due to rotation?), hence let's drop it from our
+                        * list. */
+ remove_file_real(j, f);
+ f = NULL;
+ }
+ }
+
+ if (ordered_hashmap_size(j->files) >= JOURNAL_FILES_MAX) {
+ r = log_debug_errno(SYNTHETIC_ERRNO(ETOOMANYREFS),
+ "Too many open journal files, not adding %s.", path ?: "fd");
+ goto error;
+ }
+
+ r = journal_file_open(fd, path, O_RDONLY, 0, 0, 0, NULL, j->mmap, NULL, &f);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to open journal file %s: %m", path ?: "from fd");
+ goto error;
+ }
+
+ /* journal_file_dump(f); */
+
+       /* journal_file_open() generates a replacement fname if necessary, so we can use f->path. */
+ r = ordered_hashmap_put(j->files, f->path, f);
+ if (r < 0) {
+               f->close_fd = false; /* Make sure journal_file_close() doesn't close the caller's fd
+                                     * (or our own); the caller, or we ourselves, will close it. */
+ (void) journal_file_close(f);
+ goto error;
+ }
+
+ TAKE_FD(our_fd); /* the fd is now owned by the JournalFile object */
+
+ f->last_seen_generation = j->generation;
+
+ track_file_disposition(j, f);
+ check_network(j, f->fd);
+ (void) journal_file_read_tail_timestamp(j, f);
+
+ j->current_invalidate_counter++;
+
+ log_debug("File %s added.", f->path);
+
+ return 0;
+
+error:
+ (void) journal_put_error(j, r, path); /* path==NULL is OK. */
+ return r;
+}
+
+static int add_file_by_name(
+ sd_journal *j,
+ const char *prefix,
+ const char *filename) {
+
+ _cleanup_free_ char *path = NULL;
+
+ assert(j);
+ assert(prefix);
+ assert(filename);
+
+ if (j->no_new_files)
+ return 0;
+
+ if (!file_type_wanted(j->flags, filename))
+ return 0;
+
+ path = path_join(prefix, filename);
+ if (!path)
+ return -ENOMEM;
+
+ return add_any_file(j, -1, path);
+}
+
+static int remove_file_by_name(
+ sd_journal *j,
+ const char *prefix,
+ const char *filename) {
+
+ _cleanup_free_ char *path = NULL;
+ JournalFile *f;
+
+ assert(j);
+ assert(prefix);
+ assert(filename);
+
+ path = path_join(prefix, filename);
+ if (!path)
+ return -ENOMEM;
+
+ f = ordered_hashmap_get(j->files, path);
+ if (!f)
+ return 0;
+
+ remove_file_real(j, f);
+ return 1;
+}
+
+static void remove_file_real(sd_journal *j, JournalFile *f) {
+ assert(j);
+ assert(f);
+
+ (void) ordered_hashmap_remove(j->files, f->path);
+
+ log_debug("File %s removed.", f->path);
+
+ if (j->current_file == f) {
+ j->current_file = NULL;
+ j->current_field = 0;
+ }
+
+ if (j->unique_file == f) {
+ /* Jump to the next unique_file or NULL if that one was last */
+ j->unique_file = ordered_hashmap_next(j->files, j->unique_file->path);
+ j->unique_offset = 0;
+ if (!j->unique_file)
+ j->unique_file_lost = true;
+ }
+
+ if (j->fields_file == f) {
+ j->fields_file = ordered_hashmap_next(j->files, j->fields_file->path);
+ j->fields_offset = 0;
+ if (!j->fields_file)
+ j->fields_file_lost = true;
+ }
+
+ journal_file_unlink_newest_by_boot_id(j, f);
+ (void) journal_file_close(f);
+
+ j->current_invalidate_counter++;
+}
+
+static int dirname_is_machine_id(const char *fn) {
+ sd_id128_t id, machine;
+ const char *e;
+ int r;
+
+ /* Returns true if the specified directory name matches the local machine ID */
+
+ r = sd_id128_get_machine(&machine);
+ if (r < 0)
+ return r;
+
+ e = strchr(fn, '.');
+ if (e) {
+ const char *k;
+
+ /* Looks like it has a namespace suffix. Verify that. */
+ if (!log_namespace_name_valid(e + 1))
+ return false;
+
+ k = strndupa_safe(fn, e - fn);
+ r = sd_id128_from_string(k, &id);
+ } else
+ r = sd_id128_from_string(fn, &id);
+ if (r < 0)
+ return r;
+
+ return sd_id128_equal(id, machine);
+}
+
+static int dirname_has_namespace(const char *fn, const char *namespace) {
+ const char *e;
+
+ /* Returns true if the specified directory name matches the specified namespace */
+
+ e = strchr(fn, '.');
+ if (e) {
+ const char *k;
+
+ if (!namespace)
+ return false;
+
+ if (!streq(e + 1, namespace))
+ return false;
+
+ k = strndupa_safe(fn, e - fn);
+ return id128_is_valid(k);
+ }
+
+ if (namespace)
+ return false;
+
+ return id128_is_valid(fn);
+}
+
+static bool dirent_is_journal_file(const struct dirent *de) {
+ assert(de);
+
+ /* Returns true if the specified directory entry looks like a journal file we might be interested in */
+
+ if (!IN_SET(de->d_type, DT_REG, DT_LNK, DT_UNKNOWN))
+ return false;
+
+ return endswith(de->d_name, ".journal") ||
+ endswith(de->d_name, ".journal~");
+}
+
+static bool dirent_is_journal_subdir(const struct dirent *de) {
+ const char *e, *n;
+ assert(de);
+
+ /* returns true if the specified directory entry looks like a directory that might contain journal
+ * files we might be interested in, i.e. is either a 128-bit ID or a 128-bit ID suffixed by a
+ * namespace. */
+
+ if (!IN_SET(de->d_type, DT_DIR, DT_LNK, DT_UNKNOWN))
+ return false;
+
+ e = strchr(de->d_name, '.');
+ if (!e)
+ return id128_is_valid(de->d_name); /* No namespace */
+
+ n = strndupa_safe(de->d_name, e - de->d_name);
+ if (!id128_is_valid(n))
+ return false;
+
+ return log_namespace_name_valid(e + 1);
+}
+
+static int directory_open(sd_journal *j, const char *path, DIR **ret) {
+ DIR *d;
+
+ assert(j);
+ assert(path);
+ assert(ret);
+
+ if (j->toplevel_fd < 0)
+ d = opendir(path);
+ else
+               /* Open the specified directory relative to the toplevel fd. Enforce that the specified path is
+                * relative by dropping the initial slash. */
+ d = xopendirat(j->toplevel_fd, skip_slash(path), 0);
+ if (!d)
+ return -errno;
+
+ *ret = d;
+ return 0;
+}
+
+static int add_directory(sd_journal *j, const char *prefix, const char *dirname);
+
+static void directory_enumerate(sd_journal *j, Directory *m, DIR *d) {
+ assert(j);
+ assert(m);
+ assert(d);
+
+ FOREACH_DIRENT_ALL(de, d, goto fail) {
+ if (dirent_is_journal_file(de))
+ (void) add_file_by_name(j, m->path, de->d_name);
+
+ if (m->is_root && dirent_is_journal_subdir(de))
+ (void) add_directory(j, m->path, de->d_name);
+ }
+
+ return;
+fail:
+ log_debug_errno(errno, "Failed to enumerate directory %s, ignoring: %m", m->path);
+}
+
+static void directory_watch(sd_journal *j, Directory *m, int fd, uint32_t mask) {
+ int r;
+
+ assert(j);
+ assert(m);
+ assert(fd >= 0);
+
+       /* Watch this directory if that's enabled and if it's not being watched yet. */
+
+ if (m->wd > 0) /* Already have a watch? */
+ return;
+ if (j->inotify_fd < 0) /* Not watching at all? */
+ return;
+
+ m->wd = inotify_add_watch_fd(j->inotify_fd, fd, mask);
+ if (m->wd < 0) {
+ log_debug_errno(errno, "Failed to watch journal directory '%s', ignoring: %m", m->path);
+ return;
+ }
+
+ r = hashmap_put(j->directories_by_wd, INT_TO_PTR(m->wd), m);
+ if (r == -EEXIST)
+ log_debug_errno(r, "Directory '%s' already being watched under a different path, ignoring: %m", m->path);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to add watch for journal directory '%s' to hashmap, ignoring: %m", m->path);
+ (void) inotify_rm_watch(j->inotify_fd, m->wd);
+ m->wd = -1;
+ }
+}
+
+static int add_directory(
+ sd_journal *j,
+ const char *prefix,
+ const char *dirname) {
+
+ _cleanup_free_ char *path = NULL;
+ _cleanup_closedir_ DIR *d = NULL;
+ Directory *m;
+ int r, k;
+
+ assert(j);
+ assert(prefix);
+
+       /* Adds a journal file directory to watch. If the directory is already tracked, this updates the inotify
+        * watch and re-enumerates the directory contents. */
+
+ path = path_join(prefix, dirname);
+ if (!path) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ log_debug("Considering directory '%s'.", path);
+
+ /* We consider everything local that is in a directory for the local machine ID, or that is stored in /run */
+ if ((j->flags & SD_JOURNAL_LOCAL_ONLY) &&
+ !((dirname && dirname_is_machine_id(dirname) > 0) || path_has_prefix(j, path, "/run")))
+ return 0;
+
+ if (dirname &&
+ (!(FLAGS_SET(j->flags, SD_JOURNAL_ALL_NAMESPACES) ||
+ dirname_has_namespace(dirname, j->namespace) > 0 ||
+ (FLAGS_SET(j->flags, SD_JOURNAL_INCLUDE_DEFAULT_NAMESPACE) && dirname_has_namespace(dirname, NULL) > 0))))
+ return 0;
+
+ r = directory_open(j, path, &d);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to open directory '%s': %m", path);
+ goto fail;
+ }
+
+ m = hashmap_get(j->directories_by_path, path);
+ if (!m) {
+ m = new(Directory, 1);
+ if (!m) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ *m = (Directory) {
+ .is_root = false,
+ .path = path,
+ };
+
+ if (hashmap_put(j->directories_by_path, m->path, m) < 0) {
+ free(m);
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ path = NULL; /* avoid freeing in cleanup */
+ j->current_invalidate_counter++;
+
+ log_debug("Directory %s added.", m->path);
+
+ } else if (m->is_root)
+ return 0; /* Don't 'downgrade' from root directory */
+
+ m->last_seen_generation = j->generation;
+
+ directory_watch(j, m, dirfd(d),
+ IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB|IN_DELETE|
+ IN_DELETE_SELF|IN_MOVE_SELF|IN_UNMOUNT|IN_MOVED_FROM|
+ IN_ONLYDIR);
+
+ if (!j->no_new_files)
+ directory_enumerate(j, m, d);
+
+ check_network(j, dirfd(d));
+
+ return 0;
+
+fail:
+ k = journal_put_error(j, r, path ?: prefix);
+ if (k < 0)
+ return k;
+
+ return r;
+}
+
+static int add_root_directory(sd_journal *j, const char *p, bool missing_ok) {
+
+ _cleanup_closedir_ DIR *d = NULL;
+ Directory *m;
+ int r, k;
+
+ assert(j);
+
+       /* Adds a root directory to our set of directories to use. If the root directory is already in the set, we
+        * update the inotify logic and re-enumerate the directory entries. This function may hence be called both to
+        * initially populate the set and to update it later. */
+
+ if (p) {
+ /* If there's a path specified, use it. */
+
+ log_debug("Considering root directory '%s'.", p);
+
+ if ((j->flags & SD_JOURNAL_RUNTIME_ONLY) &&
+ !path_has_prefix(j, p, "/run"))
+ return -EINVAL;
+
+ if (j->prefix)
+ p = strjoina(j->prefix, p);
+
+ r = directory_open(j, p, &d);
+ if (r == -ENOENT && missing_ok)
+ return 0;
+ if (r < 0) {
+ log_debug_errno(r, "Failed to open root directory %s: %m", p);
+ goto fail;
+ }
+ } else {
+ _cleanup_close_ int dfd = -EBADF;
+
+ /* If there's no path specified, then we use the top-level fd itself. We duplicate the fd here, since
+ * opendir() will take possession of the fd, and close it, which we don't want. */
+
+ p = "."; /* store this as "." in the directories hashmap */
+
+ dfd = fcntl(j->toplevel_fd, F_DUPFD_CLOEXEC, 3);
+ if (dfd < 0) {
+ r = -errno;
+ goto fail;
+ }
+
+ d = take_fdopendir(&dfd);
+ if (!d) {
+ r = -errno;
+ goto fail;
+ }
+
+ rewinddir(d);
+ }
+
+ m = hashmap_get(j->directories_by_path, p);
+ if (!m) {
+ m = new0(Directory, 1);
+ if (!m) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ m->is_root = true;
+
+ m->path = strdup(p);
+ if (!m->path) {
+ free(m);
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ if (hashmap_put(j->directories_by_path, m->path, m) < 0) {
+ free(m->path);
+ free(m);
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ j->current_invalidate_counter++;
+
+ log_debug("Root directory %s added.", m->path);
+
+ } else if (!m->is_root)
+ return 0;
+
+ directory_watch(j, m, dirfd(d),
+ IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB|IN_DELETE|
+ IN_ONLYDIR);
+
+ if (!j->no_new_files)
+ directory_enumerate(j, m, d);
+
+ check_network(j, dirfd(d));
+
+ return 0;
+
+fail:
+ k = journal_put_error(j, r, p);
+ if (k < 0)
+ return k;
+
+ return r;
+}
+
+static void remove_directory(sd_journal *j, Directory *d) {
+ assert(j);
+
+ if (d->wd > 0) {
+ hashmap_remove(j->directories_by_wd, INT_TO_PTR(d->wd));
+
+ if (j->inotify_fd >= 0)
+ (void) inotify_rm_watch(j->inotify_fd, d->wd);
+ }
+
+ hashmap_remove(j->directories_by_path, d->path);
+
+ if (d->is_root)
+ log_debug("Root directory %s removed.", d->path);
+ else
+ log_debug("Directory %s removed.", d->path);
+
+ free(d->path);
+ free(d);
+}
+
+static int add_search_paths(sd_journal *j) {
+
+ static const char search_paths[] =
+ "/run/log/journal\0"
+ "/var/log/journal\0";
+
+ assert(j);
+
+ /* We ignore most errors here, since the idea is to only open
+ * what's actually accessible, and ignore the rest. */
+
+ NULSTR_FOREACH(p, search_paths)
+ (void) add_root_directory(j, p, true);
+
+ if (!(j->flags & SD_JOURNAL_LOCAL_ONLY))
+ (void) add_root_directory(j, "/var/log/journal/remote", true);
+
+ return 0;
+}
+
+static int add_current_paths(sd_journal *j) {
+ JournalFile *f;
+
+ assert(j);
+ assert(j->no_new_files);
+
+       /* Simply adds the directories of all files we currently have open. We don't expect errors here, so we
+        * treat them as fatal. */
+
+ ORDERED_HASHMAP_FOREACH(f, j->files) {
+ _cleanup_free_ char *dir = NULL;
+ int r;
+
+ r = path_extract_directory(f->path, &dir);
+ if (r < 0)
+ return r;
+
+ r = add_directory(j, dir, NULL);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int allocate_inotify(sd_journal *j) {
+ assert(j);
+
+ if (j->inotify_fd < 0) {
+ j->inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
+ if (j->inotify_fd < 0)
+ return -errno;
+ }
+
+ return hashmap_ensure_allocated(&j->directories_by_wd, NULL);
+}
+
+static sd_journal *journal_new(int flags, const char *path, const char *namespace) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+
+ j = new(sd_journal, 1);
+ if (!j)
+ return NULL;
+
+ *j = (sd_journal) {
+ .origin_id = origin_id_query(),
+ .toplevel_fd = -EBADF,
+ .inotify_fd = -EBADF,
+ .flags = flags,
+ .data_threshold = DEFAULT_DATA_THRESHOLD,
+ };
+
+ if (path) {
+ char *t;
+
+ t = strdup(path);
+ if (!t)
+ return NULL;
+
+ if (flags & SD_JOURNAL_OS_ROOT)
+ j->prefix = t;
+ else
+ j->path = t;
+ }
+
+ if (namespace) {
+ j->namespace = strdup(namespace);
+ if (!j->namespace)
+ return NULL;
+ }
+
+ j->files = ordered_hashmap_new(&path_hash_ops);
+ if (!j->files)
+ return NULL;
+
+ j->files_cache = ordered_hashmap_iterated_cache_new(j->files);
+ j->directories_by_path = hashmap_new(&path_hash_ops);
+ j->mmap = mmap_cache_new();
+ if (!j->files_cache || !j->directories_by_path || !j->mmap)
+ return NULL;
+
+ return TAKE_PTR(j);
+}
+
+#define OPEN_ALLOWED_FLAGS \
+ (SD_JOURNAL_LOCAL_ONLY | \
+ SD_JOURNAL_RUNTIME_ONLY | \
+ SD_JOURNAL_SYSTEM | \
+ SD_JOURNAL_CURRENT_USER | \
+ SD_JOURNAL_ALL_NAMESPACES | \
+ SD_JOURNAL_INCLUDE_DEFAULT_NAMESPACE)
+
+_public_ int sd_journal_open_namespace(sd_journal **ret, const char *namespace, int flags) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return((flags & ~OPEN_ALLOWED_FLAGS) == 0, -EINVAL);
+
+ j = journal_new(flags, NULL, namespace);
+ if (!j)
+ return -ENOMEM;
+
+ r = add_search_paths(j);
+ if (r < 0)
+ return r;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
+
+_public_ int sd_journal_open(sd_journal **ret, int flags) {
+ return sd_journal_open_namespace(ret, NULL, flags);
+}
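+
+/* Minimal client-side usage sketch (assumes <stdio.h> and <systemd/sd-journal.h>, and linking against
+ * libsystemd; shown here for illustration only):
+ *
+ *     sd_journal *j = NULL;
+ *     int r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);
+ *     if (r < 0)
+ *             return r;
+ *     SD_JOURNAL_FOREACH(j) {
+ *             const void *d;
+ *             size_t l;
+ *
+ *             if (sd_journal_get_data(j, "MESSAGE", &d, &l) >= 0)
+ *                     printf("%.*s\n", (int) l, (const char*) d);
+ *     }
+ *     sd_journal_close(j);
+ */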
+
+#define OPEN_CONTAINER_ALLOWED_FLAGS \
+ (SD_JOURNAL_LOCAL_ONLY | SD_JOURNAL_SYSTEM)
+
+_public_ int sd_journal_open_container(sd_journal **ret, const char *machine, int flags) {
+ _cleanup_free_ char *root = NULL, *class = NULL;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ char *p;
+ int r;
+
+ /* This is deprecated, people should use machined's OpenMachineRootDirectory() call instead in
+ * combination with sd_journal_open_directory_fd(). */
+
+ assert_return(machine, -EINVAL);
+ assert_return(ret, -EINVAL);
+ assert_return((flags & ~OPEN_CONTAINER_ALLOWED_FLAGS) == 0, -EINVAL);
+ assert_return(hostname_is_valid(machine, 0), -EINVAL);
+
+ p = strjoina("/run/systemd/machines/", machine);
+ r = parse_env_file(NULL, p,
+ "ROOT", &root,
+ "CLASS", &class);
+ if (r == -ENOENT)
+ return -EHOSTDOWN;
+ if (r < 0)
+ return r;
+ if (!root)
+ return -ENODATA;
+
+ if (!streq_ptr(class, "container"))
+ return -EIO;
+
+ j = journal_new(flags, root, NULL);
+ if (!j)
+ return -ENOMEM;
+
+ r = add_search_paths(j);
+ if (r < 0)
+ return r;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
+
+#define OPEN_DIRECTORY_ALLOWED_FLAGS \
+ (SD_JOURNAL_OS_ROOT | \
+ SD_JOURNAL_SYSTEM | SD_JOURNAL_CURRENT_USER )
+
+_public_ int sd_journal_open_directory(sd_journal **ret, const char *path, int flags) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return(path, -EINVAL);
+ assert_return((flags & ~OPEN_DIRECTORY_ALLOWED_FLAGS) == 0, -EINVAL);
+
+ j = journal_new(flags, path, NULL);
+ if (!j)
+ return -ENOMEM;
+
+ if (flags & SD_JOURNAL_OS_ROOT)
+ r = add_search_paths(j);
+ else
+ r = add_root_directory(j, path, false);
+ if (r < 0)
+ return r;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
+
+_public_ int sd_journal_open_files(sd_journal **ret, const char **paths, int flags) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return(flags == 0, -EINVAL);
+
+ j = journal_new(flags, NULL, NULL);
+ if (!j)
+ return -ENOMEM;
+
+ STRV_FOREACH(path, paths) {
+ r = add_any_file(j, -1, *path);
+ if (r < 0)
+ return r;
+ }
+
+ j->no_new_files = true;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
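+
+/* Usage sketch for the call above (the file names are hypothetical): open a fixed, NULL-terminated
+ * list of journal files instead of the default search paths. Since no_new_files is set, files that
+ * appear later are not picked up automatically.
+ *
+ *     const char *paths[] = { "/var/log/journal/remote/example.journal", "/tmp/copy.journal", NULL };
+ *     sd_journal *j = NULL;
+ *
+ *     if (sd_journal_open_files(&j, paths, 0) >= 0) {
+ *             ...                                    iterate as usual, then:
+ *             sd_journal_close(j);
+ *     }
+ */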
+
+#define OPEN_DIRECTORY_FD_ALLOWED_FLAGS \
+ (SD_JOURNAL_OS_ROOT | \
+ SD_JOURNAL_SYSTEM | \
+ SD_JOURNAL_CURRENT_USER | \
+ SD_JOURNAL_TAKE_DIRECTORY_FD)
+
+_public_ int sd_journal_open_directory_fd(sd_journal **ret, int fd, int flags) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ struct stat st;
+ bool take_fd;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return(fd >= 0, -EBADF);
+ assert_return((flags & ~OPEN_DIRECTORY_FD_ALLOWED_FLAGS) == 0, -EINVAL);
+
+ if (fstat(fd, &st) < 0)
+ return -errno;
+
+ if (!S_ISDIR(st.st_mode))
+ return -EBADFD;
+
+ take_fd = FLAGS_SET(flags, SD_JOURNAL_TAKE_DIRECTORY_FD);
+ j = journal_new(flags & ~SD_JOURNAL_TAKE_DIRECTORY_FD, NULL, NULL);
+ if (!j)
+ return -ENOMEM;
+
+ j->toplevel_fd = fd;
+
+ if (flags & SD_JOURNAL_OS_ROOT)
+ r = add_search_paths(j);
+ else
+ r = add_root_directory(j, NULL, false);
+ if (r < 0)
+ return r;
+
+ SET_FLAG(j->flags, SD_JOURNAL_TAKE_DIRECTORY_FD, take_fd);
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
+
+_public_ int sd_journal_open_files_fd(sd_journal **ret, int fds[], unsigned n_fds, int flags) {
+ JournalFile *f;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return(n_fds > 0, -EBADF);
+ assert_return(flags == 0, -EINVAL);
+
+ j = journal_new(flags, NULL, NULL);
+ if (!j)
+ return -ENOMEM;
+
+ for (unsigned i = 0; i < n_fds; i++) {
+ struct stat st;
+
+ if (fds[i] < 0) {
+ r = -EBADF;
+ goto fail;
+ }
+
+ if (fstat(fds[i], &st) < 0) {
+ r = -errno;
+ goto fail;
+ }
+
+ r = stat_verify_regular(&st);
+ if (r < 0)
+ goto fail;
+
+ r = add_any_file(j, fds[i], NULL);
+ if (r < 0)
+ goto fail;
+ }
+
+ j->no_new_files = true;
+ j->no_inotify = true;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+
+fail:
+       /* If we fail, make sure we don't take possession of the files we already made use of successfully, so
+        * that their fds remain open for the caller. */
+ ORDERED_HASHMAP_FOREACH(f, j->files)
+ f->close_fd = false;
+
+ return r;
+}
+
+_public_ void sd_journal_close(sd_journal *j) {
+ Directory *d;
+ Prioq *p;
+
+ if (!j || journal_origin_changed(j))
+ return;
+
+ while ((p = hashmap_first(j->newest_by_boot_id)))
+ journal_file_unlink_newest_by_boot_id(j, prioq_peek(p));
+ hashmap_free(j->newest_by_boot_id);
+
+ sd_journal_flush_matches(j);
+
+ ordered_hashmap_free_with_destructor(j->files, journal_file_close);
+ iterated_cache_free(j->files_cache);
+
+ while ((d = hashmap_first(j->directories_by_path)))
+ remove_directory(j, d);
+
+ while ((d = hashmap_first(j->directories_by_wd)))
+ remove_directory(j, d);
+
+ hashmap_free(j->directories_by_path);
+ hashmap_free(j->directories_by_wd);
+
+ if (FLAGS_SET(j->flags, SD_JOURNAL_TAKE_DIRECTORY_FD))
+ safe_close(j->toplevel_fd);
+
+ safe_close(j->inotify_fd);
+
+ if (j->mmap) {
+ mmap_cache_stats_log_debug(j->mmap);
+ mmap_cache_unref(j->mmap);
+ }
+
+ hashmap_free_free(j->errors);
+
+ free(j->path);
+ free(j->prefix);
+ free(j->namespace);
+ free(j->unique_field);
+ free(j->fields_buffer);
+ free(j);
+}
+
+static void journal_file_unlink_newest_by_boot_id(sd_journal *j, JournalFile *f) {
+ JournalFile *nf;
+ Prioq *p;
+
+ assert(j);
+ assert(f);
+
+ if (f->newest_boot_id_prioq_idx == PRIOQ_IDX_NULL) /* not linked currently, hence this is a NOP */
+ return;
+
+ assert_se(p = hashmap_get(j->newest_by_boot_id, &f->newest_boot_id));
+ assert_se(prioq_remove(p, f, &f->newest_boot_id_prioq_idx) > 0);
+
+ nf = prioq_peek(p);
+ if (nf)
+               /* There's still a member in the prioq? Then make sure the hashmap key now points to its
+                * .newest_boot_id field (and not ours!). Note that we only replace the memory backing the key
+                * here; the value of the key (and the data associated with it) remains the same. */
+ assert_se(hashmap_replace(j->newest_by_boot_id, &nf->newest_boot_id, p) >= 0);
+ else {
+ assert_se(hashmap_remove(j->newest_by_boot_id, &f->newest_boot_id) == p);
+ prioq_free(p);
+ }
+
+ f->newest_boot_id_prioq_idx = PRIOQ_IDX_NULL;
+}
+
+static int journal_file_newest_monotonic_compare(const void *a, const void *b) {
+ const JournalFile *x = a, *y = b;
+
+ return -CMP(x->newest_monotonic_usec, y->newest_monotonic_usec); /* Invert order, we want newest first! */
+}
+
+static int journal_file_reshuffle_newest_by_boot_id(sd_journal *j, JournalFile *f) {
+ Prioq *p;
+ int r;
+
+ assert(j);
+ assert(f);
+
+ p = hashmap_get(j->newest_by_boot_id, &f->newest_boot_id);
+ if (p) {
+ /* There's already a priority queue for this boot ID */
+
+ if (f->newest_boot_id_prioq_idx == PRIOQ_IDX_NULL) {
+ r = prioq_put(p, f, &f->newest_boot_id_prioq_idx); /* Insert if we aren't in there yet */
+ if (r < 0)
+ return r;
+ } else
+ prioq_reshuffle(p, f, &f->newest_boot_id_prioq_idx); /* Reshuffle otherwise */
+
+ } else {
+ _cleanup_(prioq_freep) Prioq *q = NULL;
+
+ /* No priority queue yet, then allocate one */
+
+ assert(f->newest_boot_id_prioq_idx == PRIOQ_IDX_NULL); /* we can't be a member either */
+
+ q = prioq_new(journal_file_newest_monotonic_compare);
+ if (!q)
+ return -ENOMEM;
+
+ r = prioq_put(q, f, &f->newest_boot_id_prioq_idx);
+ if (r < 0)
+ return r;
+
+ r = hashmap_ensure_put(&j->newest_by_boot_id, &id128_hash_ops, &f->newest_boot_id, q);
+ if (r < 0) {
+ f->newest_boot_id_prioq_idx = PRIOQ_IDX_NULL;
+ return r;
+ }
+
+ TAKE_PTR(q);
+ }
+
+ return 0;
+}
+
+static int journal_file_read_tail_timestamp(sd_journal *j, JournalFile *f) {
+ uint64_t offset, mo, rt;
+ sd_id128_t id;
+ ObjectType type;
+ Object *o;
+ int r;
+
+ assert(j);
+ assert(f);
+ assert(f->header);
+
+ /* Tries to read the timestamp of the most recently written entry. */
+
+ r = journal_file_fstat(f);
+ if (r < 0)
+ return r;
+ if (f->newest_mtime == timespec_load(&f->last_stat.st_mtim))
+ return 0; /* mtime didn't change since last time, don't bother */
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_offset)) {
+ offset = le64toh(READ_NOW(f->header->tail_entry_offset));
+ type = OBJECT_ENTRY;
+ } else {
+ offset = le64toh(READ_NOW(f->header->tail_object_offset));
+ type = OBJECT_UNUSED;
+ }
+ if (offset == 0)
+ return -ENODATA; /* not a single object/entry, hence no tail timestamp */
+
+ /* Move to the last object in the journal file, in the hope it is an entry (which it usually will
+ * be). If we lack the "tail_entry_offset" field in the header, we specify the type as OBJECT_UNUSED
+ * here, since we cannot be sure what the last object will be, and want no noisy logging if it isn't
+ * an entry. We instead check after figuring out the pointer. */
+ r = journal_file_move_to_object(f, type, offset, &o);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to move to last object in journal file, ignoring: %m");
+ o = NULL;
+ }
+ if (o && o->object.type == OBJECT_ENTRY) {
+ /* Yay, last object is an entry, let's use the data. */
+ id = o->entry.boot_id;
+ mo = le64toh(o->entry.monotonic);
+ rt = le64toh(o->entry.realtime);
+ } else {
+               /* So the object is not an entry, or we couldn't access it? In that case, let's read the most
+                * recent entry timestamps from the header. It's equally good. Unfortunately though, in old
+                * versions of the journal the boot ID in the header doesn't have to match the monotonic
+                * timestamp of the header. Hence, before using the data, let's first check the header flag
+                * that indicates whether the two strictly match. */
+
+ if (JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f->header) && f->header->state == STATE_ARCHIVED) {
+ mo = le64toh(f->header->tail_entry_monotonic);
+ rt = le64toh(f->header->tail_entry_realtime);
+ id = f->header->tail_entry_boot_id;
+ } else {
+                       /* Otherwise let's find the last entry manually (this possibly means traversing the
+                        * chain of entry arrays, till the end). */
+ r = journal_file_next_entry(f, 0, DIRECTION_UP, &o, NULL);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ return -ENODATA;
+
+ id = o->entry.boot_id;
+ mo = le64toh(o->entry.monotonic);
+ rt = le64toh(o->entry.realtime);
+ }
+ }
+
+ if (mo > rt) /* monotonic clock is further ahead than realtime? that's weird, refuse to use the data */
+ return -ENODATA;
+
+ if (!sd_id128_equal(f->newest_boot_id, id))
+ journal_file_unlink_newest_by_boot_id(j, f);
+
+ f->newest_boot_id = id;
+ f->newest_monotonic_usec = mo;
+ f->newest_realtime_usec = rt;
+ f->newest_machine_id = f->header->machine_id;
+ f->newest_mtime = timespec_load(&f->last_stat.st_mtim);
+
+ r = journal_file_reshuffle_newest_by_boot_id(j, f);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+_public_ int sd_journal_get_realtime_usec(sd_journal *j, uint64_t *ret) {
+ JournalFile *f;
+ Object *o;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ f = j->current_file;
+ if (!f)
+ return -EADDRNOTAVAIL;
+ if (f->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ uint64_t t = le64toh(o->entry.realtime);
+ if (!VALID_REALTIME(t))
+ return -EBADMSG;
+
+ if (ret)
+ *ret = t;
+
+ return 0;
+}
+
+_public_ int sd_journal_get_monotonic_usec(sd_journal *j, uint64_t *ret, sd_id128_t *ret_boot_id) {
+ JournalFile *f;
+ Object *o;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ f = j->current_file;
+ if (!f)
+ return -EADDRNOTAVAIL;
+ if (f->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ if (ret_boot_id)
+ *ret_boot_id = o->entry.boot_id;
+ else {
+ sd_id128_t id;
+
+ r = sd_id128_get_boot(&id);
+ if (r < 0)
+ return r;
+
+ if (!sd_id128_equal(id, o->entry.boot_id))
+ return -ESTALE;
+ }
+
+ uint64_t t = le64toh(o->entry.monotonic);
+ if (!VALID_MONOTONIC(t))
+ return -EBADMSG;
+
+ if (ret)
+ *ret = t;
+
+ return 0;
+}
+
+_public_ int sd_journal_get_seqnum(
+ sd_journal *j,
+ uint64_t *ret_seqnum,
+ sd_id128_t *ret_seqnum_id) {
+
+ JournalFile *f;
+ Object *o;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ f = j->current_file;
+ if (!f)
+ return -EADDRNOTAVAIL;
+
+ if (f->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ if (ret_seqnum_id)
+ *ret_seqnum_id = f->header->seqnum_id;
+ if (ret_seqnum)
+ *ret_seqnum = le64toh(o->entry.seqnum);
+
+ return 0;
+}
+
+static bool field_is_valid(const char *field) {
+ assert(field);
+
+ if (isempty(field))
+ return false;
+
+ if (startswith(field, "__"))
+ return false;
+
+ for (const char *p = field; *p; p++) {
+
+ if (*p == '_')
+ continue;
+
+ if (*p >= 'A' && *p <= 'Z')
+ continue;
+
+ if (ascii_isdigit(*p))
+ continue;
+
+ return false;
+ }
+
+ return true;
+}
+
+_public_ int sd_journal_get_data(sd_journal *j, const char *field, const void **data, size_t *size) {
+ JournalFile *f;
+ size_t field_length;
+ Object *o;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(field, -EINVAL);
+ assert_return(data, -EINVAL);
+ assert_return(size, -EINVAL);
+ assert_return(field_is_valid(field), -EINVAL);
+
+ f = j->current_file;
+ if (!f)
+ return -EADDRNOTAVAIL;
+
+ if (f->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ field_length = strlen(field);
+
+ uint64_t n = journal_file_entry_n_items(f, o);
+ for (uint64_t i = 0; i < n; i++) {
+ uint64_t p;
+ void *d;
+ size_t l;
+
+ p = journal_file_entry_item_object_offset(f, o, i);
+ r = journal_file_data_payload(f, NULL, p, field, field_length, j->data_threshold, &d, &l);
+ if (r == 0)
+ continue;
+ if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG)) {
+ log_debug_errno(r, "Entry item %"PRIu64" data object is bad, skipping over it: %m", i);
+ continue;
+ }
+ if (r < 0)
+ return r;
+
+ *data = d;
+ *size = l;
+
+ return 0;
+ }
+
+ return -ENOENT;
+}
+
+_public_ int sd_journal_enumerate_data(sd_journal *j, const void **data, size_t *size) {
+ JournalFile *f;
+ Object *o;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(data, -EINVAL);
+ assert_return(size, -EINVAL);
+
+ f = j->current_file;
+ if (!f)
+ return -EADDRNOTAVAIL;
+
+ if (f->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ for (uint64_t n = journal_file_entry_n_items(f, o); j->current_field < n; j->current_field++) {
+ uint64_t p;
+ void *d;
+ size_t l;
+
+ p = journal_file_entry_item_object_offset(f, o, j->current_field);
+ r = journal_file_data_payload(f, NULL, p, NULL, 0, j->data_threshold, &d, &l);
+ if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG)) {
+ log_debug_errno(r, "Entry item %"PRIu64" data object is bad, skipping over it: %m", j->current_field);
+ continue;
+ }
+ if (r < 0)
+ return r;
+ assert(r > 0);
+
+ *data = d;
+ *size = l;
+
+ j->current_field++;
+
+ return 1;
+ }
+
+ return 0;
+}
+
+_public_ int sd_journal_enumerate_available_data(sd_journal *j, const void **data, size_t *size) {
+ for (;;) {
+ int r;
+
+ r = sd_journal_enumerate_data(j, data, size);
+ if (r >= 0)
+ return r;
+ if (!JOURNAL_ERRNO_IS_UNAVAILABLE_FIELD(r))
+ return r;
+ j->current_field++; /* Try with the next field */
+ }
+}
+
+_public_ void sd_journal_restart_data(sd_journal *j) {
+ if (!j || journal_origin_changed(j))
+ return;
+
+ j->current_field = 0;
+}
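+
+/* Illustrative sketch of iterating the fields of the current entry (assumes the SD_JOURNAL_FOREACH_DATA
+ * convenience macro from sd-journal.h, which wraps the enumeration calls above):
+ *
+ *     const void *d;
+ *     size_t l;
+ *
+ *     SD_JOURNAL_FOREACH_DATA(j, d, l)
+ *             printf("%.*s\n", (int) l, (const char*) d);
+ */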
+
+static int reiterate_all_paths(sd_journal *j) {
+ assert(j);
+
+ if (j->no_new_files)
+ return add_current_paths(j);
+
+ if (j->flags & SD_JOURNAL_OS_ROOT)
+ return add_search_paths(j);
+
+ if (j->toplevel_fd >= 0)
+ return add_root_directory(j, NULL, false);
+
+ if (j->path)
+ return add_root_directory(j, j->path, true);
+
+ return add_search_paths(j);
+}
+
+_public_ int sd_journal_get_fd(sd_journal *j) {
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ if (j->no_inotify)
+ return -EMEDIUMTYPE;
+
+ if (j->inotify_fd >= 0)
+ return j->inotify_fd;
+
+ r = allocate_inotify(j);
+ if (r < 0)
+ return r;
+
+ log_debug("Reiterating files to get inotify watches established.");
+
+ /* Iterate through all dirs again, to add them to the inotify */
+ r = reiterate_all_paths(j);
+ if (r < 0)
+ return r;
+
+ return j->inotify_fd;
+}
+
+_public_ int sd_journal_get_events(sd_journal *j) {
+ int fd;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ fd = sd_journal_get_fd(j);
+ if (fd < 0)
+ return fd;
+
+ return POLLIN;
+}
+
+_public_ int sd_journal_get_timeout(sd_journal *j, uint64_t *timeout_usec) {
+ int fd;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(timeout_usec, -EINVAL);
+
+ fd = sd_journal_get_fd(j);
+ if (fd < 0)
+ return fd;
+
+ if (!j->on_network) {
+ *timeout_usec = UINT64_MAX;
+ return 0;
+ }
+
+ /* If we are on the network we need to regularly check for
+ * changes manually */
+
+ *timeout_usec = j->last_process_usec + JOURNAL_FILES_RECHECK_USEC;
+ return 1;
+}
+
+static void process_q_overflow(sd_journal *j) {
+ JournalFile *f;
+ Directory *m;
+
+ assert(j);
+
+ /* When the inotify queue overruns we need to enumerate and re-validate all journal files to bring our list
+ * back in sync with what's on disk. For this we pick a new generation counter value. It'll be assigned to all
+ * journal files we encounter. All journal files and all directories that don't carry it after reenumeration
+ * are subject for unloading. */
+
+ log_debug("Inotify queue overrun, reiterating everything.");
+
+ j->generation++;
+ (void) reiterate_all_paths(j);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files) {
+
+ if (f->last_seen_generation == j->generation)
+ continue;
+
+ log_debug("File '%s' hasn't been seen in this enumeration, removing.", f->path);
+ remove_file_real(j, f);
+ }
+
+ HASHMAP_FOREACH(m, j->directories_by_path) {
+
+ if (m->last_seen_generation == j->generation)
+ continue;
+
+ if (m->is_root) /* Never GC root directories */
+ continue;
+
+               log_debug("Directory '%s' hasn't been seen in this enumeration, removing.", m->path);
+ remove_directory(j, m);
+ }
+
+ log_debug("Reiteration complete.");
+}
+
+static void process_inotify_event(sd_journal *j, const struct inotify_event *e) {
+ Directory *d;
+
+ assert(j);
+ assert(e);
+
+ if (e->mask & IN_Q_OVERFLOW) {
+ process_q_overflow(j);
+ return;
+ }
+
+ /* Is this a subdirectory we watch? */
+ d = hashmap_get(j->directories_by_wd, INT_TO_PTR(e->wd));
+ if (d) {
+ if (!(e->mask & IN_ISDIR) && e->len > 0 &&
+ (endswith(e->name, ".journal") ||
+ endswith(e->name, ".journal~"))) {
+
+ /* Event for a journal file */
+
+ if (e->mask & (IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB))
+ (void) add_file_by_name(j, d->path, e->name);
+ else if (e->mask & (IN_DELETE|IN_MOVED_FROM|IN_UNMOUNT))
+ (void) remove_file_by_name(j, d->path, e->name);
+
+ } else if (!d->is_root && e->len == 0) {
+
+ /* Event for a subdirectory */
+
+ if (e->mask & (IN_DELETE_SELF|IN_MOVE_SELF|IN_UNMOUNT))
+ remove_directory(j, d);
+
+ } else if (d->is_root && (e->mask & IN_ISDIR) && e->len > 0 && id128_is_valid(e->name)) {
+
+ /* Event for root directory */
+
+ if (e->mask & (IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB))
+ (void) add_directory(j, d->path, e->name);
+ }
+
+ return;
+ }
+
+ if (e->mask & IN_IGNORED)
+ return;
+
+ log_debug("Unexpected inotify event.");
+}
+
+static int determine_change(sd_journal *j) {
+ bool b;
+
+ assert(j);
+
+ b = j->current_invalidate_counter != j->last_invalidate_counter;
+ j->last_invalidate_counter = j->current_invalidate_counter;
+
+ return b ? SD_JOURNAL_INVALIDATE : SD_JOURNAL_APPEND;
+}
+
+_public_ int sd_journal_process(sd_journal *j) {
+ bool got_something = false;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+       if (j->inotify_fd < 0) /* We have no inotify fd yet? Then there's nothing to process. */
+ return 0;
+
+ j->last_process_usec = now(CLOCK_MONOTONIC);
+ j->last_invalidate_counter = j->current_invalidate_counter;
+
+ for (;;) {
+ union inotify_event_buffer buffer;
+ ssize_t l;
+
+ l = read(j->inotify_fd, &buffer, sizeof(buffer));
+ if (l < 0) {
+ if (ERRNO_IS_TRANSIENT(errno))
+ return got_something ? determine_change(j) : SD_JOURNAL_NOP;
+
+ return -errno;
+ }
+
+ got_something = true;
+
+ FOREACH_INOTIFY_EVENT(e, buffer, l)
+ process_inotify_event(j, e);
+ }
+}
+
+_public_ int sd_journal_wait(sd_journal *j, uint64_t timeout_usec) {
+ int r;
+ uint64_t t;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ if (j->inotify_fd < 0) {
+ JournalFile *f;
+
+ /* This is the first invocation, hence create the inotify watch */
+ r = sd_journal_get_fd(j);
+ if (r < 0)
+ return r;
+
+ /* Server might have done some vacuuming while we weren't watching. Get rid of the deleted
+ * files now so they don't stay around indefinitely. */
+ ORDERED_HASHMAP_FOREACH(f, j->files) {
+ r = journal_file_fstat(f);
+ if (r == -EIDRM)
+ remove_file_real(j, f);
+ else if (r < 0)
+ log_debug_errno(r, "Failed to fstat() journal file '%s', ignoring: %m", f->path);
+ }
+
+ /* The journal might have changed since the context object was created and we weren't
+ * watching before, hence don't wait for anything, and return immediately. */
+ return determine_change(j);
+ }
+
+ r = sd_journal_get_timeout(j, &t);
+ if (r < 0)
+ return r;
+
+ if (t != UINT64_MAX) {
+ t = usec_sub_unsigned(t, now(CLOCK_MONOTONIC));
+
+ if (timeout_usec == UINT64_MAX || timeout_usec > t)
+ timeout_usec = t;
+ }
+
+ do {
+ r = fd_wait_for_event(j->inotify_fd, POLLIN, timeout_usec);
+ } while (r == -EINTR);
+
+ if (r < 0)
+ return r;
+
+ return sd_journal_process(j);
+}
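+
+/* Illustrative "tail -f"-style loop built on top of sd_journal_wait() (assumed client code;
+ * process_entry() is a hypothetical helper). The return value of sd_journal_wait() is one of
+ * SD_JOURNAL_NOP, SD_JOURNAL_APPEND or SD_JOURNAL_INVALIDATE:
+ *
+ *     for (;;) {
+ *             while (sd_journal_next(j) > 0)
+ *                     process_entry(j);
+ *
+ *             if (sd_journal_wait(j, UINT64_MAX) < 0)
+ *                     break;
+ *     }
+ *
+ * Callers with their own event loop would instead poll() on sd_journal_get_fd() with the events from
+ * sd_journal_get_events() and the timeout from sd_journal_get_timeout(), calling sd_journal_process()
+ * whenever the fd fires. */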
+
+_public_ int sd_journal_get_cutoff_realtime_usec(sd_journal *j, uint64_t *from, uint64_t *to) {
+ JournalFile *f;
+ bool first = true;
+ uint64_t fmin = 0, tmax = 0;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(from || to, -EINVAL);
+ assert_return(from != to, -EINVAL);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files) {
+ usec_t fr, t;
+
+ r = journal_file_get_cutoff_realtime_usec(f, &fr, &t);
+ if (r == -ENOENT)
+ continue;
+ if (r < 0)
+ return r;
+ if (r == 0)
+ continue;
+
+ if (first) {
+ fmin = fr;
+ tmax = t;
+ first = false;
+ } else {
+ fmin = MIN(fr, fmin);
+ tmax = MAX(t, tmax);
+ }
+ }
+
+ if (from)
+ *from = fmin;
+ if (to)
+ *to = tmax;
+
+ return first ? 0 : 1;
+}
+
+_public_ int sd_journal_get_cutoff_monotonic_usec(
+ sd_journal *j,
+ sd_id128_t boot_id,
+ uint64_t *ret_from,
+ uint64_t *ret_to) {
+
+ uint64_t from = UINT64_MAX, to = UINT64_MAX;
+ bool found = false;
+ JournalFile *f;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(ret_from != ret_to, -EINVAL);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files) {
+ usec_t ff, tt;
+
+ r = journal_file_get_cutoff_monotonic_usec(f, boot_id, &ff, &tt);
+ if (r == -ENOENT)
+ continue;
+ if (r < 0)
+ return r;
+ if (r == 0)
+ continue;
+
+ if (found) {
+ from = MIN(ff, from);
+ to = MAX(tt, to);
+ } else {
+ from = ff;
+ to = tt;
+ found = true;
+ }
+ }
+
+ if (ret_from)
+ *ret_from = from;
+ if (ret_to)
+ *ret_to = to;
+
+ return found;
+}
+
+void journal_print_header(sd_journal *j) {
+ JournalFile *f;
+ bool newline = false;
+
+ assert(j);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files) {
+ if (newline)
+ putchar('\n');
+ else
+ newline = true;
+
+ journal_file_print_header(f);
+ }
+}
+
+_public_ int sd_journal_get_usage(sd_journal *j, uint64_t *ret) {
+ JournalFile *f;
+ uint64_t sum = 0;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(ret, -EINVAL);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files) {
+ struct stat st;
+ uint64_t b;
+
+ if (fstat(f->fd, &st) < 0)
+ return -errno;
+
+ b = (uint64_t) st.st_blocks;
+ if (b > UINT64_MAX / 512)
+ return -EOVERFLOW;
+ b *= 512;
+
+ if (sum > UINT64_MAX - b)
+ return -EOVERFLOW;
+ sum += b;
+ }
+
+ *ret = sum;
+ return 0;
+}
+
+_public_ int sd_journal_query_unique(sd_journal *j, const char *field) {
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ if (!field_is_valid(field))
+ return -EINVAL;
+
+ r = free_and_strdup(&j->unique_field, field);
+ if (r < 0)
+ return r;
+
+ j->unique_file = NULL;
+ j->unique_offset = 0;
+ j->unique_file_lost = false;
+
+ return 0;
+}
+
+_public_ int sd_journal_enumerate_unique(
+ sd_journal *j,
+ const void **ret_data,
+ size_t *ret_size) {
+
+ size_t k;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(j->unique_field, -EINVAL);
+
+ k = strlen(j->unique_field);
+
+ if (!j->unique_file) {
+ if (j->unique_file_lost)
+ return 0;
+
+ j->unique_file = ordered_hashmap_first(j->files);
+ if (!j->unique_file)
+ return 0;
+
+ j->unique_offset = 0;
+ }
+
+ for (;;) {
+ JournalFile *of;
+ Object *o;
+ void *odata;
+ size_t ol;
+ bool found;
+ int r;
+
+ /* Proceed to next data object in the field's linked list */
+ if (j->unique_offset == 0) {
+ r = journal_file_find_field_object(j->unique_file, j->unique_field, k, &o, NULL);
+ if (r < 0)
+ return r;
+
+ j->unique_offset = r > 0 ? le64toh(o->field.head_data_offset) : 0;
+ } else {
+ r = journal_file_move_to_object(j->unique_file, OBJECT_DATA, j->unique_offset, &o);
+ if (r < 0)
+ return r;
+
+ j->unique_offset = le64toh(o->data.next_field_offset);
+ }
+
+ /* We reached the end of the list? Then start again, with the next file */
+ if (j->unique_offset == 0) {
+ j->unique_file = ordered_hashmap_next(j->files, j->unique_file->path);
+ if (!j->unique_file)
+ return 0;
+
+ continue;
+ }
+
+ r = journal_file_move_to_object(j->unique_file, OBJECT_DATA, j->unique_offset, &o);
+ if (r < 0)
+ return r;
+
+ /* Let's pin the data object, so we can look at it at the same time as one on another file. */
+ r = journal_file_pin_object(j->unique_file, o);
+ if (r < 0)
+ return r;
+
+ r = journal_file_data_payload(j->unique_file, o, j->unique_offset, NULL, 0,
+ j->data_threshold, &odata, &ol);
+ if (r < 0)
+ return r;
+
+ /* Check if we have at least the field name and "=". */
+ if (ol <= k)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s:offset " OFSfmt ": object has size %zu, expected at least %zu",
+ j->unique_file->path,
+ j->unique_offset, ol, k + 1);
+
+ if (memcmp(odata, j->unique_field, k) != 0 || ((const char*) odata)[k] != '=')
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s:offset " OFSfmt ": object does not start with \"%s=\"",
+ j->unique_file->path,
+ j->unique_offset,
+ j->unique_field);
+
+ /* OK, now let's see if we already returned this data object by checking if it exists in the
+ * earlier traversed files. */
+ found = false;
+ ORDERED_HASHMAP_FOREACH(of, j->files) {
+ if (of == j->unique_file)
+ break;
+
+                       /* Skip this file if it didn't have any fields indexed */
+ if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) && le64toh(of->header->n_fields) <= 0)
+ continue;
+
+ /* We can reuse the hash from our current file only on old-style journal files
+ * without keyed hashes. On new-style files we have to calculate the hash anew, to
+ * take the per-file hash seed into consideration. */
+ if (!JOURNAL_HEADER_KEYED_HASH(j->unique_file->header) && !JOURNAL_HEADER_KEYED_HASH(of->header))
+ r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+ else
+ r = journal_file_find_data_object(of, odata, ol, NULL, NULL);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ *ret_data = odata;
+ *ret_size = ol;
+
+ return 1;
+ }
+}
+
+_public_ int sd_journal_enumerate_available_unique(sd_journal *j, const void **data, size_t *size) {
+ for (;;) {
+ int r;
+
+ r = sd_journal_enumerate_unique(j, data, size);
+ if (r >= 0)
+ return r;
+ if (!JOURNAL_ERRNO_IS_UNAVAILABLE_FIELD(r))
+ return r;
+ /* Try with the next field. sd_journal_enumerate_unique() modifies state, so on the next try
+ * we will access the next field. */
+ }
+}
+
+_public_ void sd_journal_restart_unique(sd_journal *j) {
+ if (!j || journal_origin_changed(j))
+ return;
+
+ j->unique_file = NULL;
+ j->unique_offset = 0;
+ j->unique_file_lost = false;
+}
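+
+/* Usage sketch for the unique-value iteration above (assumes the SD_JOURNAL_FOREACH_UNIQUE convenience
+ * macro from sd-journal.h): list every distinct _SYSTEMD_UNIT= assignment present in the opened files:
+ *
+ *     const void *d;
+ *     size_t l;
+ *
+ *     if (sd_journal_query_unique(j, "_SYSTEMD_UNIT") >= 0)
+ *             SD_JOURNAL_FOREACH_UNIQUE(j, d, l)
+ *                     printf("%.*s\n", (int) l, (const char*) d);
+ */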
+
+_public_ int sd_journal_enumerate_fields(sd_journal *j, const char **field) {
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(field, -EINVAL);
+
+ if (!j->fields_file) {
+ if (j->fields_file_lost)
+ return 0;
+
+ j->fields_file = ordered_hashmap_first(j->files);
+ if (!j->fields_file)
+ return 0;
+
+ j->fields_hash_table_index = 0;
+ j->fields_offset = 0;
+ }
+
+ for (;;) {
+ JournalFile *f, *of;
+ uint64_t m;
+ Object *o;
+ size_t sz;
+ bool found;
+
+ f = j->fields_file;
+
+ if (j->fields_offset == 0) {
+ bool eof = false;
+
+ /* We are not yet positioned at any field. Let's pick the first one */
+ r = journal_file_map_field_hash_table(f);
+ if (r < 0)
+ return r;
+
+ m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
+ for (;;) {
+ if (j->fields_hash_table_index >= m) {
+ /* Reached the end of the hash table, go to the next file. */
+ eof = true;
+ break;
+ }
+
+ j->fields_offset = le64toh(f->field_hash_table[j->fields_hash_table_index].head_hash_offset);
+
+ if (j->fields_offset != 0)
+ break;
+
+ /* Empty hash table bucket, go to next one */
+ j->fields_hash_table_index++;
+ }
+
+ if (eof) {
+ /* Proceed with next file */
+ j->fields_file = ordered_hashmap_next(j->files, f->path);
+ if (!j->fields_file) {
+ *field = NULL;
+ return 0;
+ }
+
+ j->fields_offset = 0;
+ j->fields_hash_table_index = 0;
+ continue;
+ }
+
+ } else {
+                       /* We are already positioned at a field. Let's figure out the next field from it. */
+
+ r = journal_file_move_to_object(f, OBJECT_FIELD, j->fields_offset, &o);
+ if (r < 0)
+ return r;
+
+ j->fields_offset = le64toh(o->field.next_hash_offset);
+ if (j->fields_offset == 0) {
+ /* Reached the end of the hash table chain */
+ j->fields_hash_table_index++;
+ continue;
+ }
+ }
+
+ /* We use OBJECT_UNUSED here, so that the iterator below doesn't remove our mmap window */
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, j->fields_offset, &o);
+ if (r < 0)
+ return r;
+
+ /* Because we used OBJECT_UNUSED above, we need to do our type check manually */
+ if (o->object.type != OBJECT_FIELD)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s:offset " OFSfmt ": object has type %i, expected %i",
+ f->path, j->fields_offset,
+ o->object.type, OBJECT_FIELD);
+
+ sz = le64toh(o->object.size) - offsetof(Object, field.payload);
+
+ /* Let's see if we already returned this field name before. */
+ found = false;
+ ORDERED_HASHMAP_FOREACH(of, j->files) {
+ if (of == f)
+ break;
+
+                       /* Skip this file if it didn't have any fields indexed */
+ if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) && le64toh(of->header->n_fields) <= 0)
+ continue;
+
+ if (!JOURNAL_HEADER_KEYED_HASH(f->header) && !JOURNAL_HEADER_KEYED_HASH(of->header))
+ r = journal_file_find_field_object_with_hash(of, o->field.payload, sz,
+ le64toh(o->field.hash), NULL, NULL);
+ else
+ r = journal_file_find_field_object(of, o->field.payload, sz, NULL, NULL);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ /* Check if this is really a valid string containing no NUL byte */
+ if (memchr(o->field.payload, 0, sz))
+ return -EBADMSG;
+
+ if (j->data_threshold > 0 && sz > j->data_threshold)
+ sz = j->data_threshold;
+
+ if (!GREEDY_REALLOC(j->fields_buffer, sz + 1))
+ return -ENOMEM;
+
+ memcpy(j->fields_buffer, o->field.payload, sz);
+ j->fields_buffer[sz] = 0;
+
+ if (!field_is_valid(j->fields_buffer))
+ return -EBADMSG;
+
+ *field = j->fields_buffer;
+ return 1;
+ }
+}
+
+_public_ void sd_journal_restart_fields(sd_journal *j) {
+ if (!j || journal_origin_changed(j))
+ return;
+
+ j->fields_file = NULL;
+ j->fields_hash_table_index = 0;
+ j->fields_offset = 0;
+ j->fields_file_lost = false;
+}
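+
+/* Usage sketch for the field-name iteration above (assumes the SD_JOURNAL_FOREACH_FIELD convenience
+ * macro from sd-journal.h): print the name of every field used anywhere in the opened files:
+ *
+ *     const char *field;
+ *
+ *     SD_JOURNAL_FOREACH_FIELD(j, field)
+ *             printf("%s\n", field);
+ */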
+
+_public_ int sd_journal_reliable_fd(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ return !j->on_network;
+}
+
+static char *lookup_field(const char *field, void *userdata) {
+ sd_journal *j = ASSERT_PTR(userdata);
+ const void *data;
+ size_t size, d;
+ int r;
+
+ assert(field);
+
+ r = sd_journal_get_data(j, field, &data, &size);
+ if (r < 0 ||
+ size > REPLACE_VAR_MAX)
+ return strdup(field);
+
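+       /* Skip the "FIELD=" prefix, i.e. the field name plus the '=' separator. */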
+ d = strlen(field) + 1;
+
+ return strndup((const char*) data + d, size - d);
+}
+
+_public_ int sd_journal_get_catalog(sd_journal *j, char **ret) {
+ const void *data;
+ size_t size;
+ sd_id128_t id;
+ _cleanup_free_ char *text = NULL, *cid = NULL;
+ char *t;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(ret, -EINVAL);
+
+ r = sd_journal_get_data(j, "MESSAGE_ID", &data, &size);
+ if (r < 0)
+ return r;
+
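+       /* Skip the "MESSAGE_ID=" prefix (11 characters) and copy only the ID string itself. */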
+ cid = strndup((const char*) data + 11, size - 11);
+ if (!cid)
+ return -ENOMEM;
+
+ r = sd_id128_from_string(cid, &id);
+ if (r < 0)
+ return r;
+
+ r = catalog_get(secure_getenv("SYSTEMD_CATALOG") ?: CATALOG_DATABASE, id, &text);
+ if (r < 0)
+ return r;
+
+ t = replace_var(text, lookup_field, j);
+ if (!t)
+ return -ENOMEM;
+
+ *ret = t;
+ return 0;
+}
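+
+/* Usage sketch (client code, for illustration): fetch the catalog text for the current entry, if any.
+ * The call fails with -ENOENT if the entry carries no MESSAGE_ID= field:
+ *
+ *     char *text = NULL;
+ *     if (sd_journal_get_catalog(j, &text) >= 0) {
+ *             printf("%s\n", text);
+ *             free(text);
+ *     }
+ */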
+
+_public_ int sd_journal_get_catalog_for_message_id(sd_id128_t id, char **ret) {
+ assert_return(ret, -EINVAL);
+
+ return catalog_get(CATALOG_DATABASE, id, ret);
+}
+
+_public_ int sd_journal_set_data_threshold(sd_journal *j, size_t sz) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+
+ j->data_threshold = sz;
+ return 0;
+}
+
+_public_ int sd_journal_get_data_threshold(sd_journal *j, size_t *sz) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_origin_changed(j), -ECHILD);
+ assert_return(sz, -EINVAL);
+
+ *sz = j->data_threshold;
+ return 0;
+}
+
+_public_ int sd_journal_has_runtime_files(sd_journal *j) {
+ assert_return(j, -EINVAL);
+
+ return j->has_runtime_files;
+}
+
+_public_ int sd_journal_has_persistent_files(sd_journal *j) {
+ assert_return(j, -EINVAL);
+
+ return j->has_persistent_files;
+}
diff --git a/src/libsystemd/sd-journal/test-audit-type.c b/src/libsystemd/sd-journal/test-audit-type.c
new file mode 100644
index 0000000..1d5003b
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-audit-type.c
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <stdio.h>
+#include <linux/audit.h>
+
+#include "audit-type.h"
+#include "tests.h"
+
+static void print_audit_label(int i) {
+ const char *name;
+
+ name = audit_type_name_alloca(i);
+ /* This is a separate function only because of alloca */
+ printf("%i → %s → %s\n", i, audit_type_to_string(i), name);
+}
+
+TEST(audit_type) {
+ int i;
+
+ for (i = 0; i <= AUDIT_KERNEL; i++)
+ print_audit_label(i);
+}
+
+DEFINE_TEST_MAIN(LOG_INFO);
diff --git a/src/libsystemd/sd-journal/test-catalog.c b/src/libsystemd/sd-journal/test-catalog.c
new file mode 100644
index 0000000..603952e
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-catalog.c
@@ -0,0 +1,235 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include "sd-messages.h"
+
+#include "alloc-util.h"
+#include "catalog.h"
+#include "fd-util.h"
+#include "fs-util.h"
+#include "log.h"
+#include "macro.h"
+#include "path-util.h"
+#include "string-util.h"
+#include "strv.h"
+#include "tests.h"
+#include "tmpfile-util.h"
+
+static char** catalog_dirs = NULL;
+static const char *no_catalog_dirs[] = {
+ "/bin/hopefully/with/no/catalog",
+ NULL
+};
+
+static OrderedHashmap* test_import(const char* contents, ssize_t size, int code) {
+ _cleanup_(unlink_tempfilep) char name[] = "/tmp/test-catalog.XXXXXX";
+ _cleanup_close_ int fd = -EBADF;
+ OrderedHashmap *h;
+
+ if (size < 0)
+ size = strlen(contents);
+
+ assert_se(h = ordered_hashmap_new(&catalog_hash_ops));
+
+ fd = mkostemp_safe(name);
+ assert_se(fd >= 0);
+ assert_se(write(fd, contents, size) == size);
+
+ assert_se(catalog_import_file(h, name) == code);
+
+ return h;
+}
+
+static void test_catalog_import_invalid(void) {
+ _cleanup_ordered_hashmap_free_free_free_ OrderedHashmap *h = NULL;
+
+ h = test_import("xxx", -1, -EINVAL);
+ assert_se(ordered_hashmap_isempty(h));
+}
+
+static void test_catalog_import_badid(void) {
+ _unused_ _cleanup_ordered_hashmap_free_free_free_ OrderedHashmap *h = NULL;
+ const char *input =
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededede\n" \
+"Subject: message\n" \
+"\n" \
+"payload\n";
+ h = test_import(input, -1, -EINVAL);
+}
+
+static void test_catalog_import_one(void) {
+ _cleanup_ordered_hashmap_free_free_free_ OrderedHashmap *h = NULL;
+ char *payload;
+
+ const char *input =
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: message\n" \
+"\n" \
+"payload\n";
+ const char *expect =
+"Subject: message\n" \
+"\n" \
+"payload\n";
+
+ h = test_import(input, -1, 0);
+ assert_se(ordered_hashmap_size(h) == 1);
+
+ ORDERED_HASHMAP_FOREACH(payload, h) {
+ printf("expect: %s\n", expect);
+ printf("actual: %s\n", payload);
+ assert_se(streq(expect, payload));
+ }
+}
+
+static void test_catalog_import_merge(void) {
+ _cleanup_ordered_hashmap_free_free_free_ OrderedHashmap *h = NULL;
+ char *payload;
+
+ const char *input =
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: message\n" \
+"Defined-By: me\n" \
+"\n" \
+"payload\n" \
+"\n" \
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: override subject\n" \
+"X-Header: hello\n" \
+"\n" \
+"override payload\n";
+
+ const char *combined =
+"Subject: override subject\n" \
+"X-Header: hello\n" \
+"Subject: message\n" \
+"Defined-By: me\n" \
+"\n" \
+"override payload\n";
+
+ h = test_import(input, -1, 0);
+ assert_se(ordered_hashmap_size(h) == 1);
+
+ ORDERED_HASHMAP_FOREACH(payload, h)
+ assert_se(streq(combined, payload));
+}
+
+static void test_catalog_import_merge_no_body(void) {
+ _cleanup_ordered_hashmap_free_free_free_ OrderedHashmap *h = NULL;
+ char *payload;
+
+ const char *input =
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: message\n" \
+"Defined-By: me\n" \
+"\n" \
+"payload\n" \
+"\n" \
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: override subject\n" \
+"X-Header: hello\n" \
+"\n";
+
+ const char *combined =
+"Subject: override subject\n" \
+"X-Header: hello\n" \
+"Subject: message\n" \
+"Defined-By: me\n" \
+"\n" \
+"payload\n";
+
+ h = test_import(input, -1, 0);
+ assert_se(ordered_hashmap_size(h) == 1);
+
+ ORDERED_HASHMAP_FOREACH(payload, h)
+ assert_se(streq(combined, payload));
+}
+
+static void test_catalog_update(const char *database) {
+ int r;
+
+ /* Test what happens if there are no files. */
+ r = catalog_update(database, NULL, NULL);
+ assert_se(r == 0);
+
+ /* Test what happens if there are no files in the directory. */
+ r = catalog_update(database, NULL, no_catalog_dirs);
+ assert_se(r == 0);
+
+ /* Make sure that we at least have some files loaded or the
+ * catalog_list below will fail. */
+ r = catalog_update(database, NULL, (const char * const *) catalog_dirs);
+ assert_se(r == 0);
+}
+
+static void test_catalog_file_lang(void) {
+ _cleanup_free_ char *lang = NULL, *lang2 = NULL, *lang3 = NULL, *lang4 = NULL;
+
+ assert_se(catalog_file_lang("systemd.de_DE.catalog", &lang) == 1);
+ assert_se(streq(lang, "de_DE"));
+
+ assert_se(catalog_file_lang("systemd..catalog", &lang2) == 0);
+ assert_se(lang2 == NULL);
+
+ assert_se(catalog_file_lang("systemd.fr.catalog", &lang2) == 1);
+ assert_se(streq(lang2, "fr"));
+
+ assert_se(catalog_file_lang("systemd.fr.catalog.gz", &lang3) == 0);
+ assert_se(lang3 == NULL);
+
+ assert_se(catalog_file_lang("systemd.01234567890123456789012345678901.catalog", &lang3) == 0);
+ assert_se(lang3 == NULL);
+
+ assert_se(catalog_file_lang("systemd.0123456789012345678901234567890.catalog", &lang3) == 1);
+ assert_se(streq(lang3, "0123456789012345678901234567890"));
+
+ assert_se(catalog_file_lang("/x/y/systemd.catalog", &lang4) == 0);
+ assert_se(lang4 == NULL);
+
+ assert_se(catalog_file_lang("/x/y/systemd.ru_RU.catalog", &lang4) == 1);
+ assert_se(streq(lang4, "ru_RU"));
+}
+
+int main(int argc, char *argv[]) {
+ _cleanup_(unlink_tempfilep) char database[] = "/tmp/test-catalog.XXXXXX";
+ _cleanup_close_ int fd = -EBADF;
+ _cleanup_free_ char *text = NULL;
+ int r;
+
+ setlocale(LC_ALL, "de_DE.UTF-8");
+
+ test_setup_logging(LOG_DEBUG);
+
+ /* If test-catalog is located in the build directory, use the catalogs from there.
+ * Otherwise, e.g. when installed by the systemd-tests package, use the installed catalogs. */
+ catalog_dirs = STRV_MAKE(get_catalog_dir());
+
+ assert_se(access(catalog_dirs[0], F_OK) >= 0);
+ log_notice("Using catalog directory '%s'", catalog_dirs[0]);
+
+ test_catalog_file_lang();
+
+ test_catalog_import_invalid();
+ test_catalog_import_badid();
+ test_catalog_import_one();
+ test_catalog_import_merge();
+ test_catalog_import_merge_no_body();
+
+ assert_se((fd = mkostemp_safe(database)) >= 0);
+
+ test_catalog_update(database);
+
+ r = catalog_list(stdout, database, true);
+ assert_se(r >= 0);
+
+ r = catalog_list(stdout, database, false);
+ assert_se(r >= 0);
+
+ assert_se(catalog_get(database, SD_MESSAGE_COREDUMP, &text) >= 0);
+ printf(">>>%s<<<\n", text);
+
+ return 0;
+}
diff --git a/src/libsystemd/sd-journal/test-journal-append.c b/src/libsystemd/sd-journal/test-journal-append.c
new file mode 100644
index 0000000..24b98c8
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-append.c
@@ -0,0 +1,269 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "chattr-util.h"
+#include "fd-util.h"
+#include "fs-util.h"
+#include "iovec-util.h"
+#include "journal-file-util.h"
+#include "log.h"
+#include "mmap-cache.h"
+#include "parse-util.h"
+#include "random-util.h"
+#include "rm-rf.h"
+#include "strv.h"
+#include "terminal-util.h"
+#include "tests.h"
+#include "tmpfile-util.h"
+
+static int journal_append_message(JournalFile *mj, const char *message) {
+ struct iovec iovec;
+ struct dual_timestamp ts;
+
+ assert(mj);
+ assert(message);
+
+ dual_timestamp_now(&ts);
+ iovec = IOVEC_MAKE_STRING(message);
+ return journal_file_append_entry(
+ mj,
+ &ts,
+ /* boot_id= */ NULL,
+ &iovec,
+ /* n_iovec= */ 1,
+ /* seqnum= */ NULL,
+ /* seqnum_id= */ NULL,
+ /* ret_object= */ NULL,
+ /* ret_offset= */ NULL);
+}
+
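+/* Append a handful of entries, then set the lowest bit of every 'step'-th byte starting at
+ * 'start_offset' (or at a random offset if UINT64_MAX), reopening the journal and appending a new
+ * entry after every flip. We only check that nothing crashes; a rejected file or a failed write
+ * counts as success. */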
+static int journal_corrupt_and_append(uint64_t start_offset, uint64_t step) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *mmap_cache = NULL;
+ _cleanup_(rm_rf_physical_and_freep) char *tempdir = NULL;
+ _cleanup_(journal_file_offline_closep) JournalFile *mj = NULL;
+ uint64_t start, end;
+ int r;
+
+ mmap_cache = mmap_cache_new();
+ assert_se(mmap_cache);
+
+ /* journal_file_open() requires a valid machine id */
+ if (sd_id128_get_machine(NULL) < 0)
+ return log_tests_skipped("No valid machine ID found");
+
+ assert_se(mkdtemp_malloc("/tmp/journal-append-XXXXXX", &tempdir) >= 0);
+ assert_se(chdir(tempdir) >= 0);
+ (void) chattr_path(tempdir, FS_NOCOW_FL, FS_NOCOW_FL, NULL);
+
+ log_debug("Opening journal %s/system.journal", tempdir);
+
+ r = journal_file_open(
+ /* fd= */ -1,
+ "system.journal",
+ O_RDWR|O_CREAT,
+ JOURNAL_COMPRESS,
+ 0644,
+ /* compress_threshold_bytes= */ UINT64_MAX,
+ /* metrics= */ NULL,
+ mmap_cache,
+ /* template= */ NULL,
+ &mj);
+ if (r < 0)
+ return log_error_errno(r, "Failed to open the journal: %m");
+
+ assert_se(mj);
+
+ /* Add a couple of initial messages */
+ for (int i = 0; i < 10; i++) {
+ _cleanup_free_ char *message = NULL;
+
+ assert_se(asprintf(&message, "MESSAGE=Initial message %d", i) >= 0);
+ r = journal_append_message(mj, message);
+ if (r < 0)
+ return log_error_errno(r, "Failed to write to the journal: %m");
+ }
+
+ start = start_offset == UINT64_MAX ? random_u64() % mj->last_stat.st_size : start_offset;
+ end = (uint64_t) mj->last_stat.st_size;
+
+ /* Print the initial offset at which we start flipping bits; it can later be
+ * used to reproduce a potential failure */
+ log_info("Start offset: %" PRIu64 ", corrupt-step: %" PRIu64, start, step);
+ fflush(stdout);
+
+ if (start >= end)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Start offset >= journal size, can't continue");
+
+ for (uint64_t offset = start; offset < end; offset += step) {
+ _cleanup_free_ char *message = NULL;
+ uint8_t b;
+
+ /* Flip a bit in the journal file */
+ r = pread(mj->fd, &b, 1, offset);
+ assert_se(r == 1);
+ b |= 0x1;
+ r = pwrite(mj->fd, &b, 1, offset);
+ assert_se(r == 1);
+
+ /* Close and reopen the journal to flush all caches and remap
+ * the corrupted journal */
+ mj = journal_file_offline_close(mj);
+ r = journal_file_open(
+ /* fd= */ -1,
+ "system.journal",
+ O_RDWR|O_CREAT,
+ JOURNAL_COMPRESS,
+ 0644,
+ /* compress_threshold_bytes= */ UINT64_MAX,
+ /* metrics= */ NULL,
+ mmap_cache,
+ /* template= */ NULL,
+ &mj);
+ if (r < 0) {
+ /* The corrupted journal might get rejected during reopening
+ * if it's corrupted enough (especially its header), so
+ * treat this as a success if it doesn't crash */
+ log_info_errno(r, "Failed to reopen the journal: %m");
+ break;
+ }
+
+ /* Try to write something to the (possibly corrupted) journal */
+ assert_se(asprintf(&message, "MESSAGE=Hello world %" PRIu64, offset) >= 0);
+ r = journal_append_message(mj, message);
+ if (r < 0) {
+ /* We only care about crashes or sanitizer errors; a failed
+ * write without a crash counts as success */
+ log_info_errno(r, "Failed to write to the journal: %m");
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int main(int argc, char *argv[]) {
+ uint64_t start_offset = UINT64_MAX;
+ uint64_t iterations = 100;
+ uint64_t iteration_step = 1;
+ uint64_t corrupt_step = 31;
+ bool sequential = false, run_one = false;
+ int c, r;
+
+ test_setup_logging(LOG_DEBUG);
+
+ enum {
+ ARG_START_OFFSET = 0x1000,
+ ARG_ITERATIONS,
+ ARG_ITERATION_STEP,
+ ARG_CORRUPT_STEP,
+ ARG_SEQUENTIAL,
+ ARG_RUN_ONE,
+ };
+
+ static const struct option options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "start-offset", required_argument, NULL, ARG_START_OFFSET },
+ { "iterations", required_argument, NULL, ARG_ITERATIONS },
+ { "iteration-step", required_argument, NULL, ARG_ITERATION_STEP },
+ { "corrupt-step", required_argument, NULL, ARG_CORRUPT_STEP },
+ { "sequential", no_argument, NULL, ARG_SEQUENTIAL },
+ { "run-one", required_argument, NULL, ARG_RUN_ONE },
+ {}
+ };
+
+ assert_se(argc >= 0);
+ assert_se(argv);
+
+ while ((c = getopt_long(argc, argv, "h", options, NULL)) >= 0)
+ switch (c) {
+
+ case 'h':
+ printf("Syntax:\n"
+ " %s [OPTION...]\n"
+ "Options:\n"
+ " --start-offset=OFFSET Offset at which to start corrupting the journal\n"
+ " (default: random offset is picked, unless\n"
+ " --sequential is used - in that case we use 0 + iteration)\n"
+ " --iterations=ITER Number of iterations to perform before exiting\n"
+ " (default: 100)\n"
+ " --iteration-step=STEP Iteration step (default: 1)\n"
+ " --corrupt-step=STEP Corrupt every n-th byte starting from OFFSET (default: 31)\n"
+ " --sequential Go through offsets sequentially instead of picking\n"
+ " a random one on each iteration. If set, we go through\n"
+ " offsets <0; ITER), or <OFFSET, ITER) if --start-offset=\n"
+ " is set (default: false)\n"
+ " --run-one=OFFSET Single shot mode for reproducing issues. Takes the same\n"
+ " offset as --start-offset= and does only one iteration\n"
+ , program_invocation_short_name);
+ return 0;
+
+ case ARG_START_OFFSET:
+ r = safe_atou64(optarg, &start_offset);
+ if (r < 0)
+ return log_error_errno(r, "Invalid starting offset: %m");
+ break;
+
+ case ARG_ITERATIONS:
+ r = safe_atou64(optarg, &iterations);
+ if (r < 0)
+ return log_error_errno(r, "Invalid value for iterations: %m");
+ break;
+
+ case ARG_CORRUPT_STEP:
+ r = safe_atou64(optarg, &corrupt_step);
+ if (r < 0)
+ return log_error_errno(r, "Invalid value for corrupt-step: %m");
+ break;
+
+ case ARG_ITERATION_STEP:
+ r = safe_atou64(optarg, &iteration_step);
+ if (r < 0)
+ return log_error_errno(r, "Invalid value for iteration-step: %m");
+ break;
+
+ case ARG_SEQUENTIAL:
+ sequential = true;
+ break;
+
+ case ARG_RUN_ONE:
+ r = safe_atou64(optarg, &start_offset);
+ if (r < 0)
+ return log_error_errno(r, "Invalid offset: %m");
+
+ run_one = true;
+ break;
+
+ case '?':
+ return -EINVAL;
+
+ default:
+ assert_not_reached();
+ }
+
+ if (run_one)
+ /* Reproducer mode */
+ return journal_corrupt_and_append(start_offset, corrupt_step);
+
+ for (uint64_t i = 0; i < iterations; i++) {
+ uint64_t offset = UINT64_MAX;
+
+ log_info("Iteration #%" PRIu64 ", step: %" PRIu64, i, iteration_step);
+
+ if (sequential)
+ offset = (start_offset == UINT64_MAX ? 0 : start_offset) + i * iteration_step;
+
+ r = journal_corrupt_and_append(offset, corrupt_step);
+ if (r < 0)
+ return EXIT_FAILURE;
+ if (r > 0)
+ /* Reached the end of the journal file */
+ break;
+ }
+
+ return EXIT_SUCCESS;
+}
diff --git a/src/libsystemd/sd-journal/test-journal-enum.c b/src/libsystemd/sd-journal/test-journal-enum.c
new file mode 100644
index 0000000..03fe8e2
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-enum.c
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <stdio.h>
+
+#include "sd-journal.h"
+
+#include "journal-internal.h"
+#include "log.h"
+#include "macro.h"
+#include "tests.h"
+
+int main(int argc, char *argv[]) {
+ unsigned n = 0;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY) >= 0);
+
+ assert_se(sd_journal_add_match(j, "_TRANSPORT=syslog", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "_UID=0", 0) >= 0);
+
+ SD_JOURNAL_FOREACH_BACKWARDS(j) {
+ const void *d;
+ size_t l;
+
+ assert_se(sd_journal_get_data(j, "MESSAGE", &d, &l) >= 0);
+
+ printf("%.*s\n", (int) l, (char*) d);
+
+ n++;
+ if (n >= 10)
+ break;
+ }
+
+ return 0;
+}
diff --git a/src/libsystemd/sd-journal/test-journal-file.c b/src/libsystemd/sd-journal/test-journal-file.c
new file mode 100644
index 0000000..729de1f
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-file.c
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "journal-file.h"
+#include "tests.h"
+#include "user-util.h"
+
+static void test_journal_file_parse_uid_from_filename_simple(
+ const char *path,
+ uid_t expected_uid,
+ int expected_error) {
+
+ uid_t uid = UID_INVALID;
+ int r;
+
+ log_info("testing %s", path);
+
+ r = journal_file_parse_uid_from_filename(path, &uid);
+ assert_se(r == expected_error);
+ if (r < 0)
+ assert_se(uid == UID_INVALID);
+ else
+ assert_se(uid == expected_uid);
+}
+
+TEST(journal_file_parse_uid_from_filename) {
+
+ test_journal_file_parse_uid_from_filename_simple("/var/log/journal/", 0, -EISDIR);
+
+ /* The helper should return -EREMOTE for any filename that does not look like an online or offline user
+ * journal. This includes archived and disposed journal files. */
+ test_journal_file_parse_uid_from_filename_simple("/etc/password", 0, -EREMOTE);
+ test_journal_file_parse_uid_from_filename_simple("system.journal", 0, -EREMOTE);
+ test_journal_file_parse_uid_from_filename_simple("user-1000@0005d26980bdce6e-2f2a4939583822ef.journal~", 0, -EREMOTE);
+ test_journal_file_parse_uid_from_filename_simple("user-1000@xxx-yyy-zzz.journal", 0, -EREMOTE);
+
+ test_journal_file_parse_uid_from_filename_simple("user-1000.journal", 1000, 0);
+ test_journal_file_parse_uid_from_filename_simple("user-foo.journal", 0, -EINVAL);
+ test_journal_file_parse_uid_from_filename_simple("user-65535.journal", 0, -ENXIO);
+}
+
+DEFINE_TEST_MAIN(LOG_INFO);
diff --git a/src/libsystemd/sd-journal/test-journal-flush.c b/src/libsystemd/sd-journal/test-journal-flush.c
new file mode 100644
index 0000000..3f07835
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-flush.c
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "chattr-util.h"
+#include "journal-file-util.h"
+#include "journal-internal.h"
+#include "logs-show.h"
+#include "macro.h"
+#include "path-util.h"
+#include "rm-rf.h"
+#include "string-util.h"
+#include "tests.h"
+#include "tmpfile-util.h"
+
+static void test_journal_flush_one(int argc, char *argv[]) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ _cleanup_free_ char *fn = NULL;
+ _cleanup_(rm_rf_physical_and_freep) char *dn = NULL;
+ _cleanup_(journal_file_offline_closep) JournalFile *new_journal = NULL;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ unsigned n, limit;
+ int r;
+
+ assert_se(m = mmap_cache_new());
+ assert_se(mkdtemp_malloc("/var/tmp/test-journal-flush.XXXXXX", &dn) >= 0);
+ (void) chattr_path(dn, FS_NOCOW_FL, FS_NOCOW_FL, NULL);
+
+ assert_se(fn = path_join(dn, "test.journal"));
+
+ r = journal_file_open(-1, fn, O_CREAT|O_RDWR, 0, 0644, 0, NULL, m, NULL, &new_journal);
+ assert_se(r >= 0);
+
+ if (argc > 1)
+ r = sd_journal_open_files(&j, (const char **) strv_skip(argv, 1), 0);
+ else
+ r = sd_journal_open(&j, 0);
+ assert_se(r == 0);
+
+ sd_journal_set_data_threshold(j, 0);
+
+ n = 0;
+ limit = slow_tests_enabled() ? 10000 : 1000;
+ SD_JOURNAL_FOREACH(j) {
+ Object *o;
+ JournalFile *f;
+
+ f = j->current_file;
+ assert_se(f && f->current_offset > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ log_error_errno(r, "journal_file_move_to_object failed: %m");
+ assert_se(r >= 0);
+
+ r = journal_file_copy_entry(f, new_journal, o, f->current_offset, NULL, NULL);
+ if (r < 0)
+ log_warning_errno(r, "journal_file_copy_entry failed: %m");
+ assert_se(r >= 0 ||
+ IN_SET(r, -EBADMSG, /* corrupted file */
+ -EPROTONOSUPPORT, /* unsupported compression */
+ -EIO, /* file rotated */
+ -EREMCHG)); /* clock rollback */
+
+ if (++n >= limit)
+ break;
+ }
+
+ if (n == 0)
+ return (void) log_tests_skipped("No journal entry found");
+
+ /* Open the new journal before archiving and offlining the file. */
+ sd_journal_close(j);
+ assert_se(sd_journal_open_directory(&j, dn, 0) >= 0);
+
+ /* Read the online journal. */
+ assert_se(sd_journal_seek_tail(j) >= 0);
+ assert_se(sd_journal_step_one(j, 0) > 0);
+ printf("current_journal: %s (%i)\n", j->current_file->path, j->current_file->fd);
+ assert_se(show_journal_entry(stdout, j, OUTPUT_EXPORT, 0, 0, NULL, NULL, NULL, &(dual_timestamp) {}, &(sd_id128_t) {}) >= 0);
+
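+ /* While the file is still online, everything past the tail entry must not be readable as an
+ * object; expect -EBADMSG or -EADDRNOTAVAIL for every offset. */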
+ uint64_t p;
+ assert_se(journal_file_tail_end_by_mmap(j->current_file, &p) >= 0);
+ for (uint64_t q = ALIGN64(p + 1); q < (uint64_t) j->current_file->last_stat.st_size; q = ALIGN64(q + 1)) {
+ Object *o;
+
+ r = journal_file_move_to_object(j->current_file, OBJECT_UNUSED, q, &o);
+ assert_se(IN_SET(r, -EBADMSG, -EADDRNOTAVAIL));
+ }
+
+ /* Archive and offline file. */
+ assert_se(journal_file_archive(new_journal, NULL) >= 0);
+ assert_se(journal_file_set_offline(new_journal, /* wait = */ true) >= 0);
+
+ /* Read the archived and offline journal. */
+ for (uint64_t q = ALIGN64(p + 1); q < (uint64_t) j->current_file->last_stat.st_size; q = ALIGN64(q + 1)) {
+ Object *o;
+
+ r = journal_file_move_to_object(j->current_file, OBJECT_UNUSED, q, &o);
+ assert_se(IN_SET(r, -EBADMSG, -EADDRNOTAVAIL, -EIDRM));
+ }
+}
+
+TEST(journal_flush) {
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
+ test_journal_flush_one(saved_argc, saved_argv);
+}
+
+TEST(journal_flush_compact) {
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
+ test_journal_flush_one(saved_argc, saved_argv);
+}
+
+DEFINE_TEST_MAIN(LOG_INFO);
diff --git a/src/libsystemd/sd-journal/test-journal-init.c b/src/libsystemd/sd-journal/test-journal-init.c
new file mode 100644
index 0000000..c8a1977
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-init.c
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "chattr-util.h"
+#include "journal-internal.h"
+#include "log.h"
+#include "parse-util.h"
+#include "process-util.h"
+#include "rm-rf.h"
+#include "tests.h"
+
+int main(int argc, char *argv[]) {
+ sd_journal *j;
+ int r, i, I = 100;
+ char t[] = "/var/tmp/journal-stream-XXXXXX";
+
+ test_setup_logging(LOG_DEBUG);
+
+ if (argc >= 2) {
+ r = safe_atoi(argv[1], &I);
+ if (r < 0)
+ log_info("Could not parse loop count argument. Using default.");
+ }
+
+ log_info("Running %d loops", I);
+
+ assert_se(mkdtemp(t));
+ (void) chattr_path(t, FS_NOCOW_FL, FS_NOCOW_FL, NULL);
+
+ for (i = 0; i < I; i++) {
+ r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);
+ assert_se(r == 0);
+
+ sd_journal_close(j);
+
+ r = sd_journal_open_directory(&j, t, 0);
+ assert_se(r == 0);
+
+ assert_se(sd_journal_seek_head(j) == 0);
+ assert_se(j->current_location.type == LOCATION_HEAD);
+
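+ /* sd_journal objects cannot be used across fork(): in the child, journal operations must fail
+ * with -ECHILD and the current location must remain untouched. */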
+ r = safe_fork("(journal-fork-test)", FORK_WAIT|FORK_LOG, NULL);
+ if (r == 0) {
+ assert_se(j);
+ assert_se(sd_journal_get_realtime_usec(j, NULL) == -ECHILD);
+ assert_se(sd_journal_seek_tail(j) == -ECHILD);
+ assert_se(j->current_location.type == LOCATION_HEAD);
+ sd_journal_close(j);
+ _exit(EXIT_SUCCESS);
+ }
+
+ assert_se(r >= 0);
+
+ sd_journal_close(j);
+
+ j = NULL;
+ r = sd_journal_open_directory(&j, t, SD_JOURNAL_LOCAL_ONLY);
+ assert_se(r == -EINVAL);
+ assert_se(j == NULL);
+ }
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+
+ return 0;
+}
diff --git a/src/libsystemd/sd-journal/test-journal-interleaving.c b/src/libsystemd/sd-journal/test-journal-interleaving.c
new file mode 100644
index 0000000..8aeef8f
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-interleaving.c
@@ -0,0 +1,737 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "sd-id128.h"
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "chattr-util.h"
+#include "iovec-util.h"
+#include "journal-file-util.h"
+#include "journal-vacuum.h"
+#include "log.h"
+#include "logs-show.h"
+#include "parse-util.h"
+#include "random-util.h"
+#include "rm-rf.h"
+#include "tests.h"
+
+/* This program tests skipping around in a multi-file journal. */
+
+static bool arg_keep = false;
+static dual_timestamp previous_ts = {};
+
+_noreturn_ static void log_assert_errno(const char *text, int error, const char *file, unsigned line, const char *func) {
+ log_internal(LOG_CRIT, error, file, line, func,
+ "'%s' failed at %s:%u (%s): %m", text, file, line, func);
+ abort();
+}
+
+#define assert_ret(expr) \
+ do { \
+ int _r_ = (expr); \
+ if (_unlikely_(_r_ < 0)) \
+ log_assert_errno(#expr, -_r_, PROJECT_FILE, __LINE__, __func__); \
+ } while (false)
+
+static JournalFile *test_open_internal(const char *name, JournalFileFlags flags) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ JournalFile *f;
+
+ m = mmap_cache_new();
+ assert_se(m != NULL);
+
+ assert_ret(journal_file_open(-1, name, O_RDWR|O_CREAT, flags, 0644, UINT64_MAX, NULL, m, NULL, &f));
+ return f;
+}
+
+static JournalFile *test_open(const char *name) {
+ return test_open_internal(name, JOURNAL_COMPRESS);
+}
+
+static JournalFile *test_open_strict(const char *name) {
+ return test_open_internal(name, JOURNAL_COMPRESS | JOURNAL_STRICT_ORDER);
+}
+
+static void test_close(JournalFile *f) {
+ (void) journal_file_offline_close(f);
+}
+
+static void test_done(const char *t) {
+ log_info("Done...");
+
+ if (arg_keep)
+ log_info("Not removing %s", t);
+ else {
+ journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+ }
+
+ log_info("------------------------------------------------------------");
+}
+
+static void append_number(JournalFile *f, int n, const sd_id128_t *boot_id, uint64_t *seqnum, uint64_t *ret_offset) {
+ _cleanup_free_ char *p = NULL, *q = NULL;
+ dual_timestamp ts;
+ struct iovec iovec[2];
+ size_t n_iov = 0;
+
+ dual_timestamp_now(&ts);
+
+ if (ts.monotonic <= previous_ts.monotonic)
+ ts.monotonic = previous_ts.monotonic + 1;
+
+ if (ts.realtime <= previous_ts.realtime)
+ ts.realtime = previous_ts.realtime + 1;
+
+ previous_ts = ts;
+
+ assert_se(asprintf(&p, "NUMBER=%d", n) >= 0);
+ iovec[n_iov++] = IOVEC_MAKE_STRING(p);
+
+ if (boot_id) {
+ assert_se(q = strjoin("_BOOT_ID=", SD_ID128_TO_STRING(*boot_id)));
+ iovec[n_iov++] = IOVEC_MAKE_STRING(q);
+ }
+
+ assert_ret(journal_file_append_entry(f, &ts, boot_id, iovec, n_iov, seqnum, NULL, NULL, ret_offset));
+}
+
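+/* Append an entry whose timestamps lie before the previous entry's. In a JOURNAL_STRICT_ORDER file
+ * the entry is refused with -EREMCHG, but the data object it references has already been written
+ * and stays behind unreferenced (the scenario from issue #29275). */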
+static void append_unreferenced_data(JournalFile *f, const sd_id128_t *boot_id) {
+ _cleanup_free_ char *q = NULL;
+ dual_timestamp ts;
+ struct iovec iovec;
+
+ assert(boot_id);
+
+ ts.monotonic = usec_sub_unsigned(previous_ts.monotonic, 10);
+ ts.realtime = usec_sub_unsigned(previous_ts.realtime, 10);
+
+ assert_se(q = strjoin("_BOOT_ID=", SD_ID128_TO_STRING(*boot_id)));
+ iovec = IOVEC_MAKE_STRING(q);
+
+ assert_se(journal_file_append_entry(f, &ts, boot_id, &iovec, 1, NULL, NULL, NULL, NULL) == -EREMCHG);
+}
+
+static void test_check_number(sd_journal *j, int n) {
+ sd_id128_t boot_id;
+ const void *d;
+ _cleanup_free_ char *k = NULL;
+ size_t l;
+ int x;
+
+ assert_se(sd_journal_get_monotonic_usec(j, NULL, &boot_id) >= 0);
+ assert_ret(sd_journal_get_data(j, "NUMBER", &d, &l));
+ assert_se(k = strndup(d, l));
+ printf("%s %s (expected=%i)\n", SD_ID128_TO_STRING(boot_id), k, n);
+
+ assert_se(safe_atoi(k + STRLEN("NUMBER="), &x) >= 0);
+ assert_se(n == x);
+}
+
+static void test_check_numbers_down(sd_journal *j, int count) {
+ int i;
+
+ for (i = 1; i <= count; i++) {
+ int r;
+ test_check_number(j, i);
+ assert_ret(r = sd_journal_next(j));
+ if (i == count)
+ assert_se(r == 0);
+ else
+ assert_se(r == 1);
+ }
+
+}
+
+static void test_check_numbers_up(sd_journal *j, int count) {
+ for (int i = count; i >= 1; i--) {
+ int r;
+ test_check_number(j, i);
+ assert_ret(r = sd_journal_previous(j));
+ if (i == 1)
+ assert_se(r == 0);
+ else
+ assert_se(r == 1);
+ }
+
+}
+
+static void setup_sequential(void) {
+ JournalFile *f1, *f2, *f3;
+ sd_id128_t id;
+
+ f1 = test_open("one.journal");
+ f2 = test_open("two.journal");
+ f3 = test_open("three.journal");
+ assert_se(sd_id128_randomize(&id) >= 0);
+ log_info("boot_id: %s", SD_ID128_TO_STRING(id));
+ append_number(f1, 1, &id, NULL, NULL);
+ append_number(f1, 2, &id, NULL, NULL);
+ append_number(f1, 3, &id, NULL, NULL);
+ append_number(f2, 4, &id, NULL, NULL);
+ assert_se(sd_id128_randomize(&id) >= 0);
+ log_info("boot_id: %s", SD_ID128_TO_STRING(id));
+ append_number(f2, 5, &id, NULL, NULL);
+ append_number(f2, 6, &id, NULL, NULL);
+ append_number(f3, 7, &id, NULL, NULL);
+ append_number(f3, 8, &id, NULL, NULL);
+ assert_se(sd_id128_randomize(&id) >= 0);
+ log_info("boot_id: %s", SD_ID128_TO_STRING(id));
+ append_number(f3, 9, &id, NULL, NULL);
+ test_close(f1);
+ test_close(f2);
+ test_close(f3);
+}
+
+static void setup_interleaved(void) {
+ JournalFile *f1, *f2, *f3;
+ sd_id128_t id;
+
+ f1 = test_open("one.journal");
+ f2 = test_open("two.journal");
+ f3 = test_open("three.journal");
+ assert_se(sd_id128_randomize(&id) >= 0);
+ log_info("boot_id: %s", SD_ID128_TO_STRING(id));
+ append_number(f1, 1, &id, NULL, NULL);
+ append_number(f2, 2, &id, NULL, NULL);
+ append_number(f3, 3, &id, NULL, NULL);
+ append_number(f1, 4, &id, NULL, NULL);
+ append_number(f2, 5, &id, NULL, NULL);
+ append_number(f3, 6, &id, NULL, NULL);
+ append_number(f1, 7, &id, NULL, NULL);
+ append_number(f2, 8, &id, NULL, NULL);
+ append_number(f3, 9, &id, NULL, NULL);
+ test_close(f1);
+ test_close(f2);
+ test_close(f3);
+}
+
+static void setup_unreferenced_data(void) {
+ JournalFile *f1, *f2, *f3;
+ sd_id128_t id;
+
+ /* For issue #29275. */
+
+ f1 = test_open_strict("one.journal");
+ f2 = test_open_strict("two.journal");
+ f3 = test_open_strict("three.journal");
+ assert_se(sd_id128_randomize(&id) >= 0);
+ log_info("boot_id: %s", SD_ID128_TO_STRING(id));
+ append_number(f1, 1, &id, NULL, NULL);
+ append_number(f1, 2, &id, NULL, NULL);
+ append_number(f1, 3, &id, NULL, NULL);
+ assert_se(sd_id128_randomize(&id) >= 0);
+ log_info("boot_id: %s", SD_ID128_TO_STRING(id));
+ append_unreferenced_data(f1, &id);
+ append_number(f2, 4, &id, NULL, NULL);
+ append_number(f2, 5, &id, NULL, NULL);
+ append_number(f2, 6, &id, NULL, NULL);
+ assert_se(sd_id128_randomize(&id) >= 0);
+ log_info("boot_id: %s", SD_ID128_TO_STRING(id));
+ append_unreferenced_data(f2, &id);
+ append_number(f3, 7, &id, NULL, NULL);
+ append_number(f3, 8, &id, NULL, NULL);
+ append_number(f3, 9, &id, NULL, NULL);
+ test_close(f1);
+ test_close(f2);
+ test_close(f3);
+}
+
+static void mkdtemp_chdir_chattr(char *path) {
+ assert_se(mkdtemp(path));
+ assert_se(chdir(path) >= 0);
+
+ /* Speed up things a bit on btrfs, ensuring that CoW is turned off for all files created in our
+ * directory during the test run */
+ (void) chattr_path(path, FS_NOCOW_FL, FS_NOCOW_FL, NULL);
+}
+
+static void test_skip_one(void (*setup)(void)) {
+ char t[] = "/var/tmp/journal-skip-XXXXXX";
+ sd_journal *j;
+
+ mkdtemp_chdir_chattr(t);
+
+ setup();
+
+ /* Seek to head, iterate down. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_head(j));
+ assert_se(sd_journal_next(j) == 1); /* pointing to the first entry */
+ test_check_numbers_down(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to head, iterate down. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_head(j));
+ assert_se(sd_journal_next(j) == 1); /* pointing to the first entry */
+ assert_se(sd_journal_previous(j) == 0); /* no-op */
+ test_check_numbers_down(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to head twice, iterate down. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_head(j));
+ assert_se(sd_journal_next(j) == 1); /* pointing to the first entry */
+ assert_ret(sd_journal_seek_head(j));
+ assert_se(sd_journal_next(j) == 1); /* pointing to the first entry */
+ test_check_numbers_down(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to head, move to previous, then iterate down. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_head(j));
+ assert_se(sd_journal_previous(j) == 0); /* no-op */
+ assert_se(sd_journal_next(j) == 1); /* pointing to the first entry */
+ test_check_numbers_down(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to head, walk several steps, then iterate down. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_head(j));
+ assert_se(sd_journal_previous(j) == 0); /* no-op */
+ assert_se(sd_journal_previous(j) == 0); /* no-op */
+ assert_se(sd_journal_previous(j) == 0); /* no-op */
+ assert_se(sd_journal_next(j) == 1); /* pointing to the first entry */
+ assert_se(sd_journal_previous(j) == 0); /* no-op */
+ assert_se(sd_journal_previous(j) == 0); /* no-op */
+ test_check_numbers_down(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to tail, iterate up. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_tail(j));
+ assert_se(sd_journal_previous(j) == 1); /* pointing to the last entry */
+ test_check_numbers_up(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to tail twice, iterate up. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_tail(j));
+ assert_se(sd_journal_previous(j) == 1); /* pointing to the last entry */
+ assert_ret(sd_journal_seek_tail(j));
+ assert_se(sd_journal_previous(j) == 1); /* pointing to the last entry */
+ test_check_numbers_up(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to tail, move to next, then iterate up. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_tail(j));
+ assert_se(sd_journal_next(j) == 0); /* no-op */
+ assert_se(sd_journal_previous(j) == 1); /* pointing to the last entry */
+ test_check_numbers_up(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to tail, walk several steps, then iterate up. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_tail(j));
+ assert_se(sd_journal_next(j) == 0); /* no-op */
+ assert_se(sd_journal_next(j) == 0); /* no-op */
+ assert_se(sd_journal_next(j) == 0); /* no-op */
+ assert_se(sd_journal_previous(j) == 1); /* pointing to the last entry. */
+ assert_se(sd_journal_next(j) == 0); /* no-op */
+ assert_se(sd_journal_next(j) == 0); /* no-op */
+ test_check_numbers_up(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to tail, skip to head, iterate down. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_tail(j));
+ assert_se(sd_journal_previous_skip(j, 9) == 9); /* pointing to the first entry. */
+ test_check_numbers_down(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to tail, skip to head in a more complex way, then iterate down. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_tail(j));
+ assert_se(sd_journal_next(j) == 0);
+ assert_se(sd_journal_previous_skip(j, 4) == 4);
+ assert_se(sd_journal_previous_skip(j, 5) == 5);
+ assert_se(sd_journal_previous(j) == 0);
+ assert_se(sd_journal_previous_skip(j, 5) == 0);
+ assert_se(sd_journal_next(j) == 1);
+ assert_se(sd_journal_previous_skip(j, 5) == 1);
+ assert_se(sd_journal_next(j) == 1);
+ assert_se(sd_journal_next(j) == 1);
+ assert_se(sd_journal_previous(j) == 1);
+ assert_se(sd_journal_next(j) == 1);
+ assert_se(sd_journal_next(j) == 1);
+ assert_se(sd_journal_previous_skip(j, 5) == 3);
+ test_check_numbers_down(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to head, skip to tail, iterate up. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_head(j));
+ assert_se(sd_journal_next_skip(j, 9) == 9);
+ test_check_numbers_up(j, 9);
+ sd_journal_close(j);
+
+ /* Seek to head, skip to tail in a more complex way, then iterate up. */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_head(j));
+ assert_se(sd_journal_previous(j) == 0);
+ assert_se(sd_journal_next_skip(j, 4) == 4);
+ assert_se(sd_journal_next_skip(j, 5) == 5);
+ assert_se(sd_journal_next(j) == 0);
+ assert_se(sd_journal_next_skip(j, 5) == 0);
+ assert_se(sd_journal_previous(j) == 1);
+ assert_se(sd_journal_next_skip(j, 5) == 1);
+ assert_se(sd_journal_previous(j) == 1);
+ assert_se(sd_journal_previous(j) == 1);
+ assert_se(sd_journal_next(j) == 1);
+ assert_se(sd_journal_previous(j) == 1);
+ assert_se(sd_journal_previous(j) == 1);
+ assert_se(sd_journal_next_skip(j, 5) == 3);
+ test_check_numbers_up(j, 9);
+ sd_journal_close(j);
+
+ test_done(t);
+}
+
+TEST(skip) {
+ test_skip_one(setup_sequential);
+ test_skip_one(setup_interleaved);
+}
+
+static void test_boot_id_one(void (*setup)(void), size_t n_boots_expected) {
+ char t[] = "/var/tmp/journal-boot-id-XXXXXX";
+ sd_journal *j;
+ _cleanup_free_ BootId *boots = NULL;
+ size_t n_boots;
+
+ mkdtemp_chdir_chattr(t);
+
+ setup();
+
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_se(journal_get_boots(j, &boots, &n_boots) >= 0);
+ assert_se(boots);
+ assert_se(n_boots == n_boots_expected);
+ sd_journal_close(j);
+
+ FOREACH_ARRAY(b, boots, n_boots) {
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_se(journal_find_boot_by_id(j, b->id) == 1);
+ sd_journal_close(j);
+ }
+
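+ /* journal_find_boot_by_offset() counts from the newest boot for offsets <= 0 and from the
+ * oldest boot for offsets > 0, hence the two index formulas below. */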
+ for (int i = - (int) n_boots + 1; i <= (int) n_boots; i++) {
+ sd_id128_t id;
+
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_se(journal_find_boot_by_offset(j, i, &id) == 1);
+ if (i <= 0)
+ assert_se(sd_id128_equal(id, boots[n_boots + i - 1].id));
+ else
+ assert_se(sd_id128_equal(id, boots[i - 1].id));
+ sd_journal_close(j);
+ }
+
+ test_done(t);
+}
+
+TEST(boot_id) {
+ test_boot_id_one(setup_sequential, 3);
+ test_boot_id_one(setup_unreferenced_data, 3);
+}
+
+static void test_sequence_numbers_one(void) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ char t[] = "/var/tmp/journal-seq-XXXXXX";
+ JournalFile *one, *two;
+ uint64_t seqnum = 0;
+ sd_id128_t seqnum_id;
+
+ m = mmap_cache_new();
+ assert_se(m != NULL);
+
+ mkdtemp_chdir_chattr(t);
+
+ assert_se(journal_file_open(-1, "one.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS, 0644,
+ UINT64_MAX, NULL, m, NULL, &one) == 0);
+
+ append_number(one, 1, NULL, &seqnum, NULL);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 1);
+ append_number(one, 2, NULL, &seqnum, NULL);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 2);
+
+ assert_se(one->header->state == STATE_ONLINE);
+ assert_se(!sd_id128_equal(one->header->file_id, one->header->machine_id));
+ assert_se(!sd_id128_equal(one->header->file_id, one->header->tail_entry_boot_id));
+ assert_se(sd_id128_equal(one->header->file_id, one->header->seqnum_id));
+
+ memcpy(&seqnum_id, &one->header->seqnum_id, sizeof(sd_id128_t));
+
+ assert_se(journal_file_open(-1, "two.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS, 0644,
+ UINT64_MAX, NULL, m, one, &two) == 0);
+
+ assert_se(two->header->state == STATE_ONLINE);
+ assert_se(!sd_id128_equal(two->header->file_id, one->header->file_id));
+ assert_se(sd_id128_equal(two->header->machine_id, one->header->machine_id));
+ assert_se(sd_id128_is_null(two->header->tail_entry_boot_id)); /* Not written yet. */
+ assert_se(sd_id128_equal(two->header->seqnum_id, one->header->seqnum_id));
+
+ append_number(two, 3, NULL, &seqnum, NULL);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 3);
+ append_number(two, 4, NULL, &seqnum, NULL);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 4);
+
+ /* Verify tail_entry_boot_id. */
+ assert_se(sd_id128_equal(two->header->tail_entry_boot_id, one->header->tail_entry_boot_id));
+
+ test_close(two);
+
+ append_number(one, 5, NULL, &seqnum, NULL);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 5);
+
+ append_number(one, 6, NULL, &seqnum, NULL);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 6);
+
+ test_close(one);
+
+ /* If the machine-id is not initialized, the file header verification
+ * (which happens when re-opening a journal file) will fail. */
+ if (sd_id128_get_machine(NULL) >= 0) {
+ /* restart server */
+ seqnum = 0;
+
+ assert_se(journal_file_open(-1, "two.journal", O_RDWR, JOURNAL_COMPRESS, 0,
+ UINT64_MAX, NULL, m, NULL, &two) == 0);
+
+ assert_se(sd_id128_equal(two->header->seqnum_id, seqnum_id));
+
+ append_number(two, 7, NULL, &seqnum, NULL);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 5);
+
+ /* So..., here we have the same seqnum in two files with the
+ * same seqnum_id. */
+
+ test_close(two);
+ }
+
+ test_done(t);
+}
+
+TEST(sequence_numbers) {
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
+ test_sequence_numbers_one();
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
+ test_sequence_numbers_one();
+}
+
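+/* Reference implementation for the bisect tests: given the sorted candidate seqnums (or offsets)
+ * and the corresponding entry offsets, compute what journal_file_move_to_entry_by_seqnum() or
+ * ..._by_offset() is expected to return for 'needle' in the given direction: 1 plus the matching
+ * offset in *ret, or 0 when no entry qualifies. A candidate of 0 marks an entry whose seqnum was
+ * cleared by the corruption step below. */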
+static int expected_result(uint64_t needle, const uint64_t *candidates, const uint64_t *offset, size_t n, direction_t direction, uint64_t *ret) {
+ switch (direction) {
+ case DIRECTION_DOWN:
+ for (size_t i = 0; i < n; i++) {
+ if (candidates[i] == 0) {
+ *ret = 0;
+ return 0;
+ }
+ if (needle <= candidates[i]) {
+ *ret = offset[i];
+ return 1;
+ }
+ }
+ *ret = 0;
+ return 0;
+
+ case DIRECTION_UP:
+ for (size_t i = 0; i < n; i++)
+ if (needle < candidates[i] || candidates[i] == 0) {
+ if (i == 0) {
+ *ret = 0;
+ return 0;
+ }
+ *ret = offset[i - 1];
+ return 1;
+ }
+ *ret = offset[n - 1];
+ return 1;
+
+ default:
+ assert_not_reached();
+ }
+}
+
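+/* Compare journal_file_move_to_entry_by_seqnum() and journal_file_move_to_entry_by_offset() against
+ * expected_result(), both for sequential and for randomly chosen needles. */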
+static void verify(JournalFile *f, const uint64_t *seqnum, const uint64_t *offset, size_t n) {
+ uint64_t p, q;
+ int r, e;
+
+ /* by seqnum (sequential) */
+ for (uint64_t i = 0; i < n + 2; i++) {
+ p = 0;
+ r = journal_file_move_to_entry_by_seqnum(f, i, DIRECTION_DOWN, NULL, &p);
+ e = expected_result(i, seqnum, offset, n, DIRECTION_DOWN, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_seqnum(f, i, DIRECTION_UP, NULL, &p);
+ e = expected_result(i, seqnum, offset, n, DIRECTION_UP, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+ }
+
+ /* by seqnum (random) */
+ for (size_t trial = 0; trial < 3 * n; trial++) {
+ uint64_t i = random_u64_range(n + 2);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_seqnum(f, i, DIRECTION_DOWN, NULL, &p);
+ e = expected_result(i, seqnum, offset, n, DIRECTION_DOWN, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+ }
+ for (size_t trial = 0; trial < 3 * n; trial++) {
+ uint64_t i = random_u64_range(n + 2);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_seqnum(f, i, DIRECTION_UP, NULL, &p);
+ e = expected_result(i, seqnum, offset, n, DIRECTION_UP, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+ }
+
+ /* by offset (sequential) */
+ for (size_t i = 0; i < n; i++) {
+ p = 0;
+ r = journal_file_move_to_entry_by_offset(f, offset[i] - 1, DIRECTION_DOWN, NULL, &p);
+ e = expected_result(offset[i] - 1, offset, offset, n, DIRECTION_DOWN, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_offset(f, offset[i], DIRECTION_DOWN, NULL, &p);
+ e = expected_result(offset[i], offset, offset, n, DIRECTION_DOWN, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_offset(f, offset[i] + 1, DIRECTION_DOWN, NULL, &p);
+ e = expected_result(offset[i] + 1, offset, offset, n, DIRECTION_DOWN, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_offset(f, offset[i] - 1, DIRECTION_UP, NULL, &p);
+ e = expected_result(offset[i] - 1, offset, offset, n, DIRECTION_UP, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_offset(f, offset[i], DIRECTION_UP, NULL, &p);
+ e = expected_result(offset[i], offset, offset, n, DIRECTION_UP, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_offset(f, offset[i] + 1, DIRECTION_UP, NULL, &p);
+ e = expected_result(offset[i] + 1, offset, offset, n, DIRECTION_UP, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+ }
+
+ /* by offset (random) */
+ for (size_t trial = 0; trial < 3 * n; trial++) {
+ uint64_t i = offset[0] - 1 + random_u64_range(offset[n-1] - offset[0] + 2);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_offset(f, i, DIRECTION_DOWN, NULL, &p);
+ e = expected_result(i, offset, offset, n, DIRECTION_DOWN, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+ }
+ for (size_t trial = 0; trial < 3 * n; trial++) {
+ uint64_t i = offset[0] - 1 + random_u64_range(offset[n-1] - offset[0] + 2);
+
+ p = 0;
+ r = journal_file_move_to_entry_by_offset(f, i, DIRECTION_UP, NULL, &p);
+ e = expected_result(i, offset, offset, n, DIRECTION_UP, &q);
+ assert_se(r == e);
+ assert_se(p == q);
+ }
+}
+
+static void test_generic_array_bisect_one(size_t n, size_t num_corrupted) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ char t[] = "/var/tmp/journal-seq-XXXXXX";
+ _cleanup_free_ uint64_t *seqnum = NULL, *offset = NULL;
+ JournalFile *f;
+
+ log_info("/* %s(%zu, %zu) */", __func__, n, num_corrupted);
+
+ assert_se(m = mmap_cache_new());
+
+ mkdtemp_chdir_chattr(t);
+
+ assert_se(journal_file_open(-1, "test.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS, 0644,
+ UINT64_MAX, NULL, m, NULL, &f) == 0);
+
+ assert_se(seqnum = new0(uint64_t, n));
+ assert_se(offset = new0(uint64_t, n));
+
+ for (size_t i = 0; i < n; i++) {
+ append_number(f, i, NULL, seqnum + i, offset + i);
+ if (i == 0) {
+ assert_se(seqnum[i] > 0);
+ assert_se(offset[i] > 0);
+ } else {
+ assert_se(seqnum[i] > seqnum[i-1]);
+ assert_se(offset[i] > offset[i-1]);
+ }
+ }
+
+ verify(f, seqnum, offset, n);
+
+ /* Reset chain cache. */
+ assert_se(journal_file_move_to_entry_by_offset(f, offset[0], DIRECTION_DOWN, NULL, NULL) > 0);
+
+ /* Corrupt the journal by clearing the seqnums of the last 'num_corrupted' entries. */
+ for (size_t i = n - num_corrupted; i < n; i++) {
+ Object *o;
+
+ assert_se(journal_file_move_to_object(f, OBJECT_ENTRY, offset[i], &o) >= 0);
+ assert_se(o);
+ o->entry.seqnum = 0;
+ seqnum[i] = 0;
+ }
+
+ verify(f, seqnum, offset, n);
+
+ test_close(f);
+ test_done(t);
+}
+
+TEST(generic_array_bisect) {
+ for (size_t n = 1; n < 10; n++)
+ for (size_t m = 1; m <= n; m++)
+ test_generic_array_bisect_one(n, m);
+
+ test_generic_array_bisect_one(100, 40);
+}
+
+static int intro(void) {
+ /* journal_file_open() requires a valid machine id */
+ if (access("/etc/machine-id", F_OK) != 0)
+ return log_tests_skipped("/etc/machine-id not found");
+
+ arg_keep = saved_argc > 1;
+
+ return EXIT_SUCCESS;
+}
+
+DEFINE_TEST_MAIN_WITH_INTRO(LOG_DEBUG, intro);
diff --git a/src/libsystemd/sd-journal/test-journal-match.c b/src/libsystemd/sd-journal/test-journal-match.c
new file mode 100644
index 0000000..571a88c
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-match.c
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <stdio.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "journal-internal.h"
+#include "log.h"
+#include "string-util.h"
+#include "tests.h"
+
+int main(int argc, char *argv[]) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ _cleanup_free_ char *t = NULL;
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(sd_journal_open(&j, 0) >= 0);
+
+ assert_se(sd_journal_add_match(j, "foobar", 0) < 0);
+ assert_se(sd_journal_add_match(j, "foobar=waldo", 0) < 0);
+ assert_se(sd_journal_add_match(j, "", 0) < 0);
+ assert_se(sd_journal_add_match(j, "=", 0) < 0);
+ assert_se(sd_journal_add_match(j, "=xxxxx", 0) < 0);
+ assert_se(sd_journal_add_match(j, (uint8_t[4]){'A', '=', '\1', '\2'}, 4) >= 0);
+ assert_se(sd_journal_add_match(j, (uint8_t[5]){'B', '=', 'C', '\0', 'D'}, 5) >= 0);
+ assert_se(sd_journal_add_match(j, "HALLO=WALDO", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "QUUX=mmmm", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "QUUX=xxxxx", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "HALLO=", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "QUUX=xxxxx", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "QUUX=yyyyy", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "PIFF=paff", 0) >= 0);
+
+ assert_se(sd_journal_add_disjunction(j) >= 0);
+
+ assert_se(sd_journal_add_match(j, "ONE=one", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "ONE=two", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "TWO=two", 0) >= 0);
+
+ assert_se(sd_journal_add_conjunction(j) >= 0);
+
+ assert_se(sd_journal_add_match(j, "L4_1=yes", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "L4_1=ok", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "L4_2=yes", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "L4_2=ok", 0) >= 0);
+
+ assert_se(sd_journal_add_disjunction(j) >= 0);
+
+ assert_se(sd_journal_add_match(j, "L3=yes", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "L3=ok", 0) >= 0);
+
+ assert_se(t = journal_make_match_string(j));
+
+ printf("resulting match expression is: %s\n", t);
+
+ assert_se(streq(t, "(((L3=ok OR L3=yes) OR ((L4_2=ok OR L4_2=yes) AND (L4_1=ok OR L4_1=yes))) AND ((TWO=two AND (ONE=two OR ONE=one)) OR (PIFF=paff AND (QUUX=yyyyy OR QUUX=xxxxx OR QUUX=mmmm) AND (HALLO= OR HALLO=WALDO) AND B=C\\000D AND A=\\001\\002)))"));
+
+ return 0;
+}
diff --git a/src/libsystemd/sd-journal/test-journal-send.c b/src/libsystemd/sd-journal/test-journal-send.c
new file mode 100644
index 0000000..ca1fe7c
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-send.c
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "fileio.h"
+#include "journal-send.h"
+#include "macro.h"
+#include "memory-util.h"
+#include "tests.h"
+
+TEST(journal_print) {
+ assert_se(sd_journal_print(LOG_INFO, "XXX") == 0);
+ assert_se(sd_journal_print(LOG_INFO, "%s", "YYY") == 0);
+ assert_se(sd_journal_print(LOG_INFO, "X%4094sY", "ZZZ") == 0);
+ assert_se(sd_journal_print(LOG_INFO, "X%*sY", (int) LONG_LINE_MAX - 8 - 3, "ZZZ") == 0);
+ assert_se(sd_journal_print(LOG_INFO, "X%*sY", (int) LONG_LINE_MAX - 8 - 2, "ZZZ") == -ENOBUFS);
+}
+
+TEST(journal_send) {
+ _cleanup_free_ char *huge = NULL;
+
+#define HUGE_SIZE (4096*1024)
+ assert_se(huge = malloc(HUGE_SIZE));
+
+ /* utf-8 and non-utf-8, message-less and message-ful iovecs */
+ struct iovec graph1[] = {
+ {(char*) "GRAPH=graph", STRLEN("GRAPH=graph")}
+ };
+ struct iovec graph2[] = {
+ {(char*) "GRAPH=graph\n", STRLEN("GRAPH=graph\n")}
+ };
+ struct iovec message1[] = {
+ {(char*) "MESSAGE=graph", STRLEN("MESSAGE=graph")}
+ };
+ struct iovec message2[] = {
+ {(char*) "MESSAGE=graph\n", STRLEN("MESSAGE=graph\n")}
+ };
+
+ assert_se(sd_journal_print(LOG_INFO, "piepapo") == 0);
+
+ assert_se(sd_journal_send("MESSAGE=foobar",
+ "VALUE=%i", 7,
+ NULL) == 0);
+
+ errno = ENOENT;
+ assert_se(sd_journal_perror("Foobar") == 0);
+
+ assert_se(sd_journal_perror("") == 0);
+
+ memcpy(huge, "HUGE=", STRLEN("HUGE="));
+ memset(&huge[STRLEN("HUGE=")], 'x', HUGE_SIZE - STRLEN("HUGE=") - 1);
+ huge[HUGE_SIZE - 1] = '\0';
+
+ assert_se(sd_journal_send("MESSAGE=Huge field attached",
+ huge,
+ NULL) == 0);
+
+ assert_se(sd_journal_send("MESSAGE=uiui",
+ "VALUE=A",
+ "VALUE=B",
+ "VALUE=C",
+ "SINGLETON=1",
+ "OTHERVALUE=X",
+ "OTHERVALUE=Y",
+ "WITH_BINARY=this is a binary value \a",
+ NULL) == 0);
+
+ syslog(LOG_NOTICE, "Hello World!");
+
+ assert_se(sd_journal_print(LOG_NOTICE, "Hello World") == 0);
+
+ assert_se(sd_journal_send("MESSAGE=Hello World!",
+ "MESSAGE_ID=52fb62f99e2c49d89cfbf9d6de5e3555",
+ "PRIORITY=5",
+ "HOME=%s", getenv("HOME"),
+ "TERM=%s", getenv("TERM"),
+ "PAGE_SIZE=%li", sysconf(_SC_PAGESIZE),
+ "N_CPUS=%li", sysconf(_SC_NPROCESSORS_ONLN),
+ NULL) == 0);
+
+ assert_se(sd_journal_sendv(graph1, 1) == 0);
+ assert_se(sd_journal_sendv(graph2, 1) == 0);
+ assert_se(sd_journal_sendv(message1, 1) == 0);
+ assert_se(sd_journal_sendv(message2, 1) == 0);
+
+ /* test without location fields */
+#undef sd_journal_sendv
+ assert_se(sd_journal_sendv(graph1, 1) == 0);
+ assert_se(sd_journal_sendv(graph2, 1) == 0);
+ assert_se(sd_journal_sendv(message1, 1) == 0);
+ assert_se(sd_journal_sendv(message2, 1) == 0);
+
+ /* The syslog() call above opens a fd that is kept open inside libc; valgrind reports it
+ * as leaked unless we call closelog(). */
+ closelog();
+}
+
+static int outro(void) {
+ /* Sleep a bit to make it easy for journald to collect metadata. */
+ sleep(1);
+
+ close_journal_fd();
+
+ return EXIT_SUCCESS;
+}
+
+DEFINE_TEST_MAIN_FULL(LOG_INFO, NULL, outro);
diff --git a/src/libsystemd/sd-journal/test-journal-stream.c b/src/libsystemd/sd-journal/test-journal-stream.c
new file mode 100644
index 0000000..3a370ef
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-stream.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "chattr-util.h"
+#include "iovec-util.h"
+#include "journal-file-util.h"
+#include "journal-internal.h"
+#include "log.h"
+#include "macro.h"
+#include "parse-util.h"
+#include "rm-rf.h"
+#include "tests.h"
+
+#define N_ENTRIES 200
+
+static void verify_contents(sd_journal *j, unsigned skip) {
+ unsigned i;
+
+ assert_se(j);
+
+ i = 0;
+ SD_JOURNAL_FOREACH(j) {
+ const void *d;
+ char *k, *c;
+ size_t l;
+ unsigned u = 0;
+
+ assert_se(sd_journal_get_cursor(j, &k) >= 0);
+ printf("cursor: %s\n", k);
+ free(k);
+
+ assert_se(sd_journal_get_data(j, "MAGIC", &d, &l) >= 0);
+ printf("\t%.*s\n", (int) l, (const char*) d);
+
+ assert_se(sd_journal_get_data(j, "NUMBER", &d, &l) >= 0);
+ assert_se(k = strndup(d, l));
+ printf("\t%s\n", k);
+
+ if (skip > 0) {
+ assert_se(safe_atou(k + 7, &u) >= 0);
+ assert_se(i == u);
+ i += skip;
+ }
+
+ free(k);
+
+ assert_se(sd_journal_get_cursor(j, &c) >= 0);
+ assert_se(sd_journal_test_cursor(j, c) > 0);
+ free(c);
+ }
+
+ if (skip > 0)
+ assert_se(i == N_ENTRIES);
+}
+
+static void run_test(void) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ JournalFile *one, *two, *three;
+ char t[] = "/var/tmp/journal-stream-XXXXXX";
+ unsigned i;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ char *z;
+ const void *data;
+ size_t l;
+ dual_timestamp previous_ts = DUAL_TIMESTAMP_NULL;
+
+ m = mmap_cache_new();
+ assert_se(m != NULL);
+
+ assert_se(mkdtemp(t));
+ assert_se(chdir(t) >= 0);
+ (void) chattr_path(t, FS_NOCOW_FL, FS_NOCOW_FL, NULL);
+
+ assert_se(journal_file_open(-1, "one.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS, 0666, UINT64_MAX, NULL, m, NULL, &one) == 0);
+ assert_se(journal_file_open(-1, "two.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS, 0666, UINT64_MAX, NULL, m, NULL, &two) == 0);
+ assert_se(journal_file_open(-1, "three.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS, 0666, UINT64_MAX, NULL, m, NULL, &three) == 0);
+
+ for (i = 0; i < N_ENTRIES; i++) {
+ char *p, *q;
+ dual_timestamp ts;
+ struct iovec iovec[2];
+
+ dual_timestamp_now(&ts);
+
+ if (ts.monotonic <= previous_ts.monotonic)
+ ts.monotonic = previous_ts.monotonic + 1;
+
+ if (ts.realtime <= previous_ts.realtime)
+ ts.realtime = previous_ts.realtime + 1;
+
+ previous_ts = ts;
+
+ assert_se(asprintf(&p, "NUMBER=%u", i) >= 0);
+ iovec[0] = IOVEC_MAKE(p, strlen(p));
+
+ assert_se(asprintf(&q, "MAGIC=%s", i % 5 == 0 ? "quux" : "waldo") >= 0);
+
+ iovec[1] = IOVEC_MAKE(q, strlen(q));
+
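+ /* Every 10th entry goes only into 'three'; of the rest, every 3rd is also written to 'two',
+ * and all of them go into 'one'. */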
+ if (i % 10 == 0)
+ assert_se(journal_file_append_entry(three, &ts, NULL, iovec, 2, NULL, NULL, NULL, NULL) == 0);
+ else {
+ if (i % 3 == 0)
+ assert_se(journal_file_append_entry(two, &ts, NULL, iovec, 2, NULL, NULL, NULL, NULL) == 0);
+
+ assert_se(journal_file_append_entry(one, &ts, NULL, iovec, 2, NULL, NULL, NULL, NULL) == 0);
+ }
+
+ free(p);
+ free(q);
+ }
+
+ (void) journal_file_offline_close(one);
+ (void) journal_file_offline_close(two);
+ (void) journal_file_offline_close(three);
+
+ assert_se(sd_journal_open_directory(&j, t, 0) >= 0);
+
+ assert_se(sd_journal_add_match(j, "MAGIC=quux", 0) >= 0);
+ SD_JOURNAL_FOREACH_BACKWARDS(j) {
+ _cleanup_free_ char *c = NULL;
+
+ assert_se(sd_journal_get_data(j, "NUMBER", &data, &l) >= 0);
+ printf("\t%.*s\n", (int) l, (const char*) data);
+
+ assert_se(sd_journal_get_cursor(j, &c) >= 0);
+ assert_se(sd_journal_test_cursor(j, c) > 0);
+ }
+
+ SD_JOURNAL_FOREACH(j) {
+ _cleanup_free_ char *c = NULL;
+
+ assert_se(sd_journal_get_data(j, "NUMBER", &data, &l) >= 0);
+ printf("\t%.*s\n", (int) l, (const char*) data);
+
+ assert_se(sd_journal_get_cursor(j, &c) >= 0);
+ assert_se(sd_journal_test_cursor(j, c) > 0);
+ }
+
+ sd_journal_flush_matches(j);
+
+ verify_contents(j, 1);
+
+ printf("NEXT TEST\n");
+ assert_se(sd_journal_add_match(j, "MAGIC=quux", 0) >= 0);
+
+ assert_se(z = journal_make_match_string(j));
+ printf("resulting match expression is: %s\n", z);
+ free(z);
+
+ verify_contents(j, 5);
+
+ printf("NEXT TEST\n");
+ sd_journal_flush_matches(j);
+ assert_se(sd_journal_add_match(j, "MAGIC=waldo", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "NUMBER=10", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "NUMBER=11", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "NUMBER=12", 0) >= 0);
+
+ assert_se(z = journal_make_match_string(j));
+ printf("resulting match expression is: %s\n", z);
+ free(z);
+
+ verify_contents(j, 0);
+
+ assert_se(sd_journal_query_unique(j, "NUMBER") >= 0);
+ SD_JOURNAL_FOREACH_UNIQUE(j, data, l)
+ printf("%.*s\n", (int) l, (const char*) data);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+}
+
+int main(int argc, char *argv[]) {
+
+ /* journal_file_open() requires a valid machine id */
+ if (access("/etc/machine-id", F_OK) != 0)
+ return log_tests_skipped("/etc/machine-id not found");
+
+ test_setup_logging(LOG_DEBUG);
+
+ /* Run this test multiple times with different configurations of features. */
+
+ assert_se(setenv("SYSTEMD_JOURNAL_KEYED_HASH", "0", 1) >= 0);
+ run_test();
+
+ assert_se(setenv("SYSTEMD_JOURNAL_KEYED_HASH", "1", 1) >= 0);
+ run_test();
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
+ run_test();
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
+ run_test();
+
+ return 0;
+}
diff --git a/src/libsystemd/sd-journal/test-journal-verify.c b/src/libsystemd/sd-journal/test-journal-verify.c
new file mode 100644
index 0000000..edce440
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal-verify.c
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "chattr-util.h"
+#include "fd-util.h"
+#include "iovec-util.h"
+#include "journal-file-util.h"
+#include "journal-verify.h"
+#include "log.h"
+#include "mmap-cache.h"
+#include "rm-rf.h"
+#include "strv.h"
+#include "terminal-util.h"
+#include "tests.h"
+
+#define N_ENTRIES 6000
+#define RANDOM_RANGE 77
+
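+/* Flip a single bit: 'p' is a bit offset into the file, i.e. the byte at p/8, bit p%8. */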
+static void bit_toggle(const char *fn, uint64_t p) {
+ uint8_t b;
+ ssize_t r;
+ int fd;
+
+ fd = open(fn, O_RDWR|O_CLOEXEC);
+ assert_se(fd >= 0);
+
+ r = pread(fd, &b, 1, p/8);
+ assert_se(r == 1);
+
+ b ^= 1 << (p % 8);
+
+ r = pwrite(fd, &b, 1, p/8);
+ assert_se(r == 1);
+
+ safe_close(fd);
+}
+
+static int raw_verify(const char *fn, const char *verification_key) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ JournalFile *f;
+ int r;
+
+ m = mmap_cache_new();
+ assert_se(m != NULL);
+
+ r = journal_file_open(
+ /* fd= */ -1,
+ fn,
+ O_RDONLY,
+ JOURNAL_COMPRESS|(verification_key ? JOURNAL_SEAL : 0),
+ 0666,
+ /* compress_threshold_bytes= */ UINT64_MAX,
+ /* metrics= */ NULL,
+ m,
+ /* template= */ NULL,
+ &f);
+ if (r < 0)
+ return r;
+
+ r = journal_file_verify(f, verification_key, NULL, NULL, NULL, false);
+ (void) journal_file_close(f);
+
+ return r;
+}
+
+static int run_test(const char *verification_key, ssize_t max_iterations) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ char t[] = "/var/tmp/journal-XXXXXX";
+ struct stat st;
+ JournalFile *f;
+ JournalFile *df;
+ usec_t from = 0, to = 0, total = 0;
+ uint64_t start, end;
+ int r;
+
+ m = mmap_cache_new();
+ assert_se(m != NULL);
+
+ /* journal_file_open() requires a valid machine id */
+ if (sd_id128_get_machine(NULL) < 0)
+ return log_tests_skipped("No valid machine ID found");
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(mkdtemp(t));
+ assert_se(chdir(t) >= 0);
+ (void) chattr_path(t, FS_NOCOW_FL, FS_NOCOW_FL, NULL);
+
+ log_info("Generating a test journal");
+
+ assert_se(journal_file_open(
+ /* fd= */ -1,
+ "test.journal",
+ O_RDWR|O_CREAT,
+ JOURNAL_COMPRESS|(verification_key ? JOURNAL_SEAL : 0),
+ 0666,
+ /* compress_threshold_bytes= */ UINT64_MAX,
+ /* metrics= */ NULL,
+ m,
+ /* template= */ NULL,
+ &df) == 0);
+
+ for (size_t n = 0; n < N_ENTRIES; n++) {
+ _cleanup_free_ char *test = NULL;
+ struct iovec iovec;
+ struct dual_timestamp ts;
+
+ dual_timestamp_now(&ts);
+ assert_se(asprintf(&test, "RANDOM=%li", random() % RANDOM_RANGE) >= 0);
+ iovec = IOVEC_MAKE_STRING(test);
+ assert_se(journal_file_append_entry(
+ df,
+ &ts,
+ /* boot_id= */ NULL,
+ &iovec,
+ /* n_iovec= */ 1,
+ /* seqnum= */ NULL,
+ /* seqnum_id= */ NULL,
+ /* ret_object= */ NULL,
+ /* ret_offset= */ NULL) == 0);
+ }
+
+ (void) journal_file_offline_close(df);
+
+ log_info("Verifying with key: %s", strna(verification_key));
+
+ assert_se(journal_file_open(
+ /* fd= */ -1,
+ "test.journal",
+ O_RDONLY,
+ JOURNAL_COMPRESS|(verification_key ? JOURNAL_SEAL : 0),
+ 0666,
+ /* compress_threshold_bytes= */ UINT64_MAX,
+ /* metrics= */ NULL,
+ m,
+ /* template= */ NULL,
+ &f) == 0);
+ journal_file_print_header(f);
+ journal_file_dump(f);
+
+ assert_se(journal_file_verify(f, verification_key, &from, &to, &total, true) >= 0);
+
+ if (verification_key && JOURNAL_HEADER_SEALED(f->header))
+ log_info("=> Validated from %s to %s, %s missing",
+ FORMAT_TIMESTAMP(from),
+ FORMAT_TIMESTAMP(to),
+ FORMAT_TIMESPAN(total > to ? total - to : 0, 0));
+
+ (void) journal_file_close(f);
+ assert_se(stat("test.journal", &st) >= 0);
+
+ start = 38448 * 8 + 0;
+ end = max_iterations < 0 ? (uint64_t) st.st_size * 8 : start + max_iterations;
+ log_info("Toggling bits %"PRIu64 " to %"PRIu64, start, end);
+
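+ /* Flip each bit in the selected range, re-verify the journal, complain if the corruption went
+ * undetected, then restore the bit for the next iteration. */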
+ for (uint64_t p = start; p < end; p++) {
+ bit_toggle("test.journal", p);
+
+ if (max_iterations < 0)
+ log_info("[ %"PRIu64"+%"PRIu64"]", p / 8, p % 8);
+
+ r = raw_verify("test.journal", verification_key);
+ /* Suppress the notice when running in the limited (CI) mode */
+ if (verification_key && max_iterations < 0 && r >= 0)
+ log_notice(ANSI_HIGHLIGHT_RED ">>>> %"PRIu64" (bit %"PRIu64") can be toggled without detection." ANSI_NORMAL, p / 8, p % 8);
+
+ bit_toggle("test.journal", p);
+ }
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+
+ return 0;
+}
+
+int main(int argc, char *argv[]) {
+ const char *verification_key = NULL;
+ int max_iterations = 512;
+
+ if (argc > 1) {
+                /* Don't limit the number of iterations when the verification key is
+                 * provided on the command line; we only want to limit them in CI runs. */
+ verification_key = argv[1];
+ max_iterations = -1;
+ }
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
+ run_test(verification_key, max_iterations);
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
+ run_test(verification_key, max_iterations);
+
+#if HAVE_GCRYPT
+        /* If we're running without any arguments and were compiled with gcrypt,
+         * check journal verification with a valid key as well. */
+ if (argc <= 1) {
+ verification_key = "c262bd-85187f-0b1b04-877cc5/1c7af8-35a4e900";
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
+ run_test(verification_key, max_iterations);
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
+ run_test(verification_key, max_iterations);
+ }
+#endif
+
+ return 0;
+}
diff --git a/src/libsystemd/sd-journal/test-journal.c b/src/libsystemd/sd-journal/test-journal.c
new file mode 100644
index 0000000..96f2b67
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-journal.c
@@ -0,0 +1,280 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "chattr-util.h"
+#include "iovec-util.h"
+#include "journal-authenticate.h"
+#include "journal-file-util.h"
+#include "journal-vacuum.h"
+#include "log.h"
+#include "rm-rf.h"
+#include "tests.h"
+
+static bool arg_keep = false;
+
+static void mkdtemp_chdir_chattr(char *path) {
+ assert_se(mkdtemp(path));
+ assert_se(chdir(path) >= 0);
+
+        /* Speed things up a bit on btrfs by ensuring that CoW is turned off for all files
+         * created in our directory during the test run. */
+ (void) chattr_path(path, FS_NOCOW_FL, FS_NOCOW_FL, NULL);
+}
+
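+/* Append three entries (the last one with a fake boot ID) and check that they can be
+ * looked up again by offset, by data object and by sequence number, in both
+ * directions, before rotating and vacuuming the directory. */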
+static void test_non_empty_one(void) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ dual_timestamp ts;
+ JournalFile *f;
+ struct iovec iovec;
+ static const char test[] = "TEST1=1", test2[] = "TEST2=2";
+ Object *o, *d;
+ uint64_t p;
+ sd_id128_t fake_boot_id;
+ char t[] = "/var/tmp/journal-XXXXXX";
+
+ m = mmap_cache_new();
+ assert_se(m != NULL);
+
+ mkdtemp_chdir_chattr(t);
+
+ assert_se(journal_file_open(-1, "test.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS|JOURNAL_SEAL, 0666, UINT64_MAX, NULL, m, NULL, &f) == 0);
+
+ assert_se(dual_timestamp_now(&ts));
+ assert_se(sd_id128_randomize(&fake_boot_id) == 0);
+
+ iovec = IOVEC_MAKE_STRING(test);
+ assert_se(journal_file_append_entry(f, &ts, NULL, &iovec, 1, NULL, NULL, NULL, NULL) == 0);
+
+ iovec = IOVEC_MAKE_STRING(test2);
+ assert_se(journal_file_append_entry(f, &ts, NULL, &iovec, 1, NULL, NULL, NULL, NULL) == 0);
+
+ iovec = IOVEC_MAKE_STRING(test);
+ assert_se(journal_file_append_entry(f, &ts, &fake_boot_id, &iovec, 1, NULL, NULL, NULL, NULL) == 0);
+
+#if HAVE_GCRYPT
+ journal_file_append_tag(f);
+#endif
+ journal_file_dump(f);
+
+ assert_se(journal_file_next_entry(f, 0, DIRECTION_DOWN, &o, &p) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 1);
+
+ assert_se(journal_file_next_entry(f, p, DIRECTION_DOWN, &o, &p) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 2);
+
+ assert_se(journal_file_next_entry(f, p, DIRECTION_DOWN, &o, &p) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 3);
+ assert_se(sd_id128_equal(o->entry.boot_id, fake_boot_id));
+
+ assert_se(journal_file_next_entry(f, p, DIRECTION_DOWN, &o, &p) == 0);
+
+ assert_se(journal_file_next_entry(f, 0, DIRECTION_DOWN, &o, &p) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 1);
+
+ assert_se(journal_file_find_data_object(f, test, strlen(test), &d, NULL) == 1);
+ assert_se(journal_file_move_to_entry_for_data(f, d, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 1);
+
+ assert_se(journal_file_move_to_entry_for_data(f, d, DIRECTION_UP, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 3);
+
+ assert_se(journal_file_find_data_object(f, test2, strlen(test2), &d, NULL) == 1);
+ assert_se(journal_file_move_to_entry_for_data(f, d, DIRECTION_UP, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 2);
+
+ assert_se(journal_file_move_to_entry_for_data(f, d, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 2);
+
+ assert_se(journal_file_find_data_object(f, "quux", 4, &d, NULL) == 0);
+
+ assert_se(journal_file_move_to_entry_by_seqnum(f, 1, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 1);
+
+ assert_se(journal_file_move_to_entry_by_seqnum(f, 3, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 3);
+
+ assert_se(journal_file_move_to_entry_by_seqnum(f, 2, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 2);
+
+ assert_se(journal_file_move_to_entry_by_seqnum(f, 10, DIRECTION_DOWN, &o, NULL) == 0);
+
+ journal_file_rotate(&f, m, JOURNAL_SEAL|JOURNAL_COMPRESS, UINT64_MAX, NULL);
+ journal_file_rotate(&f, m, JOURNAL_SEAL|JOURNAL_COMPRESS, UINT64_MAX, NULL);
+
+ (void) journal_file_offline_close(f);
+
+ log_info("Done...");
+
+ if (arg_keep)
+ log_info("Not removing %s", t);
+ else {
+ journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+ }
+
+ puts("------------------------------------------------------------");
+}
+
+TEST(non_empty) {
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
+ test_non_empty_one();
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
+ test_non_empty_one();
+}
+
+static void test_empty_one(void) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ JournalFile *f1, *f2, *f3, *f4;
+ char t[] = "/var/tmp/journal-XXXXXX";
+
+ m = mmap_cache_new();
+ assert_se(m != NULL);
+
+ mkdtemp_chdir_chattr(t);
+
+ assert_se(journal_file_open(-1, "test.journal", O_RDWR|O_CREAT, 0, 0666, UINT64_MAX, NULL, m, NULL, &f1) == 0);
+ assert_se(journal_file_open(-1, "test-compress.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS, 0666, UINT64_MAX, NULL, m, NULL, &f2) == 0);
+ assert_se(journal_file_open(-1, "test-seal.journal", O_RDWR|O_CREAT, JOURNAL_SEAL, 0666, UINT64_MAX, NULL, m, NULL, &f3) == 0);
+ assert_se(journal_file_open(-1, "test-seal-compress.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS|JOURNAL_SEAL, 0666, UINT64_MAX, NULL, m, NULL, &f4) == 0);
+
+ journal_file_print_header(f1);
+ puts("");
+ journal_file_print_header(f2);
+ puts("");
+ journal_file_print_header(f3);
+ puts("");
+ journal_file_print_header(f4);
+ puts("");
+
+ log_info("Done...");
+
+ if (arg_keep)
+ log_info("Not removing %s", t);
+ else {
+ journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+ }
+
+ (void) journal_file_offline_close(f1);
+ (void) journal_file_offline_close(f2);
+ (void) journal_file_offline_close(f3);
+ (void) journal_file_offline_close(f4);
+}
+
+TEST(empty) {
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
+ test_empty_one();
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
+ test_empty_one();
+}
+
+#if HAVE_COMPRESSION
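+/* Open a journal with the given compression threshold, append a single entry of
+ * data_size bytes, then walk the objects manually to check whether the resulting
+ * data object was stored compressed. */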
+static bool check_compressed(uint64_t compress_threshold, uint64_t data_size) {
+ _cleanup_(mmap_cache_unrefp) MMapCache *m = NULL;
+ dual_timestamp ts;
+ JournalFile *f;
+ struct iovec iovec;
+ Object *o;
+ uint64_t p;
+ char t[] = "/var/tmp/journal-XXXXXX";
+ char data[2048] = "FIELD=";
+ bool is_compressed;
+ int r;
+
+ assert_se(data_size <= sizeof(data));
+
+ m = mmap_cache_new();
+ assert_se(m != NULL);
+
+ mkdtemp_chdir_chattr(t);
+
+ assert_se(journal_file_open(-1, "test.journal", O_RDWR|O_CREAT, JOURNAL_COMPRESS|JOURNAL_SEAL, 0666, compress_threshold, NULL, m, NULL, &f) == 0);
+
+ dual_timestamp_now(&ts);
+
+ iovec = IOVEC_MAKE(data, data_size);
+ assert_se(journal_file_append_entry(f, &ts, NULL, &iovec, 1, NULL, NULL, NULL, NULL) == 0);
+
+#if HAVE_GCRYPT
+ journal_file_append_tag(f);
+#endif
+ journal_file_dump(f);
+
+ /* We have to partially reimplement some of the dump logic, because the normal next_entry does the
+ * decompression for us. */
+ p = le64toh(f->header->header_size);
+ for (;;) {
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
+ assert_se(r == 0);
+ if (o->object.type == OBJECT_DATA)
+ break;
+
+ assert_se(p < le64toh(f->header->tail_object_offset));
+ p = p + ALIGN64(le64toh(o->object.size));
+ }
+
+ is_compressed = COMPRESSION_FROM_OBJECT(o) != COMPRESSION_NONE;
+
+ (void) journal_file_offline_close(f);
+
+ log_info("Done...");
+
+ if (arg_keep)
+ log_info("Not removing %s", t);
+ else {
+ journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+ }
+
+ puts("------------------------------------------------------------");
+
+ return is_compressed;
+}
+
+static void test_min_compress_size_one(void) {
+ /* Note that XZ will actually fail to compress anything under 80 bytes, so you have to choose the limits
+ * carefully */
+
+ /* DEFAULT_MIN_COMPRESS_SIZE is 512 */
+ assert_se(!check_compressed(UINT64_MAX, 255));
+ assert_se(check_compressed(UINT64_MAX, 513));
+
+ /* compress everything */
+ assert_se(check_compressed(0, 96));
+ assert_se(check_compressed(8, 96));
+
+ /* Ensure we don't try to compress less than 8 bytes */
+ assert_se(!check_compressed(0, 7));
+
+ /* check boundary conditions */
+ assert_se(check_compressed(256, 256));
+ assert_se(!check_compressed(256, 255));
+}
+
+TEST(min_compress_size) {
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "0", 1) >= 0);
+ test_min_compress_size_one();
+
+ assert_se(setenv("SYSTEMD_JOURNAL_COMPACT", "1", 1) >= 0);
+ test_min_compress_size_one();
+}
+#endif
+
+static int intro(void) {
+ arg_keep = saved_argc > 1;
+
+ /* journal_file_open() requires a valid machine id */
+ if (access("/etc/machine-id", F_OK) != 0)
+ return log_tests_skipped("/etc/machine-id not found");
+
+ return EXIT_SUCCESS;
+}
+
+DEFINE_TEST_MAIN_WITH_INTRO(LOG_DEBUG, intro);
diff --git a/src/libsystemd/sd-journal/test-mmap-cache.c b/src/libsystemd/sd-journal/test-mmap-cache.c
new file mode 100644
index 0000000..ce5ea12
--- /dev/null
+++ b/src/libsystemd/sd-journal/test-mmap-cache.c
@@ -0,0 +1,68 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "fd-util.h"
+#include "macro.h"
+#include "mmap-cache.h"
+#include "tests.h"
+#include "tmpfile-util.h"
+
+int main(int argc, char *argv[]) {
+ MMapFileDescriptor *fx;
+ int x, y, z, r;
+ char px[] = "/tmp/testmmapXXXXXXX", py[] = "/tmp/testmmapYXXXXXX", pz[] = "/tmp/testmmapZXXXXXX";
+ MMapCache *m;
+ void *p, *q;
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(m = mmap_cache_new());
+
+ x = mkostemp_safe(px);
+ assert_se(x >= 0);
+ (void) unlink(px);
+
+ assert_se(mmap_cache_add_fd(m, x, PROT_READ, &fx) > 0);
+
+ y = mkostemp_safe(py);
+ assert_se(y >= 0);
+ (void) unlink(py);
+
+ z = mkostemp_safe(pz);
+ assert_se(z >= 0);
+ (void) unlink(pz);
+
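+        /* The asserts below check that nearby ranges are backed by the same mapping:
+         * the pointers returned for overlapping requests differ exactly by the
+         * difference in file offsets, both near the start of the file and 16 MiB into it. */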
+ r = mmap_cache_fd_get(fx, 0, false, 1, 2, NULL, &p);
+ assert_se(r >= 0);
+
+ r = mmap_cache_fd_get(fx, 0, false, 2, 2, NULL, &q);
+ assert_se(r >= 0);
+
+ assert_se((uint8_t*) p + 1 == (uint8_t*) q);
+
+ r = mmap_cache_fd_get(fx, 1, false, 3, 2, NULL, &q);
+ assert_se(r >= 0);
+
+ assert_se((uint8_t*) p + 2 == (uint8_t*) q);
+
+ r = mmap_cache_fd_get(fx, 0, false, 16ULL*1024ULL*1024ULL, 2, NULL, &p);
+ assert_se(r >= 0);
+
+ r = mmap_cache_fd_get(fx, 1, false, 16ULL*1024ULL*1024ULL+1, 2, NULL, &q);
+ assert_se(r >= 0);
+
+ assert_se((uint8_t*) p + 1 == (uint8_t*) q);
+
+ mmap_cache_fd_free(fx);
+ mmap_cache_unref(m);
+
+ safe_close(x);
+ safe_close(y);
+ safe_close(z);
+
+ return 0;
+}