Diffstat
-rw-r--r--  src/journal-remote/browse.html | 547
-rw-r--r--  src/journal-remote/journal-gatewayd.c | 1045
-rw-r--r--  src/journal-remote/journal-remote-main.c | 1158
-rw-r--r--  src/journal-remote/journal-remote-parse.c | 84
-rw-r--r--  src/journal-remote/journal-remote-parse.h | 20
-rw-r--r--  src/journal-remote/journal-remote-write.c | 105
-rw-r--r--  src/journal-remote/journal-remote-write.h | 39
-rw-r--r--  src/journal-remote/journal-remote.c | 533
-rw-r--r--  src/journal-remote/journal-remote.conf.in | 19
-rw-r--r--  src/journal-remote/journal-remote.h | 65
-rw-r--r--  src/journal-remote/journal-upload-journal.c | 414
-rw-r--r--  src/journal-remote/journal-upload.c | 854
-rw-r--r--  src/journal-remote/journal-upload.conf.in | 18
-rw-r--r--  src/journal-remote/journal-upload.h | 71
-rwxr-xr-x  src/journal-remote/log-generator.py | 78
-rw-r--r--  src/journal-remote/meson.build | 71
-rw-r--r--  src/journal-remote/microhttpd-util.c | 312
-rw-r--r--  src/journal-remote/microhttpd-util.h | 78
-rw-r--r--  src/journal/audit-type.c | 6
-rw-r--r--  src/journal/audit-type.h | 23
-rw-r--r--  src/journal/audit_type-to-name.awk | 9
-rw-r--r--  src/journal/cat.c | 168
-rw-r--r--  src/journal/catalog.c | 743
-rw-r--r--  src/journal/catalog.h | 18
-rw-r--r--  src/journal/compress.c | 677
-rw-r--r--  src/journal/compress.h | 67
-rw-r--r--  src/journal/fsprg.c | 378
-rw-r--r--  src/journal/fsprg.h | 62
-rwxr-xr-x  src/journal/generate-audit_type-list.sh | 15
-rw-r--r--  src/journal/journal-authenticate.c | 534
-rw-r--r--  src/journal/journal-authenticate.h | 23
-rw-r--r--  src/journal/journal-def.h | 230
-rw-r--r--  src/journal/journal-file.c | 3908
-rw-r--r--  src/journal/journal-file.h | 261
-rw-r--r--  src/journal/journal-internal.h | 128
-rw-r--r--  src/journal/journal-qrcode.c | 121
-rw-r--r--  src/journal/journal-qrcode.h | 9
-rw-r--r--  src/journal/journal-send.c | 543
-rw-r--r--  src/journal/journal-vacuum.c | 321
-rw-r--r--  src/journal/journal-vacuum.h | 9
-rw-r--r--  src/journal/journal-verify.c | 1320
-rw-r--r--  src/journal/journal-verify.h | 6
-rw-r--r--  src/journal/journalctl.c | 2706
-rw-r--r--  src/journal/journald-audit.c | 545
-rw-r--r--  src/journal/journald-audit.h | 11
-rw-r--r--  src/journal/journald-console.c | 103
-rw-r--r--  src/journal/journald-console.h | 6
-rw-r--r--  src/journal/journald-context.c | 782
-rw-r--r--  src/journal/journald-context.h | 98
-rw-r--r--  src/journal/journald-gperf.gperf | 51
-rw-r--r--  src/journal/journald-kmsg.c | 450
-rw-r--r--  src/journal/journald-kmsg.h | 13
-rw-r--r--  src/journal/journald-native.c | 502
-rw-r--r--  src/journal/journald-native.h | 23
-rw-r--r--  src/journal/journald-rate-limit.c | 255
-rw-r--r--  src/journal/journald-rate-limit.h | 10
-rw-r--r--  src/journal/journald-server.c | 2192
-rw-r--r--  src/journal/journald-server.h | 214
-rw-r--r--  src/journal/journald-stream.c | 876
-rw-r--r--  src/journal/journald-stream.h | 15
-rw-r--r--  src/journal/journald-syslog.c | 516
-rw-r--r--  src/journal/journald-syslog.h | 15
-rw-r--r--  src/journal/journald-wall.c | 54
-rw-r--r--  src/journal/journald-wall.h | 6
-rw-r--r--  src/journal/journald.c | 112
-rw-r--r--  src/journal/journald.conf | 43
-rw-r--r--  src/journal/lookup3.c | 1005
-rw-r--r--  src/journal/lookup3.h | 22
-rw-r--r--  src/journal/meson.build | 130
-rw-r--r--  src/journal/mmap-cache.c | 668
-rw-r--r--  src/journal/mmap-cache.h | 34
-rw-r--r--  src/journal/sd-journal.c | 3137
-rw-r--r--  src/journal/test-audit-type.c | 25
-rw-r--r--  src/journal/test-catalog.c | 240
-rw-r--r--  src/journal/test-compress-benchmark.c | 175
-rw-r--r--  src/journal/test-compress.c | 341
-rw-r--r--  src/journal/test-journal-config.c | 53
-rw-r--r--  src/journal/test-journal-enum.c | 37
-rw-r--r--  src/journal/test-journal-flush.c | 58
-rw-r--r--  src/journal/test-journal-init.c | 48
-rw-r--r--  src/journal/test-journal-interleaving.c | 288
-rw-r--r--  src/journal/test-journal-match.c | 62
-rw-r--r--  src/journal/test-journal-send.c | 85
-rw-r--r--  src/journal/test-journal-stream.c | 180
-rw-r--r--  src/journal/test-journal-syslog.c | 59
-rw-r--r--  src/journal/test-journal-verify.c | 134
-rw-r--r--  src/journal/test-journal.c | 252
-rw-r--r--  src/journal/test-mmap-cache.c | 66
88 files changed, 31787 insertions, 0 deletions
diff --git a/src/journal-remote/browse.html b/src/journal-remote/browse.html
new file mode 100644
index 0000000..e5162d1
--- /dev/null
+++ b/src/journal-remote/browse.html
@@ -0,0 +1,547 @@
+<!DOCTYPE html>
+<html>
+<head>
+ <title>Journal</title>
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
+ <style type="text/css">
+ div#divlogs, div#diventry {
+ font-family: monospace;
+ font-size: 7pt;
+ background-color: #ffffff;
+ padding: 1em;
+ margin: 2em 0em;
+ border-radius: 10px 10px 10px 10px;
+ border: 1px solid threedshadow;
+ white-space: nowrap;
+ overflow-x: scroll;
+ }
+ div#diventry {
+ display: none;
+ }
+ div#divlogs {
+ display: block;
+ }
+ body {
+ background-color: #ededed;
+ color: #313739;
+ font: message-box;
+ margin: 3em;
+ }
+ td.timestamp {
+ text-align: right;
+ border-right: 1px dotted lightgrey;
+ padding-right: 5px;
+ }
+ td.process {
+ border-right: 1px dotted lightgrey;
+ padding-left: 5px;
+ padding-right: 5px;
+ }
+ td.message {
+ padding-left: 5px;
+ }
+ td.message > a:link, td.message > a:visited {
+ text-decoration: none;
+ color: #313739;
+ }
+ td.message-error {
+ padding-left: 5px;
+ color: red;
+ font-weight: bold;
+ }
+ td.message-error > a:link, td.message-error > a:visited {
+ text-decoration: none;
+ color: red;
+ }
+ td.message-highlight {
+ padding-left: 5px;
+ font-weight: bold;
+ }
+ td.message-highlight > a:link, td.message-highlight > a:visited {
+ text-decoration: none;
+ color: #313739;
+ }
+ td > a:hover, td > a:active {
+ text-decoration: underline;
+ color: #c13739;
+ }
+ table#tablelogs, table#tableentry {
+ border-collapse: collapse;
+ }
+ td.field {
+ text-align: right;
+ border-right: 1px dotted lightgrey;
+ padding-right: 5px;
+ }
+ td.data {
+ padding-left: 5px;
+ }
+ div#keynav {
+ text-align: center;
+ font-size: 7pt;
+ color: #818789;
+ padding-top: 2em;
+ }
+ span.key {
+ font-weight: bold;
+ color: #313739;
+ }
+ div#buttonnav {
+ text-align: center;
+ }
+ button {
+ font-size: 18pt;
+ font-weight: bold;
+ width: 2em;
+ height: 2em;
+ }
+ div#filternav {
+ text-align: center;
+ }
+ select {
+ width: 50em;
+ }
+ </style>
+</head>
+
+<body>
+ <!-- TODO:
+ - live display
+ - show red lines for reboots -->
+
+ <h1 id="title"></h1>
+
+ <div id="os"></div>
+ <div id="virtualization"></div>
+ <div id="cutoff"></div>
+ <div id="machine"></div>
+ <div id="usage"></div>
+ <div id="showing"></div>
+
+ <div id="filternav">
+ <select id="filter" onchange="onFilterChange(this);" onfocus="onFilterFocus(this);">
+ <option>No filter</option>
+ </select>
+ &nbsp;&nbsp;&nbsp;&nbsp;
+ <input id="boot" type="checkbox" onchange="onBootChange(this);">Only current boot</input>
+ </div>
+
+ <div id="divlogs"><table id="tablelogs"></table></div>
+ <a name="entry"></a>
+ <div id="diventry"><table id="tableentry"></table></div>
+
+ <div id="buttonnav">
+ <button id="head" onclick="entriesLoadHead();" title="First Page">&#8676;</button>
+ <button id="previous" type="button" onclick="entriesLoadPrevious();" title="Previous Page"/>&#8592;</button>
+ <button id="next" type="button" onclick="entriesLoadNext();" title="Next Page"/>&#8594;</button>
+ <button id="tail" type="button" onclick="entriesLoadTail();" title="Last Page"/>&#8677;</button>
+ &nbsp;&nbsp;&nbsp;&nbsp;
+ <button id="more" type="button" onclick="entriesMore();" title="More Entries"/>+</button>
+ <button id="less" type="button" onclick="entriesLess();" title="Fewer Entries"/>-</button>
+ </div>
+
+ <div id="keynav">
+ <span class="key">g</span>: First Page &nbsp;&nbsp;&nbsp;&nbsp;
+ <span class="key">&#8592;, k, BACKSPACE</span>: Previous Page &nbsp;&nbsp;&nbsp;&nbsp;
+ <span class="key">&#8594;, j, SPACE</span>: Next Page &nbsp;&nbsp;&nbsp;&nbsp;
+ <span class="key">G</span>: Last Page &nbsp;&nbsp;&nbsp;&nbsp;
+ <span class="key">+</span>: More entries &nbsp;&nbsp;&nbsp;&nbsp;
+ <span class="key">-</span>: Fewer entries
+ </div>
+
+ <script type="text/javascript">
+ var first_cursor = null;
+ var last_cursor = null;
+
+ function getNEntries() {
+ var n;
+ n = localStorage["n_entries"];
+ if (n == null)
+ return 50;
+ n = parseInt(n);
+ if (n < 10)
+ return 10;
+ if (n > 1000)
+ return 1000;
+ return n;
+ }
+
+ function showNEntries(n) {
+ var showing = document.getElementById("showing");
+ showing.innerHTML = "Showing <b>" + n.toString() + "</b> entries.";
+ }
+
+ function setNEntries(n) {
+ if (n < 10)
+ return 10;
+ if (n > 1000)
+ return 1000;
+ localStorage["n_entries"] = n.toString();
+ showNEntries(n);
+ }
+
+ function machineLoad() {
+ var request = new XMLHttpRequest();
+ request.open("GET", "/machine");
+ request.onreadystatechange = machineOnResult;
+ request.setRequestHeader("Accept", "application/json");
+ request.send(null);
+ }
+
+ function formatBytes(u) {
+ if (u >= 1024*1024*1024*1024)
+ return (u/1024/1024/1024/1024).toFixed(1) + " TiB";
+ else if (u >= 1024*1024*1024)
+ return (u/1024/1024/1024).toFixed(1) + " GiB";
+ else if (u >= 1024*1024)
+ return (u/1024/1024).toFixed(1) + " MiB";
+ else if (u >= 1024)
+ return (u/1024).toFixed(1) + " KiB";
+ else
+ return u.toString() + " B";
+ }
+
+ function escapeHTML(s) {
+ return s.replace(/&/g, "&amp;").replace(/</g, "&lt;").replace(/>/g, "&gt;");
+ }
+
+ function machineOnResult(event) {
+ if ((event.currentTarget.readyState != 4) ||
+ (event.currentTarget.status != 200 && event.currentTarget.status != 0))
+ return;
+
+ var d = JSON.parse(event.currentTarget.responseText);
+
+ var title = document.getElementById("title");
+ title.innerHTML = 'Journal of ' + escapeHTML(d.hostname);
+ document.title = 'Journal of ' + escapeHTML(d.hostname);
+
+ var machine = document.getElementById("machine");
+ machine.innerHTML = 'Machine ID is <b>' + d.machine_id + '</b>, current boot ID is <b>' + d.boot_id + '</b>.';
+
+ var cutoff = document.getElementById("cutoff");
+ var from = new Date(parseInt(d.cutoff_from_realtime) / 1000);
+ var to = new Date(parseInt(d.cutoff_to_realtime) / 1000);
+ cutoff.innerHTML = 'Journal begins at <b>' + from.toLocaleString() + '</b> and ends at <b>' + to.toLocaleString() + '</b>.';
+
+ var usage = document.getElementById("usage");
+ usage.innerHTML = 'Disk usage is <b>' + formatBytes(parseInt(d.usage)) + '</b>.';
+
+ var os = document.getElementById("os");
+ os.innerHTML = 'Operating system is <b>' + escapeHTML(d.os_pretty_name) + '</b>.';
+
+ var virtualization = document.getElementById("virtualization");
+ virtualization.innerHTML = d.virtualization == "bare" ? "Running on <b>bare metal</b>." : "Running on virtualization <b>" + escapeHTML(d.virtualization) + "</b>.";
+ }
+
+ function entriesLoad(range) {
+
+ if (range == null) {
+ if (localStorage["cursor"] != null && localStorage["cursor"] != "")
+ range = localStorage["cursor"] + ":0";
+ else
+ range = "";
+ }
+
+ var url = "/entries";
+
+ if (localStorage["filter"] != "" && localStorage["filter"] != null) {
+ url += "?_SYSTEMD_UNIT=" + escape(localStorage["filter"]);
+
+ if (localStorage["boot"] == "1")
+ url += "&boot";
+ } else {
+ if (localStorage["boot"] == "1")
+ url += "?boot";
+ }
+
+ var request = new XMLHttpRequest();
+ request.open("GET", url);
+ request.onreadystatechange = entriesOnResult;
+ request.setRequestHeader("Accept", "application/json");
+ request.setRequestHeader("Range", "entries=" + range + ":" + getNEntries().toString());
+ request.send(null);
+ }
+
+ function entriesLoadNext() {
+ if (last_cursor == null)
+ entriesLoad("");
+ else
+ entriesLoad(last_cursor + ":1");
+ }
+
+ function entriesLoadPrevious() {
+ if (first_cursor == null)
+ entriesLoad("");
+ else
+ entriesLoad(first_cursor + ":-" + getNEntries().toString());
+ }
+
+ function entriesLoadHead() {
+ entriesLoad("");
+ }
+
+ function entriesLoadTail() {
+ entriesLoad(":-" + getNEntries().toString());
+ }
+
+ function entriesOnResult(event) {
+
+ if ((event.currentTarget.readyState != 4) ||
+ (event.currentTarget.status != 200 && event.currentTarget.status != 0))
+ return;
+
+ var logs = document.getElementById("tablelogs");
+
+ var lc = null;
+ var fc = null;
+
+ var i, l = event.currentTarget.responseText.split('\n');
+
+ if (l.length <= 1) {
+ logs.innerHTML = '<tbody><tr><td colspan="3"><i>No further entries...</i></td></tr></tbody>';
+ return;
+ }
+
+ var buf = '';
+
+ for (i in l) {
+ if (l[i] == '')
+ continue;
+
+ var d = JSON.parse(l[i]);
+ if (d.MESSAGE == undefined || d.__CURSOR == undefined)
+ continue;
+
+ if (fc == null)
+ fc = d.__CURSOR;
+ lc = d.__CURSOR;
+
+ var priority;
+ if (d.PRIORITY != undefined)
+ priority = parseInt(d.PRIORITY);
+ else
+ priority = 6;
+
+ var clazz;
+ if (priority <= 3)
+ clazz = "message-error";
+ else if (priority <= 5)
+ clazz = "message-highlight";
+ else
+ clazz = "message";
+
+ buf += '<tr><td class="timestamp">';
+
+ if (d.__REALTIME_TIMESTAMP != undefined) {
+ var timestamp = new Date(parseInt(d.__REALTIME_TIMESTAMP) / 1000);
+ buf += timestamp.toLocaleString();
+ }
+
+ buf += '</td><td class="process">';
+
+ if (d.SYSLOG_IDENTIFIER != undefined)
+ buf += escapeHTML(d.SYSLOG_IDENTIFIER);
+ else if (d._COMM != undefined)
+ buf += escapeHTML(d._COMM);
+
+ if (d._PID != undefined)
+ buf += "[" + escapeHTML(d._PID) + "]";
+ else if (d.SYSLOG_PID != undefined)
+ buf += "[" + escapeHTML(d.SYSLOG_PID) + "]";
+
+ buf += '</td><td class="' + clazz + '"><a href="#entry" onclick="onMessageClick(\'' + d.__CURSOR + '\');">';
+
+ if (d.MESSAGE == null)
+ buf += "[blob data]";
+ else if (d.MESSAGE instanceof Array)
+ buf += "[" + formatBytes(d.MESSAGE.length) + " blob data]";
+ else
+ buf += escapeHTML(d.MESSAGE);
+
+ buf += '</a></td></tr>';
+ }
+
+ logs.innerHTML = '<tbody>' + buf + '</tbody>';
+
+ if (fc != null) {
+ first_cursor = fc;
+ localStorage["cursor"] = fc;
+ }
+ if (lc != null)
+ last_cursor = lc;
+ }
+
+ function entriesMore() {
+ setNEntries(getNEntries() + 10);
+ entriesLoad(first_cursor);
+ }
+
+ function entriesLess() {
+ setNEntries(getNEntries() - 10);
+ entriesLoad(first_cursor);
+ }
+
+ function onResultMessageClick(event) {
+ if ((event.currentTarget.readyState != 4) ||
+ (event.currentTarget.status != 200 && event.currentTarget.status != 0))
+ return;
+
+ var d = JSON.parse(event.currentTarget.responseText);
+
+ document.getElementById("diventry").style.display = "block";
+ var entry = document.getElementById("tableentry");
+
+ var buf = "";
+ for (var key in d) {
+ var data = d[key];
+
+ if (data == null)
+ data = "[blob data]";
+ else if (data instanceof Array)
+ data = "[" + formatBytes(data.length) + " blob data]";
+ else
+ data = escapeHTML(data);
+
+ buf += '<tr><td class="field">' + key + '</td><td class="data">' + data + '</td></tr>';
+ }
+ entry.innerHTML = '<tbody>' + buf + '</tbody>';
+ }
+
+ function onMessageClick(t) {
+ var request = new XMLHttpRequest();
+ request.open("GET", "/entries?discrete");
+ request.onreadystatechange = onResultMessageClick;
+ request.setRequestHeader("Accept", "application/json");
+ request.setRequestHeader("Range", "entries=" + t + ":0:1");
+ request.send(null);
+ }
+
+ function onKeyUp(event) {
+ switch (event.keyCode) {
+ case 8:
+ case 37:
+ case 75:
+ entriesLoadPrevious();
+ break;
+ case 32:
+ case 39:
+ case 74:
+ entriesLoadNext();
+ break;
+
+ case 71:
+ if (event.shiftKey)
+ entriesLoadTail();
+ else
+ entriesLoadHead();
+ break;
+ case 171:
+ entriesMore();
+ break;
+ case 173:
+ entriesLess();
+ break;
+ }
+ }
+
+ function onMouseWheel(event) {
+ if (event.detail < 0 || event.wheelDelta > 0)
+ entriesLoadPrevious();
+ else
+ entriesLoadNext();
+ }
+
+ function onResultFilterFocus(event) {
+ if ((event.currentTarget.readyState != 4) ||
+ (event.currentTarget.status != 200 && event.currentTarget.status != 0))
+ return;
+
+ var f = document.getElementById("filter");
+
+ var l = event.currentTarget.responseText.split('\n');
+ var buf = '<option>No filter</option>';
+ var j = -1;
+
+ for (i in l) {
+
+ if (l[i] == '')
+ continue;
+
+ var d = JSON.parse(l[i]);
+ if (d._SYSTEMD_UNIT == undefined)
+ continue;
+
+ buf += '<option value="' + escape(d._SYSTEMD_UNIT) + '">' + escapeHTML(d._SYSTEMD_UNIT) + '</option>';
+
+ if (d._SYSTEMD_UNIT == localStorage["filter"])
+ j = i;
+ }
+
+ if (j < 0) {
+ if (localStorage["filter"] != null && localStorage["filter"] != "") {
+ buf += '<option value="' + escape(localStorage["filter"]) + '">' + escapeHTML(localStorage["filter"]) + '</option>';
+ j = i + 1;
+ } else
+ j = 0;
+ }
+
+ f.innerHTML = buf;
+ f.selectedIndex = j;
+ }
+
+ function onFilterFocus(w) {
+ var request = new XMLHttpRequest();
+ request.open("GET", "/fields/_SYSTEMD_UNIT");
+ request.onreadystatechange = onResultFilterFocus;
+ request.setRequestHeader("Accept", "application/json");
+ request.send(null);
+ }
+
+ function onFilterChange(w) {
+ if (w.selectedIndex <= 0)
+ localStorage["filter"] = "";
+ else
+ localStorage["filter"] = unescape(w.options[w.selectedIndex].value);
+
+ entriesLoadHead();
+ }
+
+ function onBootChange(w) {
+ localStorage["boot"] = w.checked ? "1" : "0";
+ entriesLoadHead();
+ }
+
+ function initFilter() {
+ var f = document.getElementById("filter");
+
+ var buf = '<option>No filter</option>';
+
+ var filter = localStorage["filter"];
+ var j;
+ if (filter != null && filter != "") {
+ buf += '<option value="' + escape(filter) + '">' + escapeHTML(filter) + '</option>';
+ j = 1;
+ } else
+ j = 0;
+
+ f.innerHTML = buf;
+ f.selectedIndex = j;
+ }
+
+ function installHandlers() {
+ document.onkeyup = onKeyUp;
+
+ var logs = document.getElementById("divlogs");
+ logs.addEventListener("mousewheel", onMouseWheel, false);
+ logs.addEventListener("DOMMouseScroll", onMouseWheel, false);
+ }
+
+ machineLoad();
+ entriesLoad(null);
+ showNEntries(getNEntries());
+ initFilter();
+ installHandlers();
+ </script>
+</body>
+</html>
diff --git a/src/journal-remote/journal-gatewayd.c b/src/journal-remote/journal-gatewayd.c
new file mode 100644
index 0000000..af45fa5
--- /dev/null
+++ b/src/journal-remote/journal-gatewayd.c
@@ -0,0 +1,1045 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <getopt.h>
+#include <microhttpd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#include "sd-bus.h"
+#include "sd-daemon.h"
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "bus-util.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "hostname-util.h"
+#include "log.h"
+#include "logs-show.h"
+#include "main-func.h"
+#include "microhttpd-util.h"
+#include "os-util.h"
+#include "parse-util.h"
+#include "pretty-print.h"
+#include "sigbus.h"
+#include "tmpfile-util.h"
+#include "util.h"
+
+#define JOURNAL_WAIT_TIMEOUT (10*USEC_PER_SEC)
+
+static char *arg_key_pem = NULL;
+static char *arg_cert_pem = NULL;
+static char *arg_trust_pem = NULL;
+static const char *arg_directory = NULL;
+
+STATIC_DESTRUCTOR_REGISTER(arg_key_pem, freep);
+STATIC_DESTRUCTOR_REGISTER(arg_cert_pem, freep);
+STATIC_DESTRUCTOR_REGISTER(arg_trust_pem, freep);
+
+typedef struct RequestMeta {
+ sd_journal *journal;
+
+ OutputMode mode;
+
+ char *cursor;
+ int64_t n_skip;
+ uint64_t n_entries;
+ bool n_entries_set;
+
+ FILE *tmp;
+ uint64_t delta, size;
+
+ int argument_parse_error;
+
+ bool follow;
+ bool discrete;
+
+ uint64_t n_fields;
+ bool n_fields_set;
+} RequestMeta;
+
+static const char* const mime_types[_OUTPUT_MODE_MAX] = {
+ [OUTPUT_SHORT] = "text/plain",
+ [OUTPUT_JSON] = "application/json",
+ [OUTPUT_JSON_SSE] = "text/event-stream",
+ [OUTPUT_JSON_SEQ] = "application/json-seq",
+ [OUTPUT_EXPORT] = "application/vnd.fdo.journal",
+};
+
+static RequestMeta *request_meta(void **connection_cls) {
+ RequestMeta *m;
+
+ assert(connection_cls);
+ if (*connection_cls)
+ return *connection_cls;
+
+ m = new0(RequestMeta, 1);
+ if (!m)
+ return NULL;
+
+ *connection_cls = m;
+ return m;
+}
+
+static void request_meta_free(
+ void *cls,
+ struct MHD_Connection *connection,
+ void **connection_cls,
+ enum MHD_RequestTerminationCode toe) {
+
+ RequestMeta *m = *connection_cls;
+
+ if (!m)
+ return;
+
+ sd_journal_close(m->journal);
+
+ safe_fclose(m->tmp);
+
+ free(m->cursor);
+ free(m);
+}
+
+static int open_journal(RequestMeta *m) {
+ assert(m);
+
+ if (m->journal)
+ return 0;
+
+ if (arg_directory)
+ return sd_journal_open_directory(&m->journal, arg_directory, 0);
+ else
+ return sd_journal_open(&m->journal, SD_JOURNAL_LOCAL_ONLY|SD_JOURNAL_SYSTEM);
+}
+
+static int request_meta_ensure_tmp(RequestMeta *m) {
+ assert(m);
+
+ if (m->tmp)
+ rewind(m->tmp);
+ else {
+ int fd;
+
+ fd = open_tmpfile_unlinkable("/tmp", O_RDWR|O_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ m->tmp = fdopen(fd, "w+");
+ if (!m->tmp) {
+ safe_close(fd);
+ return -errno;
+ }
+ }
+
+ return 0;
+}
+
+static ssize_t request_reader_entries(
+ void *cls,
+ uint64_t pos,
+ char *buf,
+ size_t max) {
+
+ RequestMeta *m = cls;
+ int r;
+ size_t n, k;
+
+ assert(m);
+ assert(buf);
+ assert(max > 0);
+ assert(pos >= m->delta);
+
+ pos -= m->delta;
+
+ while (pos >= m->size) {
+ off_t sz;
+
+ /* End of this entry, so let's serialize the next
+ * one */
+
+ if (m->n_entries_set &&
+ m->n_entries <= 0)
+ return MHD_CONTENT_READER_END_OF_STREAM;
+
+ if (m->n_skip < 0)
+ r = sd_journal_previous_skip(m->journal, (uint64_t) -m->n_skip + 1);
+ else if (m->n_skip > 0)
+ r = sd_journal_next_skip(m->journal, (uint64_t) m->n_skip + 1);
+ else
+ r = sd_journal_next(m->journal);
+
+ if (r < 0) {
+ log_error_errno(r, "Failed to advance journal pointer: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ } else if (r == 0) {
+
+ if (m->follow) {
+ r = sd_journal_wait(m->journal, (uint64_t) JOURNAL_WAIT_TIMEOUT);
+ if (r < 0) {
+ log_error_errno(r, "Couldn't wait for journal event: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+ if (r == SD_JOURNAL_NOP)
+ break;
+
+ continue;
+ }
+
+ return MHD_CONTENT_READER_END_OF_STREAM;
+ }
+
+ if (m->discrete) {
+ assert(m->cursor);
+
+ r = sd_journal_test_cursor(m->journal, m->cursor);
+ if (r < 0) {
+ log_error_errno(r, "Failed to test cursor: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ if (r == 0)
+ return MHD_CONTENT_READER_END_OF_STREAM;
+ }
+
+ pos -= m->size;
+ m->delta += m->size;
+
+ if (m->n_entries_set)
+ m->n_entries -= 1;
+
+ m->n_skip = 0;
+
+ r = request_meta_ensure_tmp(m);
+ if (r < 0) {
+ log_error_errno(r, "Failed to create temporary file: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ r = show_journal_entry(m->tmp, m->journal, m->mode, 0, OUTPUT_FULL_WIDTH,
+ NULL, NULL, NULL);
+ if (r < 0) {
+ log_error_errno(r, "Failed to serialize item: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ sz = ftello(m->tmp);
+ if (sz == (off_t) -1) {
+ log_error_errno(errno, "Failed to retrieve file position: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ m->size = (uint64_t) sz;
+ }
+
+ if (m->tmp == NULL && m->follow)
+ return 0;
+
+ if (fseeko(m->tmp, pos, SEEK_SET) < 0) {
+ log_error_errno(errno, "Failed to seek to position: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ n = m->size - pos;
+ if (n < 1)
+ return 0;
+ if (n > max)
+ n = max;
+
+ errno = 0;
+ k = fread(buf, 1, n, m->tmp);
+ if (k != n) {
+ log_error("Failed to read from file: %s", errno ? strerror(errno) : "Premature EOF");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ return (ssize_t) k;
+}
+
+static int request_parse_accept(
+ RequestMeta *m,
+ struct MHD_Connection *connection) {
+
+ const char *header;
+
+ assert(m);
+ assert(connection);
+
+ header = MHD_lookup_connection_value(connection, MHD_HEADER_KIND, "Accept");
+ if (!header)
+ return 0;
+
+ if (streq(header, mime_types[OUTPUT_JSON]))
+ m->mode = OUTPUT_JSON;
+ else if (streq(header, mime_types[OUTPUT_JSON_SSE]))
+ m->mode = OUTPUT_JSON_SSE;
+ else if (streq(header, mime_types[OUTPUT_JSON_SEQ]))
+ m->mode = OUTPUT_JSON_SEQ;
+ else if (streq(header, mime_types[OUTPUT_EXPORT]))
+ m->mode = OUTPUT_EXPORT;
+ else
+ m->mode = OUTPUT_SHORT;
+
+ return 0;
+}
+
+static int request_parse_range(
+ RequestMeta *m,
+ struct MHD_Connection *connection) {
+
+ const char *range, *colon, *colon2;
+ int r;
+
+ assert(m);
+ assert(connection);
+
+ range = MHD_lookup_connection_value(connection, MHD_HEADER_KIND, "Range");
+ if (!range)
+ return 0;
+
+ if (!startswith(range, "entries="))
+ return 0;
+
+ range += 8;
+ range += strspn(range, WHITESPACE);
+
+ colon = strchr(range, ':');
+ if (!colon)
+ m->cursor = strdup(range);
+ else {
+ const char *p;
+
+ colon2 = strchr(colon + 1, ':');
+ if (colon2) {
+ _cleanup_free_ char *t;
+
+ t = strndup(colon + 1, colon2 - colon - 1);
+ if (!t)
+ return -ENOMEM;
+
+ r = safe_atoi64(t, &m->n_skip);
+ if (r < 0)
+ return r;
+ }
+
+ p = (colon2 ? colon2 : colon) + 1;
+ if (*p) {
+ r = safe_atou64(p, &m->n_entries);
+ if (r < 0)
+ return r;
+
+ if (m->n_entries <= 0)
+ return -EINVAL;
+
+ m->n_entries_set = true;
+ }
+
+ m->cursor = strndup(range, colon - range);
+ }
+
+ if (!m->cursor)
+ return -ENOMEM;
+
+ m->cursor[strcspn(m->cursor, WHITESPACE)] = 0;
+ if (isempty(m->cursor))
+ m->cursor = mfree(m->cursor);
+
+ return 0;
+}
+
+static int request_parse_arguments_iterator(
+ void *cls,
+ enum MHD_ValueKind kind,
+ const char *key,
+ const char *value) {
+
+ RequestMeta *m = cls;
+ _cleanup_free_ char *p = NULL;
+ int r;
+
+ assert(m);
+
+ if (isempty(key)) {
+ m->argument_parse_error = -EINVAL;
+ return MHD_NO;
+ }
+
+ if (streq(key, "follow")) {
+ if (isempty(value)) {
+ m->follow = true;
+ return MHD_YES;
+ }
+
+ r = parse_boolean(value);
+ if (r < 0) {
+ m->argument_parse_error = r;
+ return MHD_NO;
+ }
+
+ m->follow = r;
+ return MHD_YES;
+ }
+
+ if (streq(key, "discrete")) {
+ if (isempty(value)) {
+ m->discrete = true;
+ return MHD_YES;
+ }
+
+ r = parse_boolean(value);
+ if (r < 0) {
+ m->argument_parse_error = r;
+ return MHD_NO;
+ }
+
+ m->discrete = r;
+ return MHD_YES;
+ }
+
+ if (streq(key, "boot")) {
+ if (isempty(value))
+ r = true;
+ else {
+ r = parse_boolean(value);
+ if (r < 0) {
+ m->argument_parse_error = r;
+ return MHD_NO;
+ }
+ }
+
+ if (r) {
+ char match[9 + 32 + 1] = "_BOOT_ID=";
+ sd_id128_t bid;
+
+ r = sd_id128_get_boot(&bid);
+ if (r < 0) {
+ log_error_errno(r, "Failed to get boot ID: %m");
+ return MHD_NO;
+ }
+
+ sd_id128_to_string(bid, match + 9);
+ r = sd_journal_add_match(m->journal, match, sizeof(match)-1);
+ if (r < 0) {
+ m->argument_parse_error = r;
+ return MHD_NO;
+ }
+ }
+
+ return MHD_YES;
+ }
+
+ p = strjoin(key, "=", strempty(value));
+ if (!p) {
+ m->argument_parse_error = log_oom();
+ return MHD_NO;
+ }
+
+ r = sd_journal_add_match(m->journal, p, 0);
+ if (r < 0) {
+ m->argument_parse_error = r;
+ return MHD_NO;
+ }
+
+ return MHD_YES;
+}
+
+static int request_parse_arguments(
+ RequestMeta *m,
+ struct MHD_Connection *connection) {
+
+ assert(m);
+ assert(connection);
+
+ m->argument_parse_error = 0;
+ MHD_get_connection_values(connection, MHD_GET_ARGUMENT_KIND, request_parse_arguments_iterator, m);
+
+ return m->argument_parse_error;
+}
+
+static int request_handler_entries(
+ struct MHD_Connection *connection,
+ void *connection_cls) {
+
+ _cleanup_(MHD_destroy_responsep) struct MHD_Response *response = NULL;
+ RequestMeta *m = connection_cls;
+ int r;
+
+ assert(connection);
+ assert(m);
+
+ r = open_journal(m);
+ if (r < 0)
+ return mhd_respondf(connection, r, MHD_HTTP_INTERNAL_SERVER_ERROR, "Failed to open journal: %m");
+
+ if (request_parse_accept(m, connection) < 0)
+ return mhd_respond(connection, MHD_HTTP_BAD_REQUEST, "Failed to parse Accept header.");
+
+ if (request_parse_range(m, connection) < 0)
+ return mhd_respond(connection, MHD_HTTP_BAD_REQUEST, "Failed to parse Range header.");
+
+ if (request_parse_arguments(m, connection) < 0)
+ return mhd_respond(connection, MHD_HTTP_BAD_REQUEST, "Failed to parse URL arguments.");
+
+ if (m->discrete) {
+ if (!m->cursor)
+ return mhd_respond(connection, MHD_HTTP_BAD_REQUEST, "Discrete seeks require a cursor specification.");
+
+ m->n_entries = 1;
+ m->n_entries_set = true;
+ }
+
+ if (m->cursor)
+ r = sd_journal_seek_cursor(m->journal, m->cursor);
+ else if (m->n_skip >= 0)
+ r = sd_journal_seek_head(m->journal);
+ else if (m->n_skip < 0)
+ r = sd_journal_seek_tail(m->journal);
+ if (r < 0)
+ return mhd_respond(connection, MHD_HTTP_BAD_REQUEST, "Failed to seek in journal.");
+
+ response = MHD_create_response_from_callback(MHD_SIZE_UNKNOWN, 4*1024, request_reader_entries, m, NULL);
+ if (!response)
+ return respond_oom(connection);
+
+ MHD_add_response_header(response, "Content-Type", mime_types[m->mode]);
+ return MHD_queue_response(connection, MHD_HTTP_OK, response);
+}
+
+static int output_field(FILE *f, OutputMode m, const char *d, size_t l) {
+ const char *eq;
+ size_t j;
+
+ eq = memchr(d, '=', l);
+ if (!eq)
+ return -EINVAL;
+
+ j = l - (eq - d + 1);
+
+ if (m == OUTPUT_JSON) {
+ fprintf(f, "{ \"%.*s\" : ", (int) (eq - d), d);
+ json_escape(f, eq+1, j, OUTPUT_FULL_WIDTH);
+ fputs(" }\n", f);
+ } else {
+ fwrite(eq+1, 1, j, f);
+ fputc('\n', f);
+ }
+
+ return 0;
+}
+
+static ssize_t request_reader_fields(
+ void *cls,
+ uint64_t pos,
+ char *buf,
+ size_t max) {
+
+ RequestMeta *m = cls;
+ int r;
+ size_t n, k;
+
+ assert(m);
+ assert(buf);
+ assert(max > 0);
+ assert(pos >= m->delta);
+
+ pos -= m->delta;
+
+ while (pos >= m->size) {
+ off_t sz;
+ const void *d;
+ size_t l;
+
+ /* End of this field, so let's serialize the next
+ * one */
+
+ if (m->n_fields_set &&
+ m->n_fields <= 0)
+ return MHD_CONTENT_READER_END_OF_STREAM;
+
+ r = sd_journal_enumerate_unique(m->journal, &d, &l);
+ if (r < 0) {
+ log_error_errno(r, "Failed to advance field index: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ } else if (r == 0)
+ return MHD_CONTENT_READER_END_OF_STREAM;
+
+ pos -= m->size;
+ m->delta += m->size;
+
+ if (m->n_fields_set)
+ m->n_fields -= 1;
+
+ r = request_meta_ensure_tmp(m);
+ if (r < 0) {
+ log_error_errno(r, "Failed to create temporary file: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ r = output_field(m->tmp, m->mode, d, l);
+ if (r < 0) {
+ log_error_errno(r, "Failed to serialize item: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ sz = ftello(m->tmp);
+ if (sz == (off_t) -1) {
+ log_error_errno(errno, "Failed to retrieve file position: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ m->size = (uint64_t) sz;
+ }
+
+ if (fseeko(m->tmp, pos, SEEK_SET) < 0) {
+ log_error_errno(errno, "Failed to seek to position: %m");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ n = m->size - pos;
+ if (n > max)
+ n = max;
+
+ errno = 0;
+ k = fread(buf, 1, n, m->tmp);
+ if (k != n) {
+ log_error("Failed to read from file: %s", errno ? strerror(errno) : "Premature EOF");
+ return MHD_CONTENT_READER_END_WITH_ERROR;
+ }
+
+ return (ssize_t) k;
+}
+
+static int request_handler_fields(
+ struct MHD_Connection *connection,
+ const char *field,
+ void *connection_cls) {
+
+ _cleanup_(MHD_destroy_responsep) struct MHD_Response *response = NULL;
+ RequestMeta *m = connection_cls;
+ int r;
+
+ assert(connection);
+ assert(m);
+
+ r = open_journal(m);
+ if (r < 0)
+ return mhd_respondf(connection, r, MHD_HTTP_INTERNAL_SERVER_ERROR, "Failed to open journal: %m");
+
+ if (request_parse_accept(m, connection) < 0)
+ return mhd_respond(connection, MHD_HTTP_BAD_REQUEST, "Failed to parse Accept header.");
+
+ r = sd_journal_query_unique(m->journal, field);
+ if (r < 0)
+ return mhd_respond(connection, MHD_HTTP_BAD_REQUEST, "Failed to query unique fields.");
+
+ response = MHD_create_response_from_callback(MHD_SIZE_UNKNOWN, 4*1024, request_reader_fields, m, NULL);
+ if (!response)
+ return respond_oom(connection);
+
+ MHD_add_response_header(response, "Content-Type", mime_types[m->mode == OUTPUT_JSON ? OUTPUT_JSON : OUTPUT_SHORT]);
+ return MHD_queue_response(connection, MHD_HTTP_OK, response);
+}
+
+static int request_handler_redirect(
+ struct MHD_Connection *connection,
+ const char *target) {
+
+ char *page;
+ _cleanup_(MHD_destroy_responsep) struct MHD_Response *response = NULL;
+
+ assert(connection);
+ assert(target);
+
+ if (asprintf(&page, "<html><body>Please continue to the <a href=\"%s\">journal browser</a>.</body></html>", target) < 0)
+ return respond_oom(connection);
+
+ response = MHD_create_response_from_buffer(strlen(page), page, MHD_RESPMEM_MUST_FREE);
+ if (!response) {
+ free(page);
+ return respond_oom(connection);
+ }
+
+ MHD_add_response_header(response, "Content-Type", "text/html");
+ MHD_add_response_header(response, "Location", target);
+ return MHD_queue_response(connection, MHD_HTTP_MOVED_PERMANENTLY, response);
+}
+
+static int request_handler_file(
+ struct MHD_Connection *connection,
+ const char *path,
+ const char *mime_type) {
+
+ _cleanup_(MHD_destroy_responsep) struct MHD_Response *response = NULL;
+ _cleanup_close_ int fd = -1;
+ struct stat st;
+
+ assert(connection);
+ assert(path);
+ assert(mime_type);
+
+ fd = open(path, O_RDONLY|O_CLOEXEC);
+ if (fd < 0)
+ return mhd_respondf(connection, errno, MHD_HTTP_NOT_FOUND, "Failed to open file %s: %m", path);
+
+ if (fstat(fd, &st) < 0)
+ return mhd_respondf(connection, errno, MHD_HTTP_INTERNAL_SERVER_ERROR, "Failed to stat file: %m");
+
+ response = MHD_create_response_from_fd_at_offset64(st.st_size, fd, 0);
+ if (!response)
+ return respond_oom(connection);
+ TAKE_FD(fd);
+
+ MHD_add_response_header(response, "Content-Type", mime_type);
+ return MHD_queue_response(connection, MHD_HTTP_OK, response);
+}
+
+static int get_virtualization(char **v) {
+ _cleanup_(sd_bus_unrefp) sd_bus *bus = NULL;
+ char *b = NULL;
+ int r;
+
+ r = sd_bus_default_system(&bus);
+ if (r < 0)
+ return r;
+
+ r = sd_bus_get_property_string(
+ bus,
+ "org.freedesktop.systemd1",
+ "/org/freedesktop/systemd1",
+ "org.freedesktop.systemd1.Manager",
+ "Virtualization",
+ NULL,
+ &b);
+ if (r < 0)
+ return r;
+
+ if (isempty(b)) {
+ free(b);
+ *v = NULL;
+ return 0;
+ }
+
+ *v = b;
+ return 1;
+}
+
+static int request_handler_machine(
+ struct MHD_Connection *connection,
+ void *connection_cls) {
+
+ _cleanup_(MHD_destroy_responsep) struct MHD_Response *response = NULL;
+ RequestMeta *m = connection_cls;
+ int r;
+ _cleanup_free_ char* hostname = NULL, *os_name = NULL;
+ uint64_t cutoff_from = 0, cutoff_to = 0, usage = 0;
+ sd_id128_t mid, bid;
+ _cleanup_free_ char *v = NULL, *json = NULL;
+
+ assert(connection);
+ assert(m);
+
+ r = open_journal(m);
+ if (r < 0)
+ return mhd_respondf(connection, r, MHD_HTTP_INTERNAL_SERVER_ERROR, "Failed to open journal: %m");
+
+ r = sd_id128_get_machine(&mid);
+ if (r < 0)
+ return mhd_respondf(connection, r, MHD_HTTP_INTERNAL_SERVER_ERROR, "Failed to determine machine ID: %m");
+
+ r = sd_id128_get_boot(&bid);
+ if (r < 0)
+ return mhd_respondf(connection, r, MHD_HTTP_INTERNAL_SERVER_ERROR, "Failed to determine boot ID: %m");
+
+ hostname = gethostname_malloc();
+ if (!hostname)
+ return respond_oom(connection);
+
+ r = sd_journal_get_usage(m->journal, &usage);
+ if (r < 0)
+ return mhd_respondf(connection, r, MHD_HTTP_INTERNAL_SERVER_ERROR, "Failed to determine disk usage: %m");
+
+ r = sd_journal_get_cutoff_realtime_usec(m->journal, &cutoff_from, &cutoff_to);
+ if (r < 0)
+ return mhd_respondf(connection, r, MHD_HTTP_INTERNAL_SERVER_ERROR, "Failed to determine disk usage: %m");
+
+ (void) parse_os_release(NULL, "PRETTY_NAME", &os_name, NULL);
+ (void) get_virtualization(&v);
+
+ r = asprintf(&json,
+ "{ \"machine_id\" : \"" SD_ID128_FORMAT_STR "\","
+ "\"boot_id\" : \"" SD_ID128_FORMAT_STR "\","
+ "\"hostname\" : \"%s\","
+ "\"os_pretty_name\" : \"%s\","
+ "\"virtualization\" : \"%s\","
+ "\"usage\" : \"%"PRIu64"\","
+ "\"cutoff_from_realtime\" : \"%"PRIu64"\","
+ "\"cutoff_to_realtime\" : \"%"PRIu64"\" }\n",
+ SD_ID128_FORMAT_VAL(mid),
+ SD_ID128_FORMAT_VAL(bid),
+ hostname_cleanup(hostname),
+ os_name ? os_name : "Linux",
+ v ? v : "bare",
+ usage,
+ cutoff_from,
+ cutoff_to);
+ if (r < 0)
+ return respond_oom(connection);
+
+ response = MHD_create_response_from_buffer(strlen(json), json, MHD_RESPMEM_MUST_FREE);
+ if (!response)
+ return respond_oom(connection);
+ TAKE_PTR(json);
+
+ MHD_add_response_header(response, "Content-Type", "application/json");
+ return MHD_queue_response(connection, MHD_HTTP_OK, response);
+}
+
+static int request_handler(
+ void *cls,
+ struct MHD_Connection *connection,
+ const char *url,
+ const char *method,
+ const char *version,
+ const char *upload_data,
+ size_t *upload_data_size,
+ void **connection_cls) {
+ int r, code;
+
+ assert(connection);
+ assert(connection_cls);
+ assert(url);
+ assert(method);
+
+ if (!streq(method, "GET"))
+ return mhd_respond(connection, MHD_HTTP_NOT_ACCEPTABLE, "Unsupported method.");
+
+ if (!*connection_cls) {
+ if (!request_meta(connection_cls))
+ return respond_oom(connection);
+ return MHD_YES;
+ }
+
+ if (arg_trust_pem) {
+ r = check_permissions(connection, &code, NULL);
+ if (r < 0)
+ return code;
+ }
+
+ if (streq(url, "/"))
+ return request_handler_redirect(connection, "/browse");
+
+ if (streq(url, "/entries"))
+ return request_handler_entries(connection, *connection_cls);
+
+ if (startswith(url, "/fields/"))
+ return request_handler_fields(connection, url + 8, *connection_cls);
+
+ if (streq(url, "/browse"))
+ return request_handler_file(connection, DOCUMENT_ROOT "/browse.html", "text/html");
+
+ if (streq(url, "/machine"))
+ return request_handler_machine(connection, *connection_cls);
+
+ return mhd_respond(connection, MHD_HTTP_NOT_FOUND, "Not found.");
+}
+
+static int help(void) {
+ _cleanup_free_ char *link = NULL;
+ int r;
+
+ r = terminal_urlify_man("systemd-journal-gatewayd.service", "8", &link);
+ if (r < 0)
+ return log_oom();
+
+ printf("%s [OPTIONS...] ...\n\n"
+ "HTTP server for journal events.\n\n"
+ " -h --help Show this help\n"
+ " --version Show package version\n"
+ " --cert=CERT.PEM Server certificate in PEM format\n"
+ " --key=KEY.PEM Server key in PEM format\n"
+ " --trust=CERT.PEM Certificate authority certificate in PEM format\n"
+ " -D --directory=PATH Serve journal files in directory\n"
+ "\nSee the %s for details.\n"
+ , program_invocation_short_name
+ , link
+ );
+
+ return 0;
+}
+
+static int parse_argv(int argc, char *argv[]) {
+ enum {
+ ARG_VERSION = 0x100,
+ ARG_KEY,
+ ARG_CERT,
+ ARG_TRUST,
+ };
+
+ int r, c;
+
+ static const struct option options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, ARG_VERSION },
+ { "key", required_argument, NULL, ARG_KEY },
+ { "cert", required_argument, NULL, ARG_CERT },
+ { "trust", required_argument, NULL, ARG_TRUST },
+ { "directory", required_argument, NULL, 'D' },
+ {}
+ };
+
+ assert(argc >= 0);
+ assert(argv);
+
+ while ((c = getopt_long(argc, argv, "hD:", options, NULL)) >= 0)
+
+ switch(c) {
+
+ case 'h':
+ return help();
+
+ case ARG_VERSION:
+ return version();
+
+ case ARG_KEY:
+ if (arg_key_pem)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Key file specified twice");
+ r = read_full_file(optarg, &arg_key_pem, NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read key file: %m");
+ assert(arg_key_pem);
+ break;
+
+ case ARG_CERT:
+ if (arg_cert_pem)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Certificate file specified twice");
+ r = read_full_file(optarg, &arg_cert_pem, NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read certificate file: %m");
+ assert(arg_cert_pem);
+ break;
+
+ case ARG_TRUST:
+#if HAVE_GNUTLS
+ if (arg_trust_pem)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "CA certificate file specified twice");
+ r = read_full_file(optarg, &arg_trust_pem, NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read CA certificate file: %m");
+ assert(arg_trust_pem);
+ break;
+#else
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Option --trust is not available.");
+#endif
+ case 'D':
+ arg_directory = optarg;
+ break;
+
+ case '?':
+ return -EINVAL;
+
+ default:
+ assert_not_reached("Unhandled option");
+ }
+
+ if (optind < argc)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "This program does not take arguments.");
+
+ if (!!arg_key_pem != !!arg_cert_pem)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Certificate and key files must be specified together");
+
+ if (arg_trust_pem && !arg_key_pem)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "CA certificate can only be used with certificate file");
+
+ return 1;
+}
+
+static int run(int argc, char *argv[]) {
+ _cleanup_(MHD_stop_daemonp) struct MHD_Daemon *d = NULL;
+ struct MHD_OptionItem opts[] = {
+ { MHD_OPTION_NOTIFY_COMPLETED,
+ (intptr_t) request_meta_free, NULL },
+ { MHD_OPTION_EXTERNAL_LOGGER,
+ (intptr_t) microhttpd_logger, NULL },
+ { MHD_OPTION_END, 0, NULL },
+ { MHD_OPTION_END, 0, NULL },
+ { MHD_OPTION_END, 0, NULL },
+ { MHD_OPTION_END, 0, NULL },
+ { MHD_OPTION_END, 0, NULL },
+ };
+ int opts_pos = 2;
+
+ /* We force MHD_USE_ITC here, in order to make sure
+ * libmicrohttpd doesn't use shutdown() on our listening
+ * socket, which would break socket re-activation. See
+ *
+ * https://lists.gnu.org/archive/html/libmicrohttpd/2015-09/msg00014.html
+ * https://github.com/systemd/systemd/pull/1286
+ */
+
+ int flags =
+ MHD_USE_DEBUG |
+ MHD_USE_DUAL_STACK |
+ MHD_USE_ITC |
+ MHD_USE_POLL_INTERNAL_THREAD |
+ MHD_USE_THREAD_PER_CONNECTION;
+ int r, n;
+
+ log_setup_service();
+
+ r = parse_argv(argc, argv);
+ if (r <= 0)
+ return r;
+
+ sigbus_install();
+
+ r = setup_gnutls_logger(NULL);
+ if (r < 0)
+ return r;
+
+ n = sd_listen_fds(1);
+ if (n < 0)
+ return log_error_errno(n, "Failed to determine passed sockets: %m");
+ if (n > 1)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Can't listen on more than one socket.");
+
+ if (n == 1)
+ opts[opts_pos++] = (struct MHD_OptionItem)
+ { MHD_OPTION_LISTEN_SOCKET, SD_LISTEN_FDS_START };
+
+ if (arg_key_pem) {
+ assert(arg_cert_pem);
+ opts[opts_pos++] = (struct MHD_OptionItem)
+ { MHD_OPTION_HTTPS_MEM_KEY, 0, arg_key_pem };
+ opts[opts_pos++] = (struct MHD_OptionItem)
+ { MHD_OPTION_HTTPS_MEM_CERT, 0, arg_cert_pem };
+ flags |= MHD_USE_TLS;
+ }
+
+ if (arg_trust_pem) {
+ assert(flags & MHD_USE_TLS);
+ opts[opts_pos++] = (struct MHD_OptionItem)
+ { MHD_OPTION_HTTPS_MEM_TRUST, 0, arg_trust_pem };
+ }
+
+ d = MHD_start_daemon(flags, 19531,
+ NULL, NULL,
+ request_handler, NULL,
+ MHD_OPTION_ARRAY, opts,
+ MHD_OPTION_END);
+ if (!d)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Failed to start daemon!");
+
+ pause();
+
+ return 0;
+}
+
+DEFINE_MAIN_FUNCTION(run);
diff --git a/src/journal-remote/journal-remote-main.c b/src/journal-remote/journal-remote-main.c
new file mode 100644
index 0000000..802c3ea
--- /dev/null
+++ b/src/journal-remote/journal-remote-main.c
@@ -0,0 +1,1158 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <getopt.h>
+#include <unistd.h>
+
+#include "sd-daemon.h"
+
+#include "conf-parser.h"
+#include "daemon-util.h"
+#include "def.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "journal-remote-write.h"
+#include "journal-remote.h"
+#include "main-func.h"
+#include "pretty-print.h"
+#include "process-util.h"
+#include "rlimit-util.h"
+#include "signal-util.h"
+#include "socket-util.h"
+#include "stat-util.h"
+#include "string-table.h"
+#include "strv.h"
+
+#define PRIV_KEY_FILE CERTIFICATE_ROOT "/private/journal-remote.pem"
+#define CERT_FILE CERTIFICATE_ROOT "/certs/journal-remote.pem"
+#define TRUST_FILE CERTIFICATE_ROOT "/ca/trusted.pem"
+
+static const char* arg_url = NULL;
+static const char* arg_getter = NULL;
+static const char* arg_listen_raw = NULL;
+static const char* arg_listen_http = NULL;
+static const char* arg_listen_https = NULL;
+static char** arg_files = NULL; /* Do not free this. */
+static int arg_compress = true;
+static int arg_seal = false;
+static int http_socket = -1, https_socket = -1;
+static char** arg_gnutls_log = NULL;
+
+static JournalWriteSplitMode arg_split_mode = _JOURNAL_WRITE_SPLIT_INVALID;
+static const char* arg_output = NULL;
+
+static char *arg_key = NULL;
+static char *arg_cert = NULL;
+static char *arg_trust = NULL;
+static bool arg_trust_all = false;
+
+STATIC_DESTRUCTOR_REGISTER(arg_gnutls_log, strv_freep);
+STATIC_DESTRUCTOR_REGISTER(arg_key, freep);
+STATIC_DESTRUCTOR_REGISTER(arg_cert, freep);
+STATIC_DESTRUCTOR_REGISTER(arg_trust, freep);
+
+static const char* const journal_write_split_mode_table[_JOURNAL_WRITE_SPLIT_MAX] = {
+ [JOURNAL_WRITE_SPLIT_NONE] = "none",
+ [JOURNAL_WRITE_SPLIT_HOST] = "host",
+};
+
+DEFINE_PRIVATE_STRING_TABLE_LOOKUP(journal_write_split_mode, JournalWriteSplitMode);
+static DEFINE_CONFIG_PARSE_ENUM(config_parse_write_split_mode,
+ journal_write_split_mode,
+ JournalWriteSplitMode,
+ "Failed to parse split mode setting");
+
+/**********************************************************************
+ **********************************************************************
+ **********************************************************************/
+
+static int spawn_child(const char* child, char** argv) {
+ pid_t child_pid;
+ int fd[2], r;
+
+ if (pipe(fd) < 0)
+ return log_error_errno(errno, "Failed to create pager pipe: %m");
+
+ r = safe_fork("(remote)", FORK_RESET_SIGNALS|FORK_DEATHSIG|FORK_LOG, &child_pid);
+ if (r < 0) {
+ safe_close_pair(fd);
+ return r;
+ }
+
+ /* In the child */
+ if (r == 0) {
+ safe_close(fd[0]);
+
+ r = rearrange_stdio(STDIN_FILENO, fd[1], STDERR_FILENO);
+ if (r < 0) {
+ log_error_errno(r, "Failed to dup pipe to stdout: %m");
+ _exit(EXIT_FAILURE);
+ }
+
+ (void) rlimit_nofile_safe();
+
+ execvp(child, argv);
+ log_error_errno(errno, "Failed to exec child %s: %m", child);
+ _exit(EXIT_FAILURE);
+ }
+
+ safe_close(fd[1]);
+
+ r = fd_nonblock(fd[0], true);
+ if (r < 0)
+ log_warning_errno(errno, "Failed to set child pipe to non-blocking: %m");
+
+ return fd[0];
+}
+
+static int spawn_curl(const char* url) {
+ char **argv = STRV_MAKE("curl",
+ "-HAccept: application/vnd.fdo.journal",
+ "--silent",
+ "--show-error",
+ url);
+ int r;
+
+ r = spawn_child("curl", argv);
+ if (r < 0)
+ log_error_errno(r, "Failed to spawn curl: %m");
+ return r;
+}
+
+static int spawn_getter(const char *getter) {
+ int r;
+ _cleanup_strv_free_ char **words = NULL;
+
+ assert(getter);
+ r = strv_split_extract(&words, getter, WHITESPACE, EXTRACT_QUOTES);
+ if (r < 0)
+ return log_error_errno(r, "Failed to split getter option: %m");
+
+ r = spawn_child(words[0], words);
+ if (r < 0)
+ log_error_errno(r, "Failed to spawn getter %s: %m", getter);
+
+ return r;
+}
+
+/**********************************************************************
+ **********************************************************************
+ **********************************************************************/
+
+static int null_timer_event_handler(sd_event_source *s,
+ uint64_t usec,
+ void *userdata);
+static int dispatch_http_event(sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ void *userdata);
+
+static int request_meta(void **connection_cls, int fd, char *hostname) {
+ RemoteSource *source;
+ Writer *writer;
+ int r;
+
+ assert(connection_cls);
+ if (*connection_cls)
+ return 0;
+
+ r = journal_remote_get_writer(journal_remote_server_global, hostname, &writer);
+ if (r < 0)
+ return log_warning_errno(r, "Failed to get writer for source %s: %m",
+ hostname);
+
+ source = source_new(fd, true, hostname, writer);
+ if (!source) {
+ writer_unref(writer);
+ return log_oom();
+ }
+
+ log_debug("Added RemoteSource as connection metadata %p", source);
+
+ *connection_cls = source;
+ return 0;
+}
+
+static void request_meta_free(void *cls,
+ struct MHD_Connection *connection,
+ void **connection_cls,
+ enum MHD_RequestTerminationCode toe) {
+ RemoteSource *s;
+
+ assert(connection_cls);
+ s = *connection_cls;
+
+ if (s) {
+ log_debug("Cleaning up connection metadata %p", s);
+ source_free(s);
+ *connection_cls = NULL;
+ }
+}
+
+static int process_http_upload(
+ struct MHD_Connection *connection,
+ const char *upload_data,
+ size_t *upload_data_size,
+ RemoteSource *source) {
+
+ bool finished = false;
+ size_t remaining;
+ int r;
+
+ assert(source);
+
+ log_trace("%s: connection %p, %zu bytes",
+ __func__, connection, *upload_data_size);
+
+ if (*upload_data_size) {
+ log_trace("Received %zu bytes", *upload_data_size);
+
+ r = journal_importer_push_data(&source->importer,
+ upload_data, *upload_data_size);
+ if (r < 0)
+ return mhd_respond_oom(connection);
+
+ *upload_data_size = 0;
+ } else
+ finished = true;
+
+ for (;;) {
+ r = process_source(source,
+ journal_remote_server_global->compress,
+ journal_remote_server_global->seal);
+ if (r == -EAGAIN)
+ break;
+ if (r < 0) {
+ if (r == -ENOBUFS)
+ log_warning_errno(r, "Entry is above the maximum of %u, aborting connection %p.",
+ DATA_SIZE_MAX, connection);
+ else if (r == -E2BIG)
+ log_warning_errno(r, "Entry with more fields than the maximum of %u, aborting connection %p.",
+ ENTRY_FIELD_COUNT_MAX, connection);
+ else
+ log_warning_errno(r, "Failed to process data, aborting connection %p: %m",
+ connection);
+ return MHD_NO;
+ }
+ }
+
+ if (!finished)
+ return MHD_YES;
+
+ /* The upload is finished */
+
+ remaining = journal_importer_bytes_remaining(&source->importer);
+ if (remaining > 0) {
+ log_warning("Premature EOF byte. %zu bytes lost.", remaining);
+ return mhd_respondf(connection,
+ 0, MHD_HTTP_EXPECTATION_FAILED,
+ "Premature EOF. %zu bytes of trailing data not processed.",
+ remaining);
+ }
+
+ return mhd_respond(connection, MHD_HTTP_ACCEPTED, "OK.");
+};
+
+static int request_handler(
+ void *cls,
+ struct MHD_Connection *connection,
+ const char *url,
+ const char *method,
+ const char *version,
+ const char *upload_data,
+ size_t *upload_data_size,
+ void **connection_cls) {
+
+ const char *header;
+ int r, code, fd;
+ _cleanup_free_ char *hostname = NULL;
+ size_t len;
+
+ assert(connection);
+ assert(connection_cls);
+ assert(url);
+ assert(method);
+
+ log_trace("Handling a connection %s %s %s", method, url, version);
+
+ if (*connection_cls)
+ return process_http_upload(connection,
+ upload_data, upload_data_size,
+ *connection_cls);
+
+ if (!streq(method, "POST"))
+ return mhd_respond(connection, MHD_HTTP_NOT_ACCEPTABLE, "Unsupported method.");
+
+ if (!streq(url, "/upload"))
+ return mhd_respond(connection, MHD_HTTP_NOT_FOUND, "Not found.");
+
+ header = MHD_lookup_connection_value(connection, MHD_HEADER_KIND, "Content-Type");
+ if (!header || !streq(header, "application/vnd.fdo.journal"))
+ return mhd_respond(connection, MHD_HTTP_UNSUPPORTED_MEDIA_TYPE,
+ "Content-Type: application/vnd.fdo.journal is required.");
+
+ header = MHD_lookup_connection_value(connection, MHD_HEADER_KIND, "Content-Length");
+ if (!header)
+ return mhd_respond(connection, MHD_HTTP_LENGTH_REQUIRED,
+ "Content-Length header is required.");
+ r = safe_atozu(header, &len);
+ if (r < 0)
+ return mhd_respondf(connection, r, MHD_HTTP_LENGTH_REQUIRED,
+ "Content-Length: %s cannot be parsed: %m", header);
+
+ if (len > ENTRY_SIZE_MAX)
+ /* When serialized, an entry of maximum size might be slightly larger,
+ * so this does not correspond exactly to the limit in journald. Oh well.
+ */
+ return mhd_respondf(connection, 0, MHD_HTTP_PAYLOAD_TOO_LARGE,
+ "Payload larger than maximum size of %u bytes", ENTRY_SIZE_MAX);
+
+ {
+ const union MHD_ConnectionInfo *ci;
+
+ ci = MHD_get_connection_info(connection,
+ MHD_CONNECTION_INFO_CONNECTION_FD);
+ if (!ci) {
+ log_error("MHD_get_connection_info failed: cannot get remote fd");
+ return mhd_respond(connection, MHD_HTTP_INTERNAL_SERVER_ERROR,
+ "Cannot check remote address.");
+ }
+
+ fd = ci->connect_fd;
+ assert(fd >= 0);
+ }
+
+ if (journal_remote_server_global->check_trust) {
+ r = check_permissions(connection, &code, &hostname);
+ if (r < 0)
+ return code;
+ } else {
+ r = getpeername_pretty(fd, false, &hostname);
+ if (r < 0)
+ return mhd_respond(connection, MHD_HTTP_INTERNAL_SERVER_ERROR,
+ "Cannot check remote hostname.");
+ }
+
+ assert(hostname);
+
+ r = request_meta(connection_cls, fd, hostname);
+ if (r == -ENOMEM)
+ return respond_oom(connection);
+ else if (r < 0)
+ return mhd_respondf(connection, r, MHD_HTTP_INTERNAL_SERVER_ERROR, "%m");
+
+ hostname = NULL;
+ return MHD_YES;
+}
+
+static int setup_microhttpd_server(RemoteServer *s,
+ int fd,
+ const char *key,
+ const char *cert,
+ const char *trust) {
+ struct MHD_OptionItem opts[] = {
+ { MHD_OPTION_NOTIFY_COMPLETED, (intptr_t) request_meta_free},
+ { MHD_OPTION_EXTERNAL_LOGGER, (intptr_t) microhttpd_logger},
+ { MHD_OPTION_LISTEN_SOCKET, fd},
+ { MHD_OPTION_CONNECTION_MEMORY_LIMIT, 128*1024},
+ { MHD_OPTION_END},
+ { MHD_OPTION_END},
+ { MHD_OPTION_END},
+ { MHD_OPTION_END},
+ { MHD_OPTION_END}};
+ int opts_pos = 4;
+ int flags =
+ MHD_USE_DEBUG |
+ MHD_USE_DUAL_STACK |
+ MHD_USE_EPOLL |
+ MHD_USE_ITC;
+
+ const union MHD_DaemonInfo *info;
+ int r, epoll_fd;
+ MHDDaemonWrapper *d;
+
+ assert(fd >= 0);
+
+ r = fd_nonblock(fd, true);
+ if (r < 0)
+ return log_error_errno(r, "Failed to make fd:%d nonblocking: %m", fd);
+
+/* MHD_OPTION_STRICT_FOR_CLIENT was introduced in microhttpd 0.9.54,
+ * and MHD_USE_PEDANTIC_CHECKS will be deprecated in the future.
+ * If MHD_USE_PEDANTIC_CHECKS is '#define'd, then it is deprecated
+ * and we should use MHD_OPTION_STRICT_FOR_CLIENT. On the other hand,
+ * if MHD_USE_PEDANTIC_CHECKS is not '#define'd, then it is not
+ * deprecated yet and there exists an enum element with the same name.
+ * So we can safely use it. */
+#ifdef MHD_USE_PEDANTIC_CHECKS
+ opts[opts_pos++] = (struct MHD_OptionItem)
+ {MHD_OPTION_STRICT_FOR_CLIENT, 1};
+#else
+ flags |= MHD_USE_PEDANTIC_CHECKS;
+#endif
+
+ if (key) {
+ assert(cert);
+
+ opts[opts_pos++] = (struct MHD_OptionItem)
+ {MHD_OPTION_HTTPS_MEM_KEY, 0, (char*) key};
+ opts[opts_pos++] = (struct MHD_OptionItem)
+ {MHD_OPTION_HTTPS_MEM_CERT, 0, (char*) cert};
+
+ flags |= MHD_USE_TLS;
+
+ if (trust)
+ opts[opts_pos++] = (struct MHD_OptionItem)
+ {MHD_OPTION_HTTPS_MEM_TRUST, 0, (char*) trust};
+ }
+
+ d = new(MHDDaemonWrapper, 1);
+ if (!d)
+ return log_oom();
+
+ d->fd = (uint64_t) fd;
+
+ d->daemon = MHD_start_daemon(flags, 0,
+ NULL, NULL,
+ request_handler, NULL,
+ MHD_OPTION_ARRAY, opts,
+ MHD_OPTION_END);
+ if (!d->daemon) {
+ log_error("Failed to start µhttp daemon");
+ r = -EINVAL;
+ goto error;
+ }
+
+ log_debug("Started MHD %s daemon on fd:%d (wrapper @ %p)",
+ key ? "HTTPS" : "HTTP", fd, d);
+
+ info = MHD_get_daemon_info(d->daemon, MHD_DAEMON_INFO_EPOLL_FD_LINUX_ONLY);
+ if (!info) {
+ log_error("µhttp returned NULL daemon info");
+ r = -EOPNOTSUPP;
+ goto error;
+ }
+
+ epoll_fd = info->listen_fd;
+ if (epoll_fd < 0) {
+ log_error("µhttp epoll fd is invalid");
+ r = -EUCLEAN;
+ goto error;
+ }
+
+ r = sd_event_add_io(s->events, &d->io_event,
+ epoll_fd, EPOLLIN,
+ dispatch_http_event, d);
+ if (r < 0) {
+ log_error_errno(r, "Failed to add event callback: %m");
+ goto error;
+ }
+
+ r = sd_event_source_set_description(d->io_event, "io_event");
+ if (r < 0) {
+ log_error_errno(r, "Failed to set source name: %m");
+ goto error;
+ }
+
+ r = sd_event_add_time(s->events, &d->timer_event,
+ CLOCK_MONOTONIC, (uint64_t) -1, 0,
+ null_timer_event_handler, d);
+ if (r < 0) {
+ log_error_errno(r, "Failed to add timer_event: %m");
+ goto error;
+ }
+
+ r = sd_event_source_set_description(d->timer_event, "timer_event");
+ if (r < 0) {
+ log_error_errno(r, "Failed to set source name: %m");
+ goto error;
+ }
+
+ r = hashmap_ensure_allocated(&s->daemons, &uint64_hash_ops);
+ if (r < 0) {
+ log_oom();
+ goto error;
+ }
+
+ r = hashmap_put(s->daemons, &d->fd, d);
+ if (r < 0) {
+ log_error_errno(r, "Failed to add daemon to hashmap: %m");
+ goto error;
+ }
+
+ s->active++;
+ return 0;
+
+error:
+ MHD_stop_daemon(d->daemon);
+ free(d->daemon);
+ free(d);
+ return r;
+}
+
+static int setup_microhttpd_socket(RemoteServer *s,
+ const char *address,
+ const char *key,
+ const char *cert,
+ const char *trust) {
+ int fd;
+
+ fd = make_socket_fd(LOG_DEBUG, address, SOCK_STREAM, SOCK_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ return setup_microhttpd_server(s, fd, key, cert, trust);
+}
+
+static int null_timer_event_handler(sd_event_source *timer_event,
+ uint64_t usec,
+ void *userdata) {
+ return dispatch_http_event(timer_event, 0, 0, userdata);
+}
+
+static int dispatch_http_event(sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ void *userdata) {
+ MHDDaemonWrapper *d = userdata;
+ int r;
+ MHD_UNSIGNED_LONG_LONG timeout = ULONG_LONG_MAX;
+
+ assert(d);
+
+ r = MHD_run(d->daemon);
+ if (r == MHD_NO)
+ // FIXME: unregister daemon
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "MHD_run failed!");
+ if (MHD_get_timeout(d->daemon, &timeout) == MHD_NO)
+ timeout = ULONG_LONG_MAX;
+
+ r = sd_event_source_set_time(d->timer_event, timeout);
+ if (r < 0) {
+ log_warning_errno(r, "Unable to set event loop timeout: %m, this may result in indefinite blocking!");
+ return 1;
+ }
+
+ r = sd_event_source_set_enabled(d->timer_event, SD_EVENT_ON);
+ if (r < 0)
+ log_warning_errno(r, "Unable to enable timer_event: %m, this may result in indefinite blocking!");
+
+ return 1; /* work to do */
+}
+
+/**********************************************************************
+ **********************************************************************
+ **********************************************************************/
+
+static int setup_signals(RemoteServer *s) {
+ int r;
+
+ assert(s);
+
+ assert_se(sigprocmask_many(SIG_SETMASK, NULL, SIGINT, SIGTERM, -1) >= 0);
+
+ r = sd_event_add_signal(s->events, &s->sigterm_event, SIGTERM, NULL, s);
+ if (r < 0)
+ return r;
+
+ r = sd_event_add_signal(s->events, &s->sigint_event, SIGINT, NULL, s);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int setup_raw_socket(RemoteServer *s, const char *address) {
+ int fd;
+
+ fd = make_socket_fd(LOG_INFO, address, SOCK_STREAM, SOCK_CLOEXEC);
+ if (fd < 0)
+ return fd;
+
+ return journal_remote_add_raw_socket(s, fd);
+}
+
+static int create_remoteserver(
+ RemoteServer *s,
+ const char* key,
+ const char* cert,
+ const char* trust) {
+
+ int r, n, fd;
+ char **file;
+
+ r = journal_remote_server_init(s, arg_output, arg_split_mode, arg_compress, arg_seal);
+ if (r < 0)
+ return r;
+
+ r = setup_signals(s);
+ if (r < 0)
+ return log_error_errno(r, "Failed to set up signals: %m");
+
+ n = sd_listen_fds(true);
+ if (n < 0)
+ return log_error_errno(n, "Failed to read listening file descriptors from environment: %m");
+ else
+ log_debug("Received %d descriptors", n);
+
+ if (MAX(http_socket, https_socket) >= SD_LISTEN_FDS_START + n)
+ return log_error_errno(SYNTHETIC_ERRNO(EBADFD),
+ "Received fewer sockets than expected");
+
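+        /* Walk the file descriptors passed in by the service manager: listening
+         * sockets become HTTP/HTTPS/raw listeners, already-connected sockets are
+         * registered directly as sources. */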
+ for (fd = SD_LISTEN_FDS_START; fd < SD_LISTEN_FDS_START + n; fd++) {
+ if (sd_is_socket(fd, AF_UNSPEC, 0, true)) {
+ log_debug("Received a listening socket (fd:%d)", fd);
+
+ if (fd == http_socket)
+ r = setup_microhttpd_server(s, fd, NULL, NULL, NULL);
+ else if (fd == https_socket)
+ r = setup_microhttpd_server(s, fd, key, cert, trust);
+ else
+ r = journal_remote_add_raw_socket(s, fd);
+ } else if (sd_is_socket(fd, AF_UNSPEC, 0, false)) {
+ char *hostname;
+
+ r = getpeername_pretty(fd, false, &hostname);
+ if (r < 0)
+ return log_error_errno(r, "Failed to retrieve remote name: %m");
+
+ log_debug("Received a connection socket (fd:%d) from %s", fd, hostname);
+
+ r = journal_remote_add_source(s, fd, hostname, true);
+ } else
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Unknown socket passed on fd:%d", fd);
+
+ if (r < 0)
+ return log_error_errno(r, "Failed to register socket (fd:%d): %m", fd);
+ }
+
+ if (arg_getter) {
+ log_info("Spawning getter %s...", arg_getter);
+ fd = spawn_getter(arg_getter);
+ if (fd < 0)
+ return fd;
+
+ r = journal_remote_add_source(s, fd, (char*) arg_output, false);
+ if (r < 0)
+ return r;
+ }
+
+ if (arg_url) {
+ const char *url, *hostname;
+
+ if (!strstr(arg_url, "/entries")) {
+ if (endswith(arg_url, "/"))
+ url = strjoina(arg_url, "entries");
+ else
+ url = strjoina(arg_url, "/entries");
+ } else
+ url = strdupa(arg_url);
+
+ log_info("Spawning curl %s...", url);
+ fd = spawn_curl(url);
+ if (fd < 0)
+ return fd;
+
+ hostname = STARTSWITH_SET(arg_url, "https://", "http://");
+ if (!hostname)
+ hostname = arg_url;
+
+ hostname = strndupa(hostname, strcspn(hostname, "/:"));
+
+ r = journal_remote_add_source(s, fd, (char *) hostname, false);
+ if (r < 0)
+ return r;
+ }
+
+ if (arg_listen_raw) {
+ log_debug("Listening on a socket...");
+ r = setup_raw_socket(s, arg_listen_raw);
+ if (r < 0)
+ return r;
+ }
+
+ if (arg_listen_http) {
+ r = setup_microhttpd_socket(s, arg_listen_http, NULL, NULL, NULL);
+ if (r < 0)
+ return r;
+ }
+
+ if (arg_listen_https) {
+ r = setup_microhttpd_socket(s, arg_listen_https, key, cert, trust);
+ if (r < 0)
+ return r;
+ }
+
+ STRV_FOREACH(file, arg_files) {
+ const char *output_name;
+
+ if (streq(*file, "-")) {
+ log_debug("Using standard input as source.");
+
+ fd = STDIN_FILENO;
+ output_name = "stdin";
+ } else {
+ log_debug("Reading file %s...", *file);
+
+ fd = open(*file, O_RDONLY|O_CLOEXEC|O_NOCTTY|O_NONBLOCK);
+ if (fd < 0)
+ return log_error_errno(errno, "Failed to open %s: %m", *file);
+ output_name = *file;
+ }
+
+ r = journal_remote_add_source(s, fd, (char*) output_name, false);
+ if (r < 0)
+ return r;
+ }
+
+ if (s->active == 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Zero sources specified");
+
+ if (arg_split_mode == JOURNAL_WRITE_SPLIT_NONE) {
+                /* In this case we know up front which output file the single
+                 * writer will use, so create it now and verify that we can
+                 * produce output as expected. */
+ r = journal_remote_get_writer(s, NULL, &s->_single_writer);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int negative_fd(const char *spec) {
+        /* Parse spec as an integer and return a non-positive value as its inverse
+         * (e.g. --listen-http=-3 means "use fd 3"); positive values yield -EINVAL,
+         * and parse errors are propagated. */
+
+ int fd, r;
+
+ r = safe_atoi(spec, &fd);
+ if (r < 0)
+ return r;
+
+ if (fd > 0)
+ return -EINVAL;
+ else
+ return -fd;
+}
+
+static int parse_config(void) {
+ const ConfigTableItem items[] = {
+ { "Remote", "Seal", config_parse_bool, 0, &arg_seal },
+ { "Remote", "SplitMode", config_parse_write_split_mode, 0, &arg_split_mode },
+ { "Remote", "ServerKeyFile", config_parse_path, 0, &arg_key },
+ { "Remote", "ServerCertificateFile", config_parse_path, 0, &arg_cert },
+ { "Remote", "TrustedCertificateFile", config_parse_path, 0, &arg_trust },
+ {}
+ };
+
+ return config_parse_many_nulstr(PKGSYSCONFDIR "/journal-remote.conf",
+ CONF_PATHS_NULSTR("systemd/journal-remote.conf.d"),
+ "Remote\0", config_item_table_lookup, items,
+ CONFIG_PARSE_WARN, NULL);
+}
+
+static int help(void) {
+ _cleanup_free_ char *link = NULL;
+ int r;
+
+ r = terminal_urlify_man("systemd-journal-remote.service", "8", &link);
+ if (r < 0)
+ return log_oom();
+
+ printf("%s [OPTIONS...] {FILE|-}...\n\n"
+ "Write external journal events to journal file(s).\n\n"
+ " -h --help Show this help\n"
+ " --version Show package version\n"
+ " --url=URL Read events from systemd-journal-gatewayd at URL\n"
+ " --getter=COMMAND Read events from the output of COMMAND\n"
+ " --listen-raw=ADDR Listen for connections at ADDR\n"
+ " --listen-http=ADDR Listen for HTTP connections at ADDR\n"
+ " --listen-https=ADDR Listen for HTTPS connections at ADDR\n"
+ " -o --output=FILE|DIR Write output to FILE or DIR/external-*.journal\n"
+ " --compress[=BOOL] XZ-compress the output journal (default: yes)\n"
+ " --seal[=BOOL] Use event sealing (default: no)\n"
+ " --key=FILENAME SSL key in PEM format (default:\n"
+ " \"" PRIV_KEY_FILE "\")\n"
+ " --cert=FILENAME SSL certificate in PEM format (default:\n"
+ " \"" CERT_FILE "\")\n"
+ " --trust=FILENAME|all SSL CA certificate or disable checking (default:\n"
+ " \"" TRUST_FILE "\")\n"
+ " --gnutls-log=CATEGORY...\n"
+ " Specify a list of gnutls logging categories\n"
+ " --split-mode=none|host How many output files to create\n"
+ "\nNote: file descriptors from sd_listen_fds() will be consumed, too.\n"
+ "\nSee the %s for details.\n"
+ , program_invocation_short_name
+ , link
+ );
+
+ return 0;
+}
+
+static int parse_argv(int argc, char *argv[]) {
+ enum {
+ ARG_VERSION = 0x100,
+ ARG_URL,
+ ARG_LISTEN_RAW,
+ ARG_LISTEN_HTTP,
+ ARG_LISTEN_HTTPS,
+ ARG_GETTER,
+ ARG_SPLIT_MODE,
+ ARG_COMPRESS,
+ ARG_SEAL,
+ ARG_KEY,
+ ARG_CERT,
+ ARG_TRUST,
+ ARG_GNUTLS_LOG,
+ };
+
+ static const struct option options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, ARG_VERSION },
+ { "url", required_argument, NULL, ARG_URL },
+ { "getter", required_argument, NULL, ARG_GETTER },
+ { "listen-raw", required_argument, NULL, ARG_LISTEN_RAW },
+ { "listen-http", required_argument, NULL, ARG_LISTEN_HTTP },
+ { "listen-https", required_argument, NULL, ARG_LISTEN_HTTPS },
+ { "output", required_argument, NULL, 'o' },
+ { "split-mode", required_argument, NULL, ARG_SPLIT_MODE },
+ { "compress", optional_argument, NULL, ARG_COMPRESS },
+ { "seal", optional_argument, NULL, ARG_SEAL },
+ { "key", required_argument, NULL, ARG_KEY },
+ { "cert", required_argument, NULL, ARG_CERT },
+ { "trust", required_argument, NULL, ARG_TRUST },
+ { "gnutls-log", required_argument, NULL, ARG_GNUTLS_LOG },
+ {}
+ };
+
+ int c, r;
+ bool type_a, type_b;
+
+ assert(argc >= 0);
+ assert(argv);
+
+ while ((c = getopt_long(argc, argv, "ho:", options, NULL)) >= 0)
+ switch(c) {
+
+ case 'h':
+ return help();
+
+ case ARG_VERSION:
+ return version();
+
+ case ARG_URL:
+ if (arg_url)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot currently set more than one --url");
+
+ arg_url = optarg;
+ break;
+
+ case ARG_GETTER:
+ if (arg_getter)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot currently use --getter more than once");
+
+ arg_getter = optarg;
+ break;
+
+ case ARG_LISTEN_RAW:
+ if (arg_listen_raw)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot currently use --listen-raw more than once");
+
+ arg_listen_raw = optarg;
+ break;
+
+ case ARG_LISTEN_HTTP:
+ if (arg_listen_http || http_socket >= 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot currently use --listen-http more than once");
+
+ r = negative_fd(optarg);
+ if (r >= 0)
+ http_socket = r;
+ else
+ arg_listen_http = optarg;
+ break;
+
+ case ARG_LISTEN_HTTPS:
+ if (arg_listen_https || https_socket >= 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot currently use --listen-https more than once");
+
+ r = negative_fd(optarg);
+ if (r >= 0)
+ https_socket = r;
+ else
+ arg_listen_https = optarg;
+
+ break;
+
+ case ARG_KEY:
+ if (arg_key)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Key file specified twice");
+
+ arg_key = strdup(optarg);
+ if (!arg_key)
+ return log_oom();
+
+ break;
+
+ case ARG_CERT:
+ if (arg_cert)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Certificate file specified twice");
+
+ arg_cert = strdup(optarg);
+ if (!arg_cert)
+ return log_oom();
+
+ break;
+
+ case ARG_TRUST:
+ if (arg_trust || arg_trust_all)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Confusing trusted CA configuration");
+
+ if (streq(optarg, "all"))
+ arg_trust_all = true;
+ else {
+#if HAVE_GNUTLS
+ arg_trust = strdup(optarg);
+ if (!arg_trust)
+ return log_oom();
+#else
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Option --trust is not available.");
+#endif
+ }
+
+ break;
+
+ case 'o':
+ if (arg_output)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot use --output/-o more than once");
+
+ arg_output = optarg;
+ break;
+
+ case ARG_SPLIT_MODE:
+ arg_split_mode = journal_write_split_mode_from_string(optarg);
+ if (arg_split_mode == _JOURNAL_WRITE_SPLIT_INVALID)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Invalid split mode: %s", optarg);
+ break;
+
+ case ARG_COMPRESS:
+ if (optarg) {
+ r = parse_boolean(optarg);
+ if (r < 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Failed to parse --compress= parameter.");
+
+ arg_compress = !!r;
+ } else
+ arg_compress = true;
+
+ break;
+
+ case ARG_SEAL:
+ if (optarg) {
+ r = parse_boolean(optarg);
+ if (r < 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Failed to parse --seal= parameter.");
+
+ arg_seal = !!r;
+ } else
+ arg_seal = true;
+
+ break;
+
+ case ARG_GNUTLS_LOG: {
+#if HAVE_GNUTLS
+ const char* p = optarg;
+ for (;;) {
+ _cleanup_free_ char *word = NULL;
+
+ r = extract_first_word(&p, &word, ",", 0);
+ if (r < 0)
+ return log_error_errno(r, "Failed to parse --gnutls-log= argument: %m");
+ if (r == 0)
+ break;
+
+ if (strv_push(&arg_gnutls_log, word) < 0)
+ return log_oom();
+
+ word = NULL;
+ }
+ break;
+#else
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Option --gnutls-log is not available.");
+#endif
+ }
+
+ case '?':
+ return -EINVAL;
+
+ default:
+ assert_not_reached("Unknown option code.");
+ }
+
+ if (optind < argc)
+ arg_files = argv + optind;
+
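+        /* type_a: reading from files or a --getter command; type_b: listening for
+         * network connections or using socket activation. The two modes cannot be
+         * combined. */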
+ type_a = arg_getter || !strv_isempty(arg_files);
+ type_b = arg_url
+ || arg_listen_raw
+ || arg_listen_http || arg_listen_https
+ || sd_listen_fds(false) > 0;
+ if (type_a && type_b)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Cannot use file input or --getter with "
+                                       "--listen-... or socket activation.");
+ if (type_a) {
+ if (!arg_output)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Option --output must be specified with file input or --getter.");
+
+ if (!IN_SET(arg_split_mode, JOURNAL_WRITE_SPLIT_NONE, _JOURNAL_WRITE_SPLIT_INVALID))
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "For active sources, only --split-mode=none is allowed.");
+
+ arg_split_mode = JOURNAL_WRITE_SPLIT_NONE;
+ }
+
+ if (arg_split_mode == _JOURNAL_WRITE_SPLIT_INVALID)
+ arg_split_mode = JOURNAL_WRITE_SPLIT_HOST;
+
+ if (arg_split_mode == JOURNAL_WRITE_SPLIT_NONE && arg_output) {
+ if (is_dir(arg_output, true) > 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "For SplitMode=none, output must be a file.");
+ if (!endswith(arg_output, ".journal"))
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "For SplitMode=none, output file name must end with .journal.");
+ }
+
+ if (arg_split_mode == JOURNAL_WRITE_SPLIT_HOST
+ && arg_output && is_dir(arg_output, true) <= 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "For SplitMode=host, output must be a directory.");
+
+ log_debug("Full config: SplitMode=%s Key=%s Cert=%s Trust=%s",
+ journal_write_split_mode_to_string(arg_split_mode),
+ strna(arg_key),
+ strna(arg_cert),
+ strna(arg_trust));
+
+ return 1 /* work to do */;
+}
+
+static int load_certificates(char **key, char **cert, char **trust) {
+ int r;
+
+ r = read_full_file(arg_key ?: PRIV_KEY_FILE, key, NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read key from file '%s': %m",
+ arg_key ?: PRIV_KEY_FILE);
+
+ r = read_full_file(arg_cert ?: CERT_FILE, cert, NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read certificate from file '%s': %m",
+ arg_cert ?: CERT_FILE);
+
+ if (arg_trust_all)
+ log_info("Certificate checking disabled.");
+ else {
+ r = read_full_file(arg_trust ?: TRUST_FILE, trust, NULL);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read CA certificate file '%s': %m",
+ arg_trust ?: TRUST_FILE);
+ }
+
+ if ((arg_listen_raw || arg_listen_http) && *trust)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Option --trust makes all non-HTTPS connections untrusted.");
+
+ return 0;
+}
+
+static int run(int argc, char **argv) {
+ _cleanup_(notify_on_cleanup) const char *notify_message = NULL;
+ _cleanup_(journal_remote_server_destroy) RemoteServer s = {};
+ _cleanup_free_ char *key = NULL, *cert = NULL, *trust = NULL;
+ int r;
+
+ log_show_color(true);
+ log_parse_environment();
+
+ /* The journal merging logic potentially needs a lot of fds. */
+ (void) rlimit_nofile_bump(HIGH_RLIMIT_NOFILE);
+
+ r = parse_config();
+ if (r < 0)
+ return r;
+
+ r = parse_argv(argc, argv);
+ if (r <= 0)
+ return r;
+
+ if (arg_listen_http || arg_listen_https) {
+ r = setup_gnutls_logger(arg_gnutls_log);
+ if (r < 0)
+ return r;
+ }
+
+ if (arg_listen_https || https_socket >= 0) {
+ r = load_certificates(&key, &cert, &trust);
+ if (r < 0)
+ return r;
+
+ s.check_trust = !arg_trust_all;
+ }
+
+ r = create_remoteserver(&s, key, cert, trust);
+ if (r < 0)
+ return r;
+
+ r = sd_event_set_watchdog(s.events, true);
+ if (r < 0)
+ return log_error_errno(r, "Failed to enable watchdog: %m");
+
+ log_debug("Watchdog is %sd.", enable_disable(r > 0));
+
+ log_debug("%s running as pid "PID_FMT,
+ program_invocation_short_name, getpid_cached());
+
+ notify_message = notify_start(NOTIFY_READY, NOTIFY_STOPPING);
+
+ while (s.active) {
+ r = sd_event_get_state(s.events);
+ if (r < 0)
+ return r;
+ if (r == SD_EVENT_FINISHED)
+ break;
+
+ r = sd_event_run(s.events, -1);
+ if (r < 0)
+ return log_error_errno(r, "Failed to run event loop: %m");
+ }
+
+ notify_message = NULL;
+ (void) sd_notifyf(false,
+ "STOPPING=1\n"
+ "STATUS=Shutting down after writing %" PRIu64 " entries...", s.event_count);
+
+ log_info("Finishing after writing %" PRIu64 " entries", s.event_count);
+
+ return 0;
+}
+
+DEFINE_MAIN_FUNCTION(run);
diff --git a/src/journal-remote/journal-remote-parse.c b/src/journal-remote/journal-remote-parse.c
new file mode 100644
index 0000000..535d06a
--- /dev/null
+++ b/src/journal-remote/journal-remote-parse.c
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include "alloc-util.h"
+#include "fd-util.h"
+#include "journal-remote-parse.h"
+#include "journald-native.h"
+#include "parse-util.h"
+#include "string-util.h"
+
+void source_free(RemoteSource *source) {
+ if (!source)
+ return;
+
+ journal_importer_cleanup(&source->importer);
+
+ log_debug("Writer ref count %i", source->writer->n_ref);
+ writer_unref(source->writer);
+
+ sd_event_source_unref(source->event);
+ sd_event_source_unref(source->buffer_event);
+
+ free(source);
+}
+
+/**
+ * Allocate a new zero-initialized source and fill it in with the given values.
+ * On success, ownership of fd, name, and writer is taken; on failure they are
+ * left untouched.
+ */
+RemoteSource* source_new(int fd, bool passive_fd, char *name, Writer *writer) {
+ RemoteSource *source;
+
+ log_debug("Creating source for %sfd:%d (%s)",
+ passive_fd ? "passive " : "", fd, name);
+
+ assert(fd >= 0);
+
+ source = new0(RemoteSource, 1);
+ if (!source)
+ return NULL;
+
+ source->importer.fd = fd;
+ source->importer.passive_fd = passive_fd;
+ source->importer.name = name;
+
+ source->writer = writer;
+
+ return source;
+}
+
+int process_source(RemoteSource *source, bool compress, bool seal) {
+ int r;
+
+ assert(source);
+ assert(source->writer);
+
+ r = journal_importer_process_data(&source->importer);
+ if (r <= 0)
+ return r;
+
+ /* We have a full event */
+ log_trace("Received full event from source@%p fd:%d (%s)",
+ source, source->importer.fd, source->importer.name);
+
+ if (source->importer.iovw.count == 0) {
+ log_warning("Entry with no payload, skipping");
+ goto freeing;
+ }
+
+ assert(source->importer.iovw.iovec);
+
+ r = writer_write(source->writer, &source->importer.iovw, &source->importer.ts, compress, seal);
+ if (r == -EBADMSG) {
+ log_error_errno(r, "Entry is invalid, ignoring.");
+ r = 0;
+ } else if (r < 0)
+ log_error_errno(r, "Failed to write entry of %zu bytes: %m",
+ iovw_size(&source->importer.iovw));
+ else
+ r = 1;
+
+ freeing:
+ journal_importer_drop_iovw(&source->importer);
+ return r;
+}
diff --git a/src/journal-remote/journal-remote-parse.h b/src/journal-remote/journal-remote-parse.h
new file mode 100644
index 0000000..d6c7736
--- /dev/null
+++ b/src/journal-remote/journal-remote-parse.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "sd-event.h"
+
+#include "journal-importer.h"
+#include "journal-remote-write.h"
+
+typedef struct RemoteSource {
+ JournalImporter importer;
+
+ Writer *writer;
+
+ sd_event_source *event;
+ sd_event_source *buffer_event;
+} RemoteSource;
+
+RemoteSource* source_new(int fd, bool passive_fd, char *name, Writer *writer);
+void source_free(RemoteSource *source);
+int process_source(RemoteSource *source, bool compress, bool seal);
diff --git a/src/journal-remote/journal-remote-write.c b/src/journal-remote/journal-remote-write.c
new file mode 100644
index 0000000..188ff35
--- /dev/null
+++ b/src/journal-remote/journal-remote-write.c
@@ -0,0 +1,105 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include "alloc-util.h"
+#include "journal-remote.h"
+
+static int do_rotate(JournalFile **f, bool compress, bool seal) {
+ int r = journal_file_rotate(f, compress, (uint64_t) -1, seal, NULL);
+ if (r < 0) {
+ if (*f)
+ log_error_errno(r, "Failed to rotate %s: %m", (*f)->path);
+ else
+ log_error_errno(r, "Failed to create rotated journal: %m");
+ }
+
+ return r;
+}
+
+Writer* writer_new(RemoteServer *server) {
+ Writer *w;
+
+ w = new0(Writer, 1);
+ if (!w)
+ return NULL;
+
+ memset(&w->metrics, 0xFF, sizeof(w->metrics));
+
+ w->mmap = mmap_cache_new();
+ if (!w->mmap)
+ return mfree(w);
+
+ w->n_ref = 1;
+ w->server = server;
+
+ return w;
+}
+
+static Writer* writer_free(Writer *w) {
+ if (!w)
+ return NULL;
+
+ if (w->journal) {
+ log_debug("Closing journal file %s.", w->journal->path);
+ journal_file_close(w->journal);
+ }
+
+ if (w->server && w->hashmap_key)
+ hashmap_remove(w->server->writers, w->hashmap_key);
+
+ free(w->hashmap_key);
+
+ if (w->mmap)
+ mmap_cache_unref(w->mmap);
+
+ return mfree(w);
+}
+
+DEFINE_TRIVIAL_REF_UNREF_FUNC(Writer, writer, writer_free);
+
+int writer_write(Writer *w,
+ struct iovec_wrapper *iovw,
+ dual_timestamp *ts,
+ bool compress,
+ bool seal) {
+ int r;
+
+ assert(w);
+ assert(iovw);
+ assert(iovw->count > 0);
+
+ if (journal_file_rotate_suggested(w->journal, 0)) {
+ log_info("%s: Journal header limits reached or header out-of-date, rotating",
+ w->journal->path);
+ r = do_rotate(&w->journal, compress, seal);
+ if (r < 0)
+ return r;
+ }
+
+ r = journal_file_append_entry(w->journal, ts, NULL,
+ iovw->iovec, iovw->count,
+ &w->seqnum, NULL, NULL);
+ if (r >= 0) {
+ if (w->server)
+ w->server->event_count += 1;
+ return 0;
+ } else if (r == -EBADMSG)
+ return r;
+
+ log_debug_errno(r, "%s: Write failed, rotating: %m", w->journal->path);
+ r = do_rotate(&w->journal, compress, seal);
+ if (r < 0)
+ return r;
+ else
+ log_debug("%s: Successfully rotated journal", w->journal->path);
+
+ log_debug("Retrying write.");
+ r = journal_file_append_entry(w->journal, ts, NULL,
+ iovw->iovec, iovw->count,
+ &w->seqnum, NULL, NULL);
+ if (r < 0)
+ return r;
+
+ if (w->server)
+ w->server->event_count += 1;
+ return 0;
+}
diff --git a/src/journal-remote/journal-remote-write.h b/src/journal-remote/journal-remote-write.h
new file mode 100644
index 0000000..e445859
--- /dev/null
+++ b/src/journal-remote/journal-remote-write.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "journal-file.h"
+#include "journal-importer.h"
+
+typedef struct RemoteServer RemoteServer;
+
+typedef struct Writer {
+ JournalFile *journal;
+ JournalMetrics metrics;
+
+ MMapCache *mmap;
+ RemoteServer *server;
+ char *hashmap_key;
+
+ uint64_t seqnum;
+
+ unsigned n_ref;
+} Writer;
+
+Writer* writer_new(RemoteServer* server);
+Writer* writer_ref(Writer *w);
+Writer* writer_unref(Writer *w);
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(Writer*, writer_unref);
+
+int writer_write(Writer *s,
+ struct iovec_wrapper *iovw,
+ dual_timestamp *ts,
+ bool compress,
+ bool seal);
+
+typedef enum JournalWriteSplitMode {
+ JOURNAL_WRITE_SPLIT_NONE,
+ JOURNAL_WRITE_SPLIT_HOST,
+ _JOURNAL_WRITE_SPLIT_MAX,
+ _JOURNAL_WRITE_SPLIT_INVALID = -1
+} JournalWriteSplitMode;
diff --git a/src/journal-remote/journal-remote.c b/src/journal-remote/journal-remote.c
new file mode 100644
index 0000000..1da32c5
--- /dev/null
+++ b/src/journal-remote/journal-remote.c
@@ -0,0 +1,533 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/prctl.h>
+#include <sys/socket.h>
+#include <stdint.h>
+
+#include "sd-daemon.h"
+
+#include "alloc-util.h"
+#include "def.h"
+#include "escape.h"
+#include "fd-util.h"
+#include "journal-file.h"
+#include "journal-remote-write.h"
+#include "journal-remote.h"
+#include "journald-native.h"
+#include "macro.h"
+#include "parse-util.h"
+#include "process-util.h"
+#include "socket-util.h"
+#include "stdio-util.h"
+#include "string-util.h"
+#include "strv.h"
+
+#define REMOTE_JOURNAL_PATH "/var/log/journal/remote"
+
+#define filename_escape(s) xescape((s), "/ ")
+
+static int open_output(RemoteServer *s, Writer *w, const char* host) {
+ _cleanup_free_ char *_filename = NULL;
+ const char *filename;
+ int r;
+
+ switch (s->split_mode) {
+ case JOURNAL_WRITE_SPLIT_NONE:
+ filename = s->output;
+ break;
+
+ case JOURNAL_WRITE_SPLIT_HOST: {
+ _cleanup_free_ char *name;
+
+ assert(host);
+
+ name = filename_escape(host);
+ if (!name)
+ return log_oom();
+
+ r = asprintf(&_filename, "%s/remote-%s.journal", s->output, name);
+ if (r < 0)
+ return log_oom();
+
+ filename = _filename;
+ break;
+ }
+
+ default:
+ assert_not_reached("what?");
+ }
+
+ r = journal_file_open_reliably(filename,
+ O_RDWR|O_CREAT, 0640,
+ s->compress, (uint64_t) -1, s->seal,
+ &w->metrics,
+ w->mmap, NULL,
+ NULL, &w->journal);
+ if (r < 0)
+ return log_error_errno(r, "Failed to open output journal %s: %m", filename);
+
+ log_debug("Opened output file %s", w->journal->path);
+ return 0;
+}
+
+/**********************************************************************
+ **********************************************************************
+ **********************************************************************/
+
+static int init_writer_hashmap(RemoteServer *s) {
+ static const struct hash_ops* const hash_ops[] = {
+ [JOURNAL_WRITE_SPLIT_NONE] = NULL,
+ [JOURNAL_WRITE_SPLIT_HOST] = &string_hash_ops,
+ };
+
+ assert(s);
+ assert(s->split_mode >= 0 && s->split_mode < (int) ELEMENTSOF(hash_ops));
+
+ s->writers = hashmap_new(hash_ops[s->split_mode]);
+ if (!s->writers)
+ return log_oom();
+
+ return 0;
+}
+
+int journal_remote_get_writer(RemoteServer *s, const char *host, Writer **writer) {
+ _cleanup_(writer_unrefp) Writer *w = NULL;
+ const void *key;
+ int r;
+
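+        /* Writers are cached in s->writers: one per host when splitting by host,
+         * a single shared writer otherwise. */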
+ switch(s->split_mode) {
+ case JOURNAL_WRITE_SPLIT_NONE:
+ key = "one and only";
+ break;
+
+ case JOURNAL_WRITE_SPLIT_HOST:
+ assert(host);
+ key = host;
+ break;
+
+ default:
+ assert_not_reached("what split mode?");
+ }
+
+ w = hashmap_get(s->writers, key);
+ if (w)
+ writer_ref(w);
+ else {
+ w = writer_new(s);
+ if (!w)
+ return log_oom();
+
+ if (s->split_mode == JOURNAL_WRITE_SPLIT_HOST) {
+ w->hashmap_key = strdup(key);
+ if (!w->hashmap_key)
+ return log_oom();
+ }
+
+ r = open_output(s, w, host);
+ if (r < 0)
+ return r;
+
+ r = hashmap_put(s->writers, w->hashmap_key ?: key, w);
+ if (r < 0)
+ return r;
+ }
+
+ *writer = TAKE_PTR(w);
+
+ return 0;
+}
+
+/**********************************************************************
+ **********************************************************************
+ **********************************************************************/
+
+/* This should go away as soon as µhttpd allows state to be passed around. */
+RemoteServer *journal_remote_server_global;
+
+static int dispatch_raw_source_event(sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ void *userdata);
+static int dispatch_raw_source_until_block(sd_event_source *event,
+ void *userdata);
+static int dispatch_blocking_source_event(sd_event_source *event,
+ void *userdata);
+static int dispatch_raw_connection_event(sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ void *userdata);
+
+static int get_source_for_fd(RemoteServer *s,
+ int fd, char *name, RemoteSource **source) {
+ Writer *writer;
+ int r;
+
+ /* This takes ownership of name, but only on success. */
+
+ assert(fd >= 0);
+ assert(source);
+
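+        /* s->sources is indexed directly by file descriptor number; grow the array
+         * so that slot fd exists. */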
+ if (!GREEDY_REALLOC0(s->sources, s->sources_size, fd + 1))
+ return log_oom();
+
+ r = journal_remote_get_writer(s, name, &writer);
+ if (r < 0)
+ return log_warning_errno(r, "Failed to get writer for source %s: %m",
+ name);
+
+ if (s->sources[fd] == NULL) {
+ s->sources[fd] = source_new(fd, false, name, writer);
+ if (!s->sources[fd]) {
+ writer_unref(writer);
+ return log_oom();
+ }
+
+ s->active++;
+ }
+
+ *source = s->sources[fd];
+ return 0;
+}
+
+static int remove_source(RemoteServer *s, int fd) {
+ RemoteSource *source;
+
+ assert(s);
+ assert(fd >= 0 && fd < (ssize_t) s->sources_size);
+
+ source = s->sources[fd];
+ if (source) {
+ /* this closes fd too */
+ source_free(source);
+ s->sources[fd] = NULL;
+ s->active--;
+ }
+
+ return 0;
+}
+
+int journal_remote_add_source(RemoteServer *s, int fd, char* name, bool own_name) {
+ RemoteSource *source = NULL;
+ int r;
+
+ /* This takes ownership of name, even on failure, if own_name is true. */
+
+ assert(s);
+ assert(fd >= 0);
+ assert(name);
+
+ if (!own_name) {
+ name = strdup(name);
+ if (!name)
+ return log_oom();
+ }
+
+ r = get_source_for_fd(s, fd, name, &source);
+ if (r < 0) {
+ log_error_errno(r, "Failed to create source for fd:%d (%s): %m",
+ fd, name);
+ free(name);
+ return r;
+ }
+
+ r = sd_event_add_io(s->events, &source->event,
+ fd, EPOLLIN|EPOLLRDHUP|EPOLLPRI,
+ dispatch_raw_source_event, source);
+ if (r == 0) {
+ /* Add additional source for buffer processing. It will be
+ * enabled later. */
+ r = sd_event_add_defer(s->events, &source->buffer_event,
+ dispatch_raw_source_until_block, source);
+ if (r == 0)
+ sd_event_source_set_enabled(source->buffer_event, SD_EVENT_OFF);
+ } else if (r == -EPERM) {
+ log_debug("Falling back to sd_event_add_defer for fd:%d (%s)", fd, name);
+ r = sd_event_add_defer(s->events, &source->event,
+ dispatch_blocking_source_event, source);
+ if (r == 0)
+ sd_event_source_set_enabled(source->event, SD_EVENT_ON);
+ }
+ if (r < 0) {
+ log_error_errno(r, "Failed to register event source for fd:%d: %m",
+ fd);
+ goto error;
+ }
+
+ r = sd_event_source_set_description(source->event, name);
+ if (r < 0) {
+ log_error_errno(r, "Failed to set source name for fd:%d: %m", fd);
+ goto error;
+ }
+
+ return 1; /* work to do */
+
+ error:
+ remove_source(s, fd);
+ return r;
+}
+
+int journal_remote_add_raw_socket(RemoteServer *s, int fd) {
+ int r;
+ _cleanup_close_ int fd_ = fd;
+ char name[STRLEN("raw-socket-") + DECIMAL_STR_MAX(int) + 1];
+
+ assert(fd >= 0);
+
+ r = sd_event_add_io(s->events, &s->listen_event,
+ fd, EPOLLIN,
+ dispatch_raw_connection_event, s);
+ if (r < 0)
+ return r;
+
+ xsprintf(name, "raw-socket-%d", fd);
+
+ r = sd_event_source_set_description(s->listen_event, name);
+ if (r < 0)
+ return r;
+
+ fd_ = -1;
+ s->active++;
+ return 0;
+}
+
+/**********************************************************************
+ **********************************************************************
+ **********************************************************************/
+
+int journal_remote_server_init(
+ RemoteServer *s,
+ const char *output,
+ JournalWriteSplitMode split_mode,
+ bool compress,
+ bool seal) {
+
+ int r;
+
+ assert(s);
+
+ assert(journal_remote_server_global == NULL);
+ journal_remote_server_global = s;
+
+ s->split_mode = split_mode;
+ s->compress = compress;
+ s->seal = seal;
+
+ if (output)
+ s->output = output;
+ else if (split_mode == JOURNAL_WRITE_SPLIT_NONE)
+ s->output = REMOTE_JOURNAL_PATH "/remote.journal";
+ else if (split_mode == JOURNAL_WRITE_SPLIT_HOST)
+ s->output = REMOTE_JOURNAL_PATH;
+ else
+ assert_not_reached("bad split mode");
+
+ r = sd_event_default(&s->events);
+ if (r < 0)
+ return log_error_errno(r, "Failed to allocate event loop: %m");
+
+ r = init_writer_hashmap(s);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+#if HAVE_MICROHTTPD
+static void MHDDaemonWrapper_free(MHDDaemonWrapper *d) {
+ MHD_stop_daemon(d->daemon);
+ sd_event_source_unref(d->io_event);
+ sd_event_source_unref(d->timer_event);
+ free(d);
+}
+#endif
+
+void journal_remote_server_destroy(RemoteServer *s) {
+ size_t i;
+
+#if HAVE_MICROHTTPD
+ hashmap_free_with_destructor(s->daemons, MHDDaemonWrapper_free);
+#endif
+
+ assert(s->sources_size == 0 || s->sources);
+ for (i = 0; i < s->sources_size; i++)
+ remove_source(s, i);
+ free(s->sources);
+
+ writer_unref(s->_single_writer);
+ hashmap_free(s->writers);
+
+ sd_event_source_unref(s->sigterm_event);
+ sd_event_source_unref(s->sigint_event);
+ sd_event_source_unref(s->listen_event);
+ sd_event_unref(s->events);
+
+ if (s == journal_remote_server_global)
+ journal_remote_server_global = NULL;
+
+ /* fds that we're listening on remain open... */
+}
+
+/**********************************************************************
+ **********************************************************************
+ **********************************************************************/
+
+int journal_remote_handle_raw_source(
+ sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ RemoteServer *s) {
+
+ RemoteSource *source;
+ int r;
+
+ /* Returns 1 if there might be more data pending,
+ * 0 if data is currently exhausted, negative on error.
+ */
+
+ assert(fd >= 0 && fd < (ssize_t) s->sources_size);
+ source = s->sources[fd];
+ assert(source->importer.fd == fd);
+
+ r = process_source(source, s->compress, s->seal);
+ if (journal_importer_eof(&source->importer)) {
+ size_t remaining;
+
+ log_debug("EOF reached with source %s (fd=%d)",
+ source->importer.name, source->importer.fd);
+
+ remaining = journal_importer_bytes_remaining(&source->importer);
+ if (remaining > 0)
+ log_notice("Premature EOF. %zu bytes lost.", remaining);
+ remove_source(s, source->importer.fd);
+ log_debug("%zu active sources remaining", s->active);
+ return 0;
+ } else if (r == -E2BIG) {
+ log_notice("Entry with too many fields, skipped");
+ return 1;
+ } else if (r == -ENOBUFS) {
+ log_notice("Entry too big, skipped");
+ return 1;
+ } else if (r == -EAGAIN) {
+ return 0;
+ } else if (r < 0) {
+ log_debug_errno(r, "Closing connection: %m");
+ remove_source(s, fd);
+ return 0;
+ } else
+ return 1;
+}
+
+static int dispatch_raw_source_until_block(sd_event_source *event,
+ void *userdata) {
+ RemoteSource *source = userdata;
+ int r;
+
+ /* Make sure event stays around even if source is destroyed */
+ sd_event_source_ref(event);
+
+ r = journal_remote_handle_raw_source(event, source->importer.fd, EPOLLIN, journal_remote_server_global);
+ if (r != 1)
+ /* No more data for now */
+ sd_event_source_set_enabled(event, SD_EVENT_OFF);
+
+ sd_event_source_unref(event);
+
+ return r;
+}
+
+static int dispatch_raw_source_event(sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ void *userdata) {
+ RemoteSource *source = userdata;
+ int r;
+
+ assert(source->event);
+ assert(source->buffer_event);
+
+ r = journal_remote_handle_raw_source(event, fd, EPOLLIN, journal_remote_server_global);
+ if (r == 1)
+ /* Might have more data. We need to rerun the handler
+ * until we are sure the buffer is exhausted. */
+ sd_event_source_set_enabled(source->buffer_event, SD_EVENT_ON);
+
+ return r;
+}
+
+static int dispatch_blocking_source_event(sd_event_source *event,
+ void *userdata) {
+ RemoteSource *source = userdata;
+
+ return journal_remote_handle_raw_source(event, source->importer.fd, EPOLLIN, journal_remote_server_global);
+}
+
+static int accept_connection(const char* type, int fd,
+ SocketAddress *addr, char **hostname) {
+ int fd2, r;
+
+ log_debug("Accepting new %s connection on fd:%d", type, fd);
+ fd2 = accept4(fd, &addr->sockaddr.sa, &addr->size, SOCK_NONBLOCK|SOCK_CLOEXEC);
+ if (fd2 < 0)
+ return log_error_errno(errno, "accept() on fd:%d failed: %m", fd);
+
+ switch(socket_address_family(addr)) {
+ case AF_INET:
+ case AF_INET6: {
+ _cleanup_free_ char *a = NULL;
+ char *b;
+
+ r = socket_address_print(addr, &a);
+ if (r < 0) {
+ log_error_errno(r, "socket_address_print(): %m");
+ close(fd2);
+ return r;
+ }
+
+ r = socknameinfo_pretty(&addr->sockaddr, addr->size, &b);
+ if (r < 0) {
+ log_error_errno(r, "Resolving hostname failed: %m");
+ close(fd2);
+ return r;
+ }
+
+ log_debug("Accepted %s %s connection from %s",
+ type,
+ socket_address_family(addr) == AF_INET ? "IP" : "IPv6",
+ a);
+
+ *hostname = b;
+
+ return fd2;
+        }
+ default:
+ log_error("Rejected %s connection with unsupported family %d",
+ type, socket_address_family(addr));
+ close(fd2);
+
+ return -EINVAL;
+ }
+}
+
+static int dispatch_raw_connection_event(sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ void *userdata) {
+ RemoteServer *s = userdata;
+ int fd2;
+ SocketAddress addr = {
+ .size = sizeof(union sockaddr_union),
+ .type = SOCK_STREAM,
+ };
+ char *hostname = NULL;
+
+ fd2 = accept_connection("raw", fd, &addr, &hostname);
+ if (fd2 < 0)
+ return fd2;
+
+ return journal_remote_add_source(s, fd2, hostname, true);
+}
diff --git a/src/journal-remote/journal-remote.conf.in b/src/journal-remote/journal-remote.conf.in
new file mode 100644
index 0000000..edc3aba
--- /dev/null
+++ b/src/journal-remote/journal-remote.conf.in
@@ -0,0 +1,19 @@
+# This file is part of systemd.
+#
+# systemd is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# Entries in this file show the compile time defaults.
+# You can change settings by editing this file.
+# Defaults can be restored by simply deleting this file.
+#
+# See journal-remote.conf(5) for details
+
+[Remote]
+# Seal=false
+# SplitMode=host
+# ServerKeyFile=@CERTIFICATEROOT@/private/journal-remote.pem
+# ServerCertificateFile=@CERTIFICATEROOT@/certs/journal-remote.pem
+# TrustedCertificateFile=@CERTIFICATEROOT@/ca/trusted.pem
diff --git a/src/journal-remote/journal-remote.h b/src/journal-remote/journal-remote.h
new file mode 100644
index 0000000..4c25d43
--- /dev/null
+++ b/src/journal-remote/journal-remote.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "sd-event.h"
+
+#include "hashmap.h"
+#include "journal-remote-parse.h"
+#include "journal-remote-write.h"
+
+#if HAVE_MICROHTTPD
+#include "microhttpd-util.h"
+
+typedef struct MHDDaemonWrapper MHDDaemonWrapper;
+
+struct MHDDaemonWrapper {
+ uint64_t fd;
+ struct MHD_Daemon *daemon;
+
+ sd_event_source *io_event;
+ sd_event_source *timer_event;
+};
+#endif
+
+struct RemoteServer {
+ RemoteSource **sources;
+ size_t sources_size;
+ size_t active;
+
+ sd_event *events;
+ sd_event_source *sigterm_event, *sigint_event, *listen_event;
+
+ Hashmap *writers;
+ Writer *_single_writer;
+ uint64_t event_count;
+
+#if HAVE_MICROHTTPD
+ Hashmap *daemons;
+#endif
+ const char *output; /* either the output file or directory */
+
+ JournalWriteSplitMode split_mode;
+ bool compress;
+ bool seal;
+ bool check_trust;
+};
+extern RemoteServer *journal_remote_server_global;
+
+int journal_remote_server_init(
+ RemoteServer *s,
+ const char *output,
+ JournalWriteSplitMode split_mode,
+ bool compress,
+ bool seal);
+
+int journal_remote_get_writer(RemoteServer *s, const char *host, Writer **writer);
+
+int journal_remote_add_source(RemoteServer *s, int fd, char* name, bool own_name);
+int journal_remote_add_raw_socket(RemoteServer *s, int fd);
+int journal_remote_handle_raw_source(
+ sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ RemoteServer *s);
+
+void journal_remote_server_destroy(RemoteServer *s);
diff --git a/src/journal-remote/journal-upload-journal.c b/src/journal-remote/journal-upload-journal.c
new file mode 100644
index 0000000..7d7e738
--- /dev/null
+++ b/src/journal-remote/journal-upload-journal.c
@@ -0,0 +1,414 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <curl/curl.h>
+#include <stdbool.h>
+
+#include "sd-daemon.h"
+
+#include "alloc-util.h"
+#include "journal-upload.h"
+#include "log.h"
+#include "string-util.h"
+#include "utf8.h"
+#include "util.h"
+
+/**
+ * Write up to size bytes to buf. Return negative on error, and the number of
+ * bytes written otherwise. A return of 0 (nothing written) is also treated as
+ * an error by the caller: the buffer was too small to make any progress.
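+ *
+ * The entry is emitted in journal export format: the __CURSOR,
+ * __REALTIME_TIMESTAMP, __MONOTONIC_TIMESTAMP and _BOOT_ID pseudo-fields come
+ * first, then each data field (binary fields as FIELDNAME, '\n', a 64-bit
+ * little-endian length, and the data), terminated by an empty line.
+ * u->entry_state records how far we got, so serialization resumes where it
+ * left off once the caller provides a fresh buffer.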
+ */
+static ssize_t write_entry(char *buf, size_t size, Uploader *u) {
+ int r;
+ size_t pos = 0;
+
+ assert(size <= SSIZE_MAX);
+
+ for (;;) {
+
+ switch(u->entry_state) {
+ case ENTRY_CURSOR: {
+ u->current_cursor = mfree(u->current_cursor);
+
+ r = sd_journal_get_cursor(u->journal, &u->current_cursor);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get cursor: %m");
+
+ r = snprintf(buf + pos, size - pos,
+ "__CURSOR=%s\n", u->current_cursor);
+ assert(r >= 0);
+ if ((size_t) r > size - pos)
+ /* not enough space */
+ return pos;
+
+ u->entry_state++;
+
+ if (pos + r == size) {
+ /* exactly one character short, but we don't need it */
+ buf[size - 1] = '\n';
+ return size;
+ }
+
+ pos += r;
+ }
+ _fallthrough_;
+ case ENTRY_REALTIME: {
+ usec_t realtime;
+
+ r = sd_journal_get_realtime_usec(u->journal, &realtime);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get realtime timestamp: %m");
+
+ r = snprintf(buf + pos, size - pos,
+ "__REALTIME_TIMESTAMP="USEC_FMT"\n", realtime);
+ assert(r >= 0);
+ if ((size_t) r > size - pos)
+ /* not enough space */
+ return pos;
+
+ u->entry_state++;
+
+ if (r + pos == size) {
+ /* exactly one character short, but we don't need it */
+ buf[size - 1] = '\n';
+ return size;
+ }
+
+ pos += r;
+ }
+ _fallthrough_;
+ case ENTRY_MONOTONIC: {
+ usec_t monotonic;
+ sd_id128_t boot_id;
+
+ r = sd_journal_get_monotonic_usec(u->journal, &monotonic, &boot_id);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get monotonic timestamp: %m");
+
+ r = snprintf(buf + pos, size - pos,
+ "__MONOTONIC_TIMESTAMP="USEC_FMT"\n", monotonic);
+ assert(r >= 0);
+ if ((size_t) r > size - pos)
+ /* not enough space */
+ return pos;
+
+ u->entry_state++;
+
+ if (r + pos == size) {
+ /* exactly one character short, but we don't need it */
+ buf[size - 1] = '\n';
+ return size;
+ }
+
+ pos += r;
+ }
+ _fallthrough_;
+ case ENTRY_BOOT_ID: {
+ sd_id128_t boot_id;
+ char sid[33];
+
+ r = sd_journal_get_monotonic_usec(u->journal, NULL, &boot_id);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get monotonic timestamp: %m");
+
+ r = snprintf(buf + pos, size - pos,
+ "_BOOT_ID=%s\n", sd_id128_to_string(boot_id, sid));
+ assert(r >= 0);
+ if ((size_t) r > size - pos)
+ /* not enough space */
+ return pos;
+
+ u->entry_state++;
+
+ if (r + pos == size) {
+ /* exactly one character short, but we don't need it */
+ buf[size - 1] = '\n';
+ return size;
+ }
+
+ pos += r;
+ }
+ _fallthrough_;
+ case ENTRY_NEW_FIELD: {
+ u->field_pos = 0;
+
+ r = sd_journal_enumerate_data(u->journal,
+ &u->field_data,
+ &u->field_length);
+ if (r < 0)
+ return log_error_errno(r, "Failed to move to next field in entry: %m");
+ else if (r == 0) {
+ u->entry_state = ENTRY_OUTRO;
+ continue;
+ }
+
+ /* We already printed the boot id from the data in
+ * the header, hence let's suppress it here */
+ if (memory_startswith(u->field_data, u->field_length, "_BOOT_ID="))
+ continue;
+
+ if (!utf8_is_printable_newline(u->field_data, u->field_length, false)) {
+ u->entry_state = ENTRY_BINARY_FIELD_START;
+ continue;
+ }
+
+ u->entry_state++;
+ }
+ _fallthrough_;
+ case ENTRY_TEXT_FIELD:
+ case ENTRY_BINARY_FIELD: {
+ bool done;
+ size_t tocopy;
+
+ done = size - pos > u->field_length - u->field_pos;
+ if (done)
+ tocopy = u->field_length - u->field_pos;
+ else
+ tocopy = size - pos;
+
+ memcpy(buf + pos,
+ (char*) u->field_data + u->field_pos,
+ tocopy);
+
+ if (done) {
+ buf[pos + tocopy] = '\n';
+ pos += tocopy + 1;
+ u->entry_state = ENTRY_NEW_FIELD;
+ continue;
+ } else {
+ u->field_pos += tocopy;
+ return size;
+ }
+ }
+
+ case ENTRY_BINARY_FIELD_START: {
+ const char *c;
+ size_t len;
+
+ c = memchr(u->field_data, '=', u->field_length);
+ if (!c || c == u->field_data)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Invalid field.");
+
+ len = c - (const char*)u->field_data;
+
+ /* need space for label + '\n' */
+ if (size - pos < len + 1)
+ return pos;
+
+ memcpy(buf + pos, u->field_data, len);
+ buf[pos + len] = '\n';
+ pos += len + 1;
+
+ u->field_pos = len + 1;
+ u->entry_state++;
+ }
+ _fallthrough_;
+ case ENTRY_BINARY_FIELD_SIZE: {
+ uint64_t le64;
+
+ /* need space for uint64_t */
+ if (size - pos < 8)
+ return pos;
+
+ le64 = htole64(u->field_length - u->field_pos);
+ memcpy(buf + pos, &le64, 8);
+ pos += 8;
+
+ u->entry_state++;
+ continue;
+ }
+
+ case ENTRY_OUTRO:
+ /* need space for '\n' */
+ if (size - pos < 1)
+ return pos;
+
+ buf[pos++] = '\n';
+ u->entry_state++;
+ u->entries_sent++;
+
+ return pos;
+
+ default:
+                        assert_not_reached("Unexpected entry serialization state");
+ }
+ }
+ assert_not_reached("WTF?");
+}
+
+static void check_update_watchdog(Uploader *u) {
+ usec_t after;
+ usec_t elapsed_time;
+
+ if (u->watchdog_usec <= 0)
+ return;
+
+ after = now(CLOCK_MONOTONIC);
+ elapsed_time = usec_sub_unsigned(after, u->watchdog_timestamp);
+ if (elapsed_time > u->watchdog_usec / 2) {
+ log_debug("Update watchdog timer");
+ sd_notify(false, "WATCHDOG=1");
+ u->watchdog_timestamp = after;
+ }
+}
+
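+/* curl read callback (CURLOPT_READFUNCTION): called by libcurl to obtain the next
+ * chunk of the request body. Steps the journal forward and serializes entries into
+ * buf via write_entry() until the buffer is full or the journal is exhausted. */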
+static size_t journal_input_callback(void *buf, size_t size, size_t nmemb, void *userp) {
+ Uploader *u = userp;
+ int r;
+ sd_journal *j;
+ size_t filled = 0;
+ ssize_t w;
+
+ assert(u);
+ assert(nmemb <= SSIZE_MAX / size);
+
+ check_update_watchdog(u);
+
+ j = u->journal;
+
+ while (j && filled < size * nmemb) {
+ if (u->entry_state == ENTRY_DONE) {
+ r = sd_journal_next(j);
+ if (r < 0) {
+ log_error_errno(r, "Failed to move to next entry in journal: %m");
+ return CURL_READFUNC_ABORT;
+ } else if (r == 0) {
+ if (u->input_event)
+ log_debug("No more entries, waiting for journal.");
+ else {
+ log_info("No more entries, closing journal.");
+ close_journal_input(u);
+ }
+
+ u->uploading = false;
+
+ break;
+ }
+
+ u->entry_state = ENTRY_CURSOR;
+ }
+
+ w = write_entry((char*)buf + filled, size * nmemb - filled, u);
+ if (w < 0)
+ return CURL_READFUNC_ABORT;
+ filled += w;
+
+ if (filled == 0) {
+ log_error("Buffer space is too small to write entry.");
+ return CURL_READFUNC_ABORT;
+ } else if (u->entry_state != ENTRY_DONE)
+ /* This means that all available space was used up */
+ break;
+
+ log_debug("Entry %zu (%s) has been uploaded.",
+ u->entries_sent, u->current_cursor);
+ }
+
+ return filled;
+}
+
+void close_journal_input(Uploader *u) {
+ assert(u);
+
+ if (u->journal) {
+ log_debug("Closing journal input.");
+
+ sd_journal_close(u->journal);
+ u->journal = NULL;
+ }
+ u->timeout = 0;
+}
+
+static int process_journal_input(Uploader *u, int skip) {
+ int r;
+
+ if (u->uploading)
+ return 0;
+
+ r = sd_journal_next_skip(u->journal, skip);
+ if (r < 0)
+ return log_error_errno(r, "Failed to skip to next entry: %m");
+ else if (r < skip)
+ return 0;
+
+ /* have data */
+ u->entry_state = ENTRY_CURSOR;
+ return start_upload(u, journal_input_callback, u);
+}
+
+int check_journal_input(Uploader *u) {
+ if (u->input_event) {
+ int r;
+
+ r = sd_journal_process(u->journal);
+ if (r < 0) {
+ log_error_errno(r, "Failed to process journal: %m");
+ close_journal_input(u);
+ return r;
+ }
+
+ if (r == SD_JOURNAL_NOP)
+ return 0;
+ }
+
+ return process_journal_input(u, 1);
+}
+
+static int dispatch_journal_input(sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ void *userp) {
+ Uploader *u = userp;
+
+ assert(u);
+
+ if (u->uploading)
+ return 0;
+
+ log_debug("Detected journal input, checking for new data.");
+ return check_journal_input(u);
+}
+
+int open_journal_for_upload(Uploader *u,
+ sd_journal *j,
+ const char *cursor,
+ bool after_cursor,
+ bool follow) {
+ int fd, r, events;
+
+ u->journal = j;
+
+ sd_journal_set_data_threshold(j, 0);
+
+ if (follow) {
+ fd = sd_journal_get_fd(j);
+ if (fd < 0)
+ return log_error_errno(fd, "sd_journal_get_fd failed: %m");
+
+ events = sd_journal_get_events(j);
+
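+                /* If the journal fd delivers reliable wake-ups (inotify-based), we
+                 * can wait indefinitely; otherwise fall back to waking up
+                 * periodically to poll for new entries. */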
+ r = sd_journal_reliable_fd(j);
+ assert(r >= 0);
+ if (r > 0)
+ u->timeout = -1;
+ else
+ u->timeout = JOURNAL_UPLOAD_POLL_TIMEOUT;
+
+ r = sd_event_add_io(u->events, &u->input_event,
+ fd, events, dispatch_journal_input, u);
+ if (r < 0)
+ return log_error_errno(r, "Failed to register input event: %m");
+
+ log_debug("Listening for journal events on fd:%d, timeout %d",
+ fd, u->timeout == (uint64_t) -1 ? -1 : (int) u->timeout);
+ } else
+ log_debug("Not listening for journal events.");
+
+ if (cursor) {
+ r = sd_journal_seek_cursor(j, cursor);
+ if (r < 0)
+ return log_error_errno(r, "Failed to seek to cursor %s: %m",
+ cursor);
+ }
+
+ return process_journal_input(u, 1 + !!after_cursor);
+}
diff --git a/src/journal-remote/journal-upload.c b/src/journal-remote/journal-upload.c
new file mode 100644
index 0000000..ef3556f
--- /dev/null
+++ b/src/journal-remote/journal-upload.c
@@ -0,0 +1,854 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <curl/curl.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <sys/stat.h>
+
+#include "sd-daemon.h"
+
+#include "alloc-util.h"
+#include "build.h"
+#include "conf-parser.h"
+#include "daemon-util.h"
+#include "def.h"
+#include "env-file.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "format-util.h"
+#include "glob-util.h"
+#include "journal-upload.h"
+#include "log.h"
+#include "main-func.h"
+#include "mkdir.h"
+#include "parse-util.h"
+#include "pretty-print.h"
+#include "process-util.h"
+#include "rlimit-util.h"
+#include "sigbus.h"
+#include "signal-util.h"
+#include "string-util.h"
+#include "strv.h"
+#include "tmpfile-util.h"
+#include "util.h"
+
+#define PRIV_KEY_FILE CERTIFICATE_ROOT "/private/journal-upload.pem"
+#define CERT_FILE CERTIFICATE_ROOT "/certs/journal-upload.pem"
+#define TRUST_FILE CERTIFICATE_ROOT "/ca/trusted.pem"
+#define DEFAULT_PORT 19532
+
+static const char* arg_url = NULL;
+static const char *arg_key = NULL;
+static const char *arg_cert = NULL;
+static const char *arg_trust = NULL;
+static const char *arg_directory = NULL;
+static char **arg_file = NULL;
+static const char *arg_cursor = NULL;
+static bool arg_after_cursor = false;
+static int arg_journal_type = 0;
+static const char *arg_machine = NULL;
+static bool arg_merge = false;
+static int arg_follow = -1;
+static const char *arg_save_state = NULL;
+
+static void close_fd_input(Uploader *u);
+
+#define SERVER_ANSWER_KEEP 2048
+
+#define STATE_FILE "/var/lib/systemd/journal-upload/state"
+
+#define easy_setopt(curl, opt, value, level, cmd) \
+ do { \
+ code = curl_easy_setopt(curl, opt, value); \
+ if (code) { \
+ log_full(level, \
+ "curl_easy_setopt " #opt " failed: %s", \
+ curl_easy_strerror(code)); \
+ cmd; \
+ } \
+ } while (0)
+
+static size_t output_callback(char *buf,
+ size_t size,
+ size_t nmemb,
+ void *userp) {
+ Uploader *u = userp;
+
+ assert(u);
+
+ log_debug("The server answers (%zu bytes): %.*s",
+ size*nmemb, (int)(size*nmemb), buf);
+
+ if (nmemb && !u->answer) {
+ u->answer = strndup(buf, size*nmemb);
+ if (!u->answer)
+ log_warning("Failed to store server answer (%zu bytes): out of memory", size*nmemb);
+ }
+
+ return size * nmemb;
+}
+
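+/* Verify up front that we will be able to save the cursor state: create the parent
+ * directory of the state file and check that a temporary file can be created (and
+ * immediately removed) next to it. */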
+static int check_cursor_updating(Uploader *u) {
+ _cleanup_free_ char *temp_path = NULL;
+ _cleanup_fclose_ FILE *f = NULL;
+ int r;
+
+ if (!u->state_file)
+ return 0;
+
+ r = mkdir_parents(u->state_file, 0755);
+ if (r < 0)
+ return log_error_errno(r, "Cannot create parent directory of state file %s: %m",
+ u->state_file);
+
+ r = fopen_temporary(u->state_file, &f, &temp_path);
+ if (r < 0)
+ return log_error_errno(r, "Cannot save state to %s: %m",
+ u->state_file);
+ unlink(temp_path);
+
+ return 0;
+}
+
+static int update_cursor_state(Uploader *u) {
+ _cleanup_free_ char *temp_path = NULL;
+ _cleanup_fclose_ FILE *f = NULL;
+ int r;
+
+ if (!u->state_file || !u->last_cursor)
+ return 0;
+
+ r = fopen_temporary(u->state_file, &f, &temp_path);
+ if (r < 0)
+ goto fail;
+
+ fprintf(f,
+ "# This is private data. Do not parse.\n"
+ "LAST_CURSOR=%s\n",
+ u->last_cursor);
+
+ r = fflush_and_check(f);
+ if (r < 0)
+ goto fail;
+
+ if (rename(temp_path, u->state_file) < 0) {
+ r = -errno;
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ if (temp_path)
+ (void) unlink(temp_path);
+
+ (void) unlink(u->state_file);
+
+ return log_error_errno(r, "Failed to save state %s: %m", u->state_file);
+}
+
+static int load_cursor_state(Uploader *u) {
+ int r;
+
+ if (!u->state_file)
+ return 0;
+
+ r = parse_env_file(NULL, u->state_file, "LAST_CURSOR", &u->last_cursor);
+ if (r == -ENOENT)
+ log_debug("State file %s is not present.", u->state_file);
+ else if (r < 0)
+ return log_error_errno(r, "Failed to read state file %s: %m",
+ u->state_file);
+ else
+ log_debug("Last cursor was %s", u->last_cursor);
+
+ return 0;
+}
+
+int start_upload(Uploader *u,
+ size_t (*input_callback)(void *ptr,
+ size_t size,
+ size_t nmemb,
+ void *userdata),
+ void *data) {
+ CURLcode code;
+
+ assert(u);
+ assert(input_callback);
+
+ if (!u->header) {
+ struct curl_slist *h;
+
+ h = curl_slist_append(NULL, "Content-Type: application/vnd.fdo.journal");
+ if (!h)
+ return log_oom();
+
+ h = curl_slist_append(h, "Transfer-Encoding: chunked");
+ if (!h) {
+ curl_slist_free_all(h);
+ return log_oom();
+ }
+
+ h = curl_slist_append(h, "Accept: text/plain");
+ if (!h) {
+ curl_slist_free_all(h);
+ return log_oom();
+ }
+
+ u->header = h;
+ }
+
+ if (!u->easy) {
+ CURL *curl;
+
+ curl = curl_easy_init();
+ if (!curl)
+ return log_error_errno(SYNTHETIC_ERRNO(ENOSR),
+ "Call to curl_easy_init failed.");
+
+ /* tell it to POST to the URL */
+ easy_setopt(curl, CURLOPT_POST, 1L,
+ LOG_ERR, return -EXFULL);
+
+ easy_setopt(curl, CURLOPT_ERRORBUFFER, u->error,
+ LOG_ERR, return -EXFULL);
+
+ /* set where to write to */
+ easy_setopt(curl, CURLOPT_WRITEFUNCTION, output_callback,
+ LOG_ERR, return -EXFULL);
+
+ easy_setopt(curl, CURLOPT_WRITEDATA, data,
+ LOG_ERR, return -EXFULL);
+
+ /* set where to read from */
+ easy_setopt(curl, CURLOPT_READFUNCTION, input_callback,
+ LOG_ERR, return -EXFULL);
+
+ easy_setopt(curl, CURLOPT_READDATA, data,
+ LOG_ERR, return -EXFULL);
+
+                /* use our own MIME type and chunked transfer encoding */
+ easy_setopt(curl, CURLOPT_HTTPHEADER, u->header,
+ LOG_ERR, return -EXFULL);
+
+ if (DEBUG_LOGGING)
+ /* enable verbose for easier tracing */
+ easy_setopt(curl, CURLOPT_VERBOSE, 1L, LOG_WARNING, );
+
+ easy_setopt(curl, CURLOPT_USERAGENT,
+ "systemd-journal-upload " GIT_VERSION,
+ LOG_WARNING, );
+
+ if (arg_key || startswith(u->url, "https://")) {
+ easy_setopt(curl, CURLOPT_SSLKEY, arg_key ?: PRIV_KEY_FILE,
+ LOG_ERR, return -EXFULL);
+ easy_setopt(curl, CURLOPT_SSLCERT, arg_cert ?: CERT_FILE,
+ LOG_ERR, return -EXFULL);
+ }
+
+ if (streq_ptr(arg_trust, "all"))
+ easy_setopt(curl, CURLOPT_SSL_VERIFYPEER, 0,
+ LOG_ERR, return -EUCLEAN);
+ else if (arg_trust || startswith(u->url, "https://"))
+ easy_setopt(curl, CURLOPT_CAINFO, arg_trust ?: TRUST_FILE,
+ LOG_ERR, return -EXFULL);
+
+ if (arg_key || arg_trust)
+ easy_setopt(curl, CURLOPT_SSLVERSION, CURL_SSLVERSION_TLSv1,
+ LOG_WARNING, );
+
+ u->easy = curl;
+ } else {
+ /* truncate the potential old error message */
+ u->error[0] = '\0';
+
+ free(u->answer);
+                u->answer = NULL;
+ }
+
+ /* upload to this place */
+ code = curl_easy_setopt(u->easy, CURLOPT_URL, u->url);
+ if (code)
+ return log_error_errno(SYNTHETIC_ERRNO(EXFULL),
+ "curl_easy_setopt CURLOPT_URL failed: %s",
+ curl_easy_strerror(code));
+
+ u->uploading = true;
+
+ return 0;
+}
+
+static size_t fd_input_callback(void *buf, size_t size, size_t nmemb, void *userp) {
+ Uploader *u = userp;
+ ssize_t n;
+
+ assert(u);
+ assert(nmemb < SSIZE_MAX / size);
+
+ if (u->input < 0)
+ return 0;
+
+ assert(!size_multiply_overflow(size, nmemb));
+
+ n = read(u->input, buf, size * nmemb);
+ log_debug("%s: allowed %zu, read %zd", __func__, size*nmemb, n);
+ if (n > 0)
+ return n;
+
+ u->uploading = false;
+ if (n < 0) {
+ log_error_errno(errno, "Aborting transfer after read error on input: %m.");
+ return CURL_READFUNC_ABORT;
+ }
+
+ log_debug("Reached EOF");
+ close_fd_input(u);
+ return 0;
+}
+
+static void close_fd_input(Uploader *u) {
+ assert(u);
+
+ u->input = safe_close(u->input);
+ u->timeout = 0;
+}
+
+static int dispatch_fd_input(sd_event_source *event,
+ int fd,
+ uint32_t revents,
+ void *userp) {
+ Uploader *u = userp;
+
+ assert(u);
+ assert(fd >= 0);
+
+ if (revents & EPOLLHUP) {
+ log_debug("Received HUP");
+ close_fd_input(u);
+ return 0;
+ }
+
+ if (!(revents & EPOLLIN)) {
+ log_warning("Unexpected poll event %"PRIu32".", revents);
+ return -EINVAL;
+ }
+
+ if (u->uploading) {
+ log_warning("dispatch_fd_input called when uploading, ignoring.");
+ return 0;
+ }
+
+ return start_upload(u, fd_input_callback, u);
+}
+
+static int open_file_for_upload(Uploader *u, const char *filename) {
+ int fd, r = 0;
+
+ if (streq(filename, "-"))
+ fd = STDIN_FILENO;
+ else {
+ fd = open(filename, O_RDONLY|O_CLOEXEC|O_NOCTTY);
+ if (fd < 0)
+ return log_error_errno(errno, "Failed to open %s: %m", filename);
+ }
+
+ u->input = fd;
+
+ if (arg_follow) {
+ r = sd_event_add_io(u->events, &u->input_event,
+ fd, EPOLLIN, dispatch_fd_input, u);
+ if (r < 0) {
+ if (r != -EPERM || arg_follow > 0)
+ return log_error_errno(r, "Failed to register input event: %m");
+
+ /* Normal files should just be consumed without polling. */
+ r = start_upload(u, fd_input_callback, u);
+ }
+ }
+
+ return r;
+}
+
+static int dispatch_sigterm(sd_event_source *event,
+ const struct signalfd_siginfo *si,
+ void *userdata) {
+ Uploader *u = userdata;
+
+ assert(u);
+
+ log_received_signal(LOG_INFO, si);
+
+ close_fd_input(u);
+ close_journal_input(u);
+
+ sd_event_exit(u->events, 0);
+ return 0;
+}
+
+static int setup_signals(Uploader *u) {
+ int r;
+
+ assert(u);
+
+ assert_se(sigprocmask_many(SIG_SETMASK, NULL, SIGINT, SIGTERM, -1) >= 0);
+
+ r = sd_event_add_signal(u->events, &u->sigterm_event, SIGTERM, dispatch_sigterm, u);
+ if (r < 0)
+ return r;
+
+ r = sd_event_add_signal(u->events, &u->sigint_event, SIGINT, dispatch_sigterm, u);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int setup_uploader(Uploader *u, const char *url, const char *state_file) {
+ int r;
+ const char *host, *proto = "";
+
+ assert(u);
+ assert(url);
+
+ *u = (Uploader) {
+ .input = -1
+ };
+
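+        /* Normalize the URL: default to https:// when no scheme is given; if no port
+         * is specified, strip trailing slashes and append the default port; finally
+         * append /upload. */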
+ host = STARTSWITH_SET(url, "http://", "https://");
+ if (!host) {
+ host = url;
+ proto = "https://";
+ }
+
+ if (strchr(host, ':'))
+ u->url = strjoin(proto, url, "/upload");
+ else {
+ char *t;
+ size_t x;
+
+ t = strdupa(url);
+ x = strlen(t);
+ while (x > 0 && t[x - 1] == '/')
+ t[x - 1] = '\0';
+
+ u->url = strjoin(proto, t, ":" STRINGIFY(DEFAULT_PORT), "/upload");
+ }
+ if (!u->url)
+ return log_oom();
+
+ u->state_file = state_file;
+
+ r = sd_event_default(&u->events);
+ if (r < 0)
+ return log_error_errno(r, "sd_event_default failed: %m");
+
+ r = setup_signals(u);
+ if (r < 0)
+ return log_error_errno(r, "Failed to set up signals: %m");
+
+ (void) sd_watchdog_enabled(false, &u->watchdog_usec);
+
+ return load_cursor_state(u);
+}
+
+static void destroy_uploader(Uploader *u) {
+ assert(u);
+
+ curl_easy_cleanup(u->easy);
+ curl_slist_free_all(u->header);
+ free(u->answer);
+
+ free(u->last_cursor);
+ free(u->current_cursor);
+
+ free(u->url);
+
+ u->input_event = sd_event_source_unref(u->input_event);
+
+ close_fd_input(u);
+ close_journal_input(u);
+
+ sd_event_source_unref(u->sigterm_event);
+ sd_event_source_unref(u->sigint_event);
+ sd_event_unref(u->events);
+}
+
+static int perform_upload(Uploader *u) {
+ CURLcode code;
+ long status;
+
+ assert(u);
+
+ u->watchdog_timestamp = now(CLOCK_MONOTONIC);
+ code = curl_easy_perform(u->easy);
+ if (code) {
+ if (u->error[0])
+ log_error("Upload to %s failed: %.*s",
+ u->url, (int) sizeof(u->error), u->error);
+ else
+ log_error("Upload to %s failed: %s",
+ u->url, curl_easy_strerror(code));
+ return -EIO;
+ }
+
+ code = curl_easy_getinfo(u->easy, CURLINFO_RESPONSE_CODE, &status);
+ if (code)
+ return log_error_errno(SYNTHETIC_ERRNO(EUCLEAN),
+ "Failed to retrieve response code: %s",
+ curl_easy_strerror(code));
+
+ if (status >= 300)
+ return log_error_errno(SYNTHETIC_ERRNO(EIO),
+ "Upload to %s failed with code %ld: %s",
+ u->url, status, strna(u->answer));
+ else if (status < 200)
+ return log_error_errno(SYNTHETIC_ERRNO(EIO),
+ "Upload to %s finished with unexpected code %ld: %s",
+ u->url, status, strna(u->answer));
+ else
+ log_debug("Upload finished successfully with code %ld: %s",
+ status, strna(u->answer));
+
+ free_and_replace(u->last_cursor, u->current_cursor);
+
+ return update_cursor_state(u);
+}
+
+static int parse_config(void) {
+ const ConfigTableItem items[] = {
+ { "Upload", "URL", config_parse_string, 0, &arg_url },
+ { "Upload", "ServerKeyFile", config_parse_path, 0, &arg_key },
+ { "Upload", "ServerCertificateFile", config_parse_path, 0, &arg_cert },
+ { "Upload", "TrustedCertificateFile", config_parse_path, 0, &arg_trust },
+ {}};
+
+ return config_parse_many_nulstr(PKGSYSCONFDIR "/journal-upload.conf",
+ CONF_PATHS_NULSTR("systemd/journal-upload.conf.d"),
+ "Upload\0", config_item_table_lookup, items,
+ CONFIG_PARSE_WARN, NULL);
+}
+
+static int help(void) {
+ _cleanup_free_ char *link = NULL;
+ int r;
+
+ r = terminal_urlify_man("systemd-journal-upload.service", "8", &link);
+ if (r < 0)
+ return log_oom();
+
+ printf("%s -u URL {FILE|-}...\n\n"
+ "Upload journal events to a remote server.\n\n"
+ " -h --help Show this help\n"
+ " --version Show package version\n"
+ " -u --url=URL Upload to this address (default port "
+ STRINGIFY(DEFAULT_PORT) ")\n"
+ " --key=FILENAME Specify key in PEM format (default:\n"
+ " \"" PRIV_KEY_FILE "\")\n"
+ " --cert=FILENAME Specify certificate in PEM format (default:\n"
+ " \"" CERT_FILE "\")\n"
+ " --trust=FILENAME|all Specify CA certificate or disable checking (default:\n"
+ " \"" TRUST_FILE "\")\n"
+ " --system Use the system journal\n"
+ " --user Use the user journal for the current user\n"
+ " -m --merge Use all available journals\n"
+ " -M --machine=CONTAINER Operate on local container\n"
+ " -D --directory=PATH Use journal files from directory\n"
+ " --file=PATH Use this journal file\n"
+ " --cursor=CURSOR Start at the specified cursor\n"
+ " --after-cursor=CURSOR Start after the specified cursor\n"
+ " --follow[=BOOL] Do [not] wait for input\n"
+ " --save-state[=FILE] Save uploaded cursors (default \n"
+ " " STATE_FILE ")\n"
+ "\nSee the %s for details.\n"
+ , program_invocation_short_name
+ , link
+ );
+
+ return 0;
+}
+
+static int parse_argv(int argc, char *argv[]) {
+ enum {
+ ARG_VERSION = 0x100,
+ ARG_KEY,
+ ARG_CERT,
+ ARG_TRUST,
+ ARG_USER,
+ ARG_SYSTEM,
+ ARG_FILE,
+ ARG_CURSOR,
+ ARG_AFTER_CURSOR,
+ ARG_FOLLOW,
+ ARG_SAVE_STATE,
+ };
+
+ static const struct option options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, ARG_VERSION },
+ { "url", required_argument, NULL, 'u' },
+ { "key", required_argument, NULL, ARG_KEY },
+ { "cert", required_argument, NULL, ARG_CERT },
+ { "trust", required_argument, NULL, ARG_TRUST },
+ { "system", no_argument, NULL, ARG_SYSTEM },
+ { "user", no_argument, NULL, ARG_USER },
+ { "merge", no_argument, NULL, 'm' },
+ { "machine", required_argument, NULL, 'M' },
+ { "directory", required_argument, NULL, 'D' },
+ { "file", required_argument, NULL, ARG_FILE },
+ { "cursor", required_argument, NULL, ARG_CURSOR },
+ { "after-cursor", required_argument, NULL, ARG_AFTER_CURSOR },
+ { "follow", optional_argument, NULL, ARG_FOLLOW },
+ { "save-state", optional_argument, NULL, ARG_SAVE_STATE },
+ {}
+ };
+
+ int c, r;
+
+ assert(argc >= 0);
+ assert(argv);
+
+ opterr = 0;
+
+ while ((c = getopt_long(argc, argv, "hu:mM:D:", options, NULL)) >= 0)
+ switch(c) {
+ case 'h':
+ return help();
+
+ case ARG_VERSION:
+ return version();
+
+ case 'u':
+ if (arg_url)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot use more than one --url");
+
+ arg_url = optarg;
+ break;
+
+ case ARG_KEY:
+ if (arg_key)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot use more than one --key");
+
+ arg_key = optarg;
+ break;
+
+ case ARG_CERT:
+ if (arg_cert)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot use more than one --cert");
+
+ arg_cert = optarg;
+ break;
+
+ case ARG_TRUST:
+ if (arg_trust)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot use more than one --trust");
+
+ arg_trust = optarg;
+ break;
+
+ case ARG_SYSTEM:
+ arg_journal_type |= SD_JOURNAL_SYSTEM;
+ break;
+
+ case ARG_USER:
+ arg_journal_type |= SD_JOURNAL_CURRENT_USER;
+ break;
+
+ case 'm':
+ arg_merge = true;
+ break;
+
+ case 'M':
+ if (arg_machine)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot use more than one --machine/-M");
+
+ arg_machine = optarg;
+ break;
+
+ case 'D':
+ if (arg_directory)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot use more than one --directory/-D");
+
+ arg_directory = optarg;
+ break;
+
+ case ARG_FILE:
+ r = glob_extend(&arg_file, optarg);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add paths: %m");
+ break;
+
+ case ARG_CURSOR:
+ if (arg_cursor)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot use more than one --cursor/--after-cursor");
+
+ arg_cursor = optarg;
+ break;
+
+ case ARG_AFTER_CURSOR:
+ if (arg_cursor)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "cannot use more than one --cursor/--after-cursor");
+
+ arg_cursor = optarg;
+ arg_after_cursor = true;
+ break;
+
+ case ARG_FOLLOW:
+ if (optarg) {
+ r = parse_boolean(optarg);
+ if (r < 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Failed to parse --follow= parameter.");
+
+ arg_follow = !!r;
+ } else
+ arg_follow = true;
+
+ break;
+
+ case ARG_SAVE_STATE:
+ arg_save_state = optarg ?: STATE_FILE;
+ break;
+
+ case '?':
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Unknown option %s.",
+ argv[optind - 1]);
+
+ case ':':
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Missing argument to %s.",
+ argv[optind - 1]);
+
+ default:
+ assert_not_reached("Unhandled option code.");
+ }
+
+ if (!arg_url)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Required --url/-u option missing.");
+
+ if (!!arg_key != !!arg_cert)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Options --key and --cert must be used together.");
+
+ if (optind < argc && (arg_directory || arg_file || arg_machine || arg_journal_type))
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Input arguments make no sense with journal input.");
+
+ return 1;
+}
+
+static int open_journal(sd_journal **j) {
+ int r;
+
+ if (arg_directory)
+ r = sd_journal_open_directory(j, arg_directory, arg_journal_type);
+ else if (arg_file)
+ r = sd_journal_open_files(j, (const char**) arg_file, 0);
+ else if (arg_machine)
+ r = sd_journal_open_container(j, arg_machine, 0);
+ else
+ r = sd_journal_open(j, !arg_merge*SD_JOURNAL_LOCAL_ONLY + arg_journal_type);
+ if (r < 0)
+ log_error_errno(r, "Failed to open %s: %m",
+ arg_directory ? arg_directory : arg_file ? "files" : "journal");
+ return r;
+}
+
+static int run(int argc, char **argv) {
+ _cleanup_(notify_on_cleanup) const char *notify_message = NULL;
+ _cleanup_(destroy_uploader) Uploader u = {};
+ bool use_journal;
+ int r;
+
+ log_show_color(true);
+ log_parse_environment();
+
+ /* The journal merging logic potentially needs a lot of fds. */
+ (void) rlimit_nofile_bump(HIGH_RLIMIT_NOFILE);
+
+ r = parse_config();
+ if (r < 0)
+ return r;
+
+ r = parse_argv(argc, argv);
+ if (r <= 0)
+ return r;
+
+ sigbus_install();
+
+ r = setup_uploader(&u, arg_url, arg_save_state);
+ if (r < 0)
+ return r;
+
+ sd_event_set_watchdog(u.events, true);
+
+ r = check_cursor_updating(&u);
+ if (r < 0)
+ return r;
+
+ log_debug("%s running as pid "PID_FMT,
+ program_invocation_short_name, getpid_cached());
+
+ use_journal = optind >= argc;
+ if (use_journal) {
+ sd_journal *j;
+ r = open_journal(&j);
+ if (r < 0)
+ return r;
+ r = open_journal_for_upload(&u, j,
+ arg_cursor ?: u.last_cursor,
+ arg_cursor ? arg_after_cursor : true,
+ !!arg_follow);
+ if (r < 0)
+ return r;
+ }
+
+ notify_message = notify_start("READY=1\n"
+ "STATUS=Processing input...",
+ NOTIFY_STOPPING);
+
+ for (;;) {
+ r = sd_event_get_state(u.events);
+ if (r < 0)
+ return r;
+ if (r == SD_EVENT_FINISHED)
+ return 0;
+
+ if (use_journal) {
+ if (!u.journal)
+ return 0;
+
+ r = check_journal_input(&u);
+ } else if (u.input < 0) {
+ if (optind >= argc)
+ return 0;
+
+ log_debug("Using %s as input.", argv[optind]);
+ r = open_file_for_upload(&u, argv[optind++]);
+ }
+ if (r < 0)
+ return r;
+
+ if (u.uploading) {
+ r = perform_upload(&u);
+ if (r < 0)
+ return r;
+ }
+
+ r = sd_event_run(u.events, u.timeout);
+ if (r < 0)
+ return log_error_errno(r, "Failed to run event loop: %m");
+ }
+}
+
+DEFINE_MAIN_FUNCTION(run);
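The run() loop above alternates between acquiring input (journal entries via check_journal_input(), or the next FILE argument via open_file_for_upload()), pushing it out with perform_upload(), and waiting in sd_event_run() for at most u.timeout. As a rough usage sketch (the host name below is a placeholder, not part of this change), following the local journal and persisting the cursor looks like:

    systemd-journal-upload --url=https://logs.example.com --follow --save-state

Because the example URL carries no explicit port, setup_uploader() appends the compiled-in default port and the "/upload" path before handing the result to libcurl.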
diff --git a/src/journal-remote/journal-upload.conf.in b/src/journal-remote/journal-upload.conf.in
new file mode 100644
index 0000000..5f59a6f
--- /dev/null
+++ b/src/journal-remote/journal-upload.conf.in
@@ -0,0 +1,18 @@
+# This file is part of systemd.
+#
+# systemd is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# Entries in this file show the compile time defaults.
+# You can change settings by editing this file.
+# Defaults can be restored by simply deleting this file.
+#
+# See journal-upload.conf(5) for details
+
+[Upload]
+# URL=
+# ServerKeyFile=@CERTIFICATEROOT@/private/journal-upload.pem
+# ServerCertificateFile=@CERTIFICATEROOT@/certs/journal-upload.pem
+# TrustedCertificateFile=@CERTIFICATEROOT@/ca/trusted.pem
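The commented-out entries above mirror the compile-time defaults; URL= is shown without a value because a destination has to be configured explicitly (the binary refuses to start without one, see the --url check in parse_argv() above). A minimal override, with a placeholder host, would be:

    [Upload]
    URL=https://logs.example.com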
diff --git a/src/journal-remote/journal-upload.h b/src/journal-remote/journal-upload.h
new file mode 100644
index 0000000..5711905
--- /dev/null
+++ b/src/journal-remote/journal-upload.h
@@ -0,0 +1,71 @@
+#pragma once
+
+#include <inttypes.h>
+
+#include "sd-event.h"
+#include "sd-journal.h"
+#include "time-util.h"
+
+typedef enum {
+ ENTRY_CURSOR = 0, /* Nothing actually written yet. */
+ ENTRY_REALTIME,
+ ENTRY_MONOTONIC,
+ ENTRY_BOOT_ID,
+ ENTRY_NEW_FIELD, /* In between fields. */
+ ENTRY_TEXT_FIELD, /* In the middle of a text field. */
+ ENTRY_BINARY_FIELD_START, /* Writing the name of a binary field. */
+ ENTRY_BINARY_FIELD_SIZE, /* Writing the size of a binary field. */
+ ENTRY_BINARY_FIELD, /* In the middle of a binary field. */
+ ENTRY_OUTRO, /* Writing '\n' */
+ ENTRY_DONE, /* Need to move to a new field. */
+} entry_state;
+
+typedef struct Uploader {
+ sd_event *events;
+ sd_event_source *sigint_event, *sigterm_event;
+
+ char *url;
+ CURL *easy;
+ bool uploading;
+ char error[CURL_ERROR_SIZE];
+ struct curl_slist *header;
+ char *answer;
+
+ sd_event_source *input_event;
+ uint64_t timeout;
+
+ /* fd stuff */
+ int input;
+
+ /* journal stuff */
+ sd_journal* journal;
+
+ entry_state entry_state;
+ const void *field_data;
+ size_t field_pos, field_length;
+
+ /* general metrics */
+ const char *state_file;
+
+ size_t entries_sent;
+ char *last_cursor, *current_cursor;
+ usec_t watchdog_timestamp;
+ usec_t watchdog_usec;
+} Uploader;
+
+#define JOURNAL_UPLOAD_POLL_TIMEOUT (10 * USEC_PER_SEC)
+
+int start_upload(Uploader *u,
+ size_t (*input_callback)(void *ptr,
+ size_t size,
+ size_t nmemb,
+ void *userdata),
+ void *data);
+
+int open_journal_for_upload(Uploader *u,
+ sd_journal *j,
+ const char *cursor,
+ bool after_cursor,
+ bool follow);
+void close_journal_input(Uploader *u);
+int check_journal_input(Uploader *u);
diff --git a/src/journal-remote/log-generator.py b/src/journal-remote/log-generator.py
new file mode 100755
index 0000000..e1725b1
--- /dev/null
+++ b/src/journal-remote/log-generator.py
@@ -0,0 +1,78 @@
+#!/usr/bin/env python3
+import sys
+import argparse
+
+PARSER = argparse.ArgumentParser()
+PARSER.add_argument('n', type=int)
+PARSER.add_argument('--dots', action='store_true')
+PARSER.add_argument('-m', '--message-size', type=int, default=200)
+PARSER.add_argument('-d', '--data-size', type=int, default=4000)
+PARSER.add_argument('--data-type', choices={'random', 'simple'})
+OPTIONS = PARSER.parse_args()
+
+template = """\
+__CURSOR=s=6863c726210b4560b7048889d8ada5c5;i=3e931;b=f446871715504074bf7049ef0718fa93;m={m:x};t=4fd05c
+__REALTIME_TIMESTAMP={realtime_ts}
+__MONOTONIC_TIMESTAMP={monotonic_ts}
+_BOOT_ID=f446871715504074bf7049ef0718fa93
+_TRANSPORT=syslog
+PRIORITY={priority}
+SYSLOG_FACILITY={facility}
+SYSLOG_IDENTIFIER=/USR/SBIN/CRON
+MESSAGE={message}
+_UID=0
+_GID=0
+_MACHINE_ID=69121ca41d12c1b69a7960174c27b618
+_HOSTNAME=hostname
+SYSLOG_PID=25721
+_PID=25721
+_SOURCE_REALTIME_TIMESTAMP={source_realtime_ts}
+DATA={data}
+"""
+
+m = 0x198603b12d7
+realtime_ts = 1404101101501873
+monotonic_ts = 1753961140951
+source_realtime_ts = 1404101101483516
+priority = 3
+facility = 6
+
+src = open('/dev/urandom', 'rb')
+
+bytes = 0
+counter = 0
+
+for i in range(OPTIONS.n):
+ message = src.read(OPTIONS.message_size)
+ message = repr(message)[2:-1]
+
+ if OPTIONS.data_type == 'random':
+ data = repr(src.read(OPTIONS.data_size))
+ else:
+ # keep the pattern non-repeating so we get a different blob every time
+ data = '{:0{}}'.format(counter, OPTIONS.data_size)
+ counter += 1
+
+ entry = template.format(m=m,
+ realtime_ts=realtime_ts,
+ monotonic_ts=monotonic_ts,
+ source_realtime_ts=source_realtime_ts,
+ priority=priority,
+ facility=facility,
+ message=message,
+ data=data)
+ m += 1
+ realtime_ts += 1
+ monotonic_ts += 1
+ source_realtime_ts += 1
+
+ bytes += len(entry)
+
+ print(entry)
+
+ if OPTIONS.dots:
+ print('.', file=sys.stderr, end='', flush=True)
+
+if OPTIONS.dots:
+ print(file=sys.stderr)
+print('Wrote {} bytes'.format(bytes), file=sys.stderr)
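The script writes journal export format entries to stdout, so its output can be fed to anything that consumes that format. A hypothetical smoke-test invocation (paths are placeholders):

    ./log-generator.py 1000 --dots --data-type=simple > /tmp/sample.export

With --data-type=simple each DATA= field is a zero-padded counter and compresses very well; with random it is raw /dev/urandom output and essentially incompressible, so the two modes exercise both extremes on the receiving side.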
diff --git a/src/journal-remote/meson.build b/src/journal-remote/meson.build
new file mode 100644
index 0000000..2887daa
--- /dev/null
+++ b/src/journal-remote/meson.build
@@ -0,0 +1,71 @@
+# SPDX-License-Identifier: LGPL-2.1+
+
+systemd_journal_upload_sources = files('''
+ journal-upload.h
+ journal-upload.c
+ journal-upload-journal.c
+'''.split())
+
+libsystemd_journal_remote_sources = files('''
+ journal-remote-parse.h
+ journal-remote-parse.c
+ journal-remote-write.h
+ journal-remote-write.c
+ journal-remote.h
+ journal-remote.c
+'''.split())
+
+if conf.get('HAVE_MICROHTTPD') == 1
+ libsystemd_journal_remote_sources += files('''
+ microhttpd-util.h
+ microhttpd-util.c
+'''.split())
+endif
+
+libsystemd_journal_remote = static_library(
+ 'systemd-journal-remote',
+ libsystemd_journal_remote_sources,
+ include_directories : includes,
+ dependencies : [threads,
+ libmicrohttpd,
+ libgnutls,
+ libxz,
+ liblz4],
+ install : false)
+
+systemd_journal_remote_sources = files('''
+ journal-remote-main.c
+'''.split())
+
+systemd_journal_gatewayd_sources = files('''
+ journal-gatewayd.c
+ microhttpd-util.h
+ microhttpd-util.c
+'''.split())
+
+if conf.get('ENABLE_REMOTE') == 1 and conf.get('HAVE_LIBCURL') == 1
+ journal_upload_conf = configure_file(
+ input : 'journal-upload.conf.in',
+ output : 'journal-upload.conf',
+ configuration : substs)
+ install_data(journal_upload_conf,
+ install_dir : pkgsysconfdir)
+endif
+
+if conf.get('ENABLE_REMOTE') == 1 and conf.get('HAVE_MICROHTTPD') == 1
+ journal_remote_conf = configure_file(
+ input : 'journal-remote.conf.in',
+ output : 'journal-remote.conf',
+ configuration : substs)
+ install_data(journal_remote_conf,
+ install_dir : pkgsysconfdir)
+
+ install_data('browse.html',
+ install_dir : join_paths(pkgdatadir, 'gatewayd'))
+
+ meson.add_install_script('sh', '-c',
+ mkdir_p.format('/var/log/journal/remote'))
+ meson.add_install_script('sh', '-c',
+ '''chown 0:0 $DESTDIR/var/log/journal/remote &&
+ chmod 755 $DESTDIR/var/log/journal/remote || :''')
+endif
diff --git a/src/journal-remote/microhttpd-util.c b/src/journal-remote/microhttpd-util.c
new file mode 100644
index 0000000..5f56919
--- /dev/null
+++ b/src/journal-remote/microhttpd-util.c
@@ -0,0 +1,312 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+
+#if HAVE_GNUTLS
+#include <gnutls/gnutls.h>
+#include <gnutls/x509.h>
+#endif
+
+#include "alloc-util.h"
+#include "log.h"
+#include "macro.h"
+#include "microhttpd-util.h"
+#include "string-util.h"
+#include "strv.h"
+#include "util.h"
+
+void microhttpd_logger(void *arg, const char *fmt, va_list ap) {
+ char *f;
+
+ f = strjoina("microhttpd: ", fmt);
+
+ DISABLE_WARNING_FORMAT_NONLITERAL;
+ log_internalv(LOG_INFO, 0, NULL, 0, NULL, f, ap);
+ REENABLE_WARNING;
+}
+
+static int mhd_respond_internal(struct MHD_Connection *connection,
+ enum MHD_RequestTerminationCode code,
+ const char *buffer,
+ size_t size,
+ enum MHD_ResponseMemoryMode mode) {
+ assert(connection);
+
+ _cleanup_(MHD_destroy_responsep) struct MHD_Response *response
+ = MHD_create_response_from_buffer(size, (char*) buffer, mode);
+ if (!response)
+ return MHD_NO;
+
+ log_debug("Queueing response %u: %s", code, buffer);
+ MHD_add_response_header(response, "Content-Type", "text/plain");
+ return MHD_queue_response(connection, code, response);
+}
+
+int mhd_respond(struct MHD_Connection *connection,
+ enum MHD_RequestTerminationCode code,
+ const char *message) {
+
+ const char *fmt;
+
+ fmt = strjoina(message, "\n");
+
+ return mhd_respond_internal(connection, code,
+ fmt, strlen(message) + 1,
+ MHD_RESPMEM_PERSISTENT);
+}
+
+int mhd_respond_oom(struct MHD_Connection *connection) {
+ return mhd_respond(connection, MHD_HTTP_SERVICE_UNAVAILABLE, "Out of memory.");
+}
+
+int mhd_respondf(struct MHD_Connection *connection,
+ int error,
+ enum MHD_RequestTerminationCode code,
+ const char *format, ...) {
+
+ const char *fmt;
+ char *m;
+ int r;
+ va_list ap;
+
+ assert(connection);
+ assert(format);
+
+ if (error < 0)
+ error = -error;
+ errno = -error;
+ fmt = strjoina(format, "\n");
+ va_start(ap, format);
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wformat-nonliteral"
+ r = vasprintf(&m, fmt, ap);
+#pragma GCC diagnostic pop
+ va_end(ap);
+
+ if (r < 0)
+ return respond_oom(connection);
+
+ return mhd_respond_internal(connection, code, m, r, MHD_RESPMEM_MUST_FREE);
+}
+
+#if HAVE_GNUTLS
+
+static struct {
+ const char *const names[4];
+ int level;
+ bool enabled;
+} gnutls_log_map[] = {
+ { {"0"}, LOG_DEBUG },
+ { {"1", "audit"}, LOG_WARNING, true}, /* gnutls session audit */
+ { {"2", "assert"}, LOG_DEBUG }, /* gnutls assert log */
+ { {"3", "hsk", "ext"}, LOG_DEBUG }, /* gnutls handshake log */
+ { {"4", "rec"}, LOG_DEBUG }, /* gnutls record log */
+ { {"5", "dtls"}, LOG_DEBUG }, /* gnutls DTLS log */
+ { {"6", "buf"}, LOG_DEBUG },
+ { {"7", "write", "read"}, LOG_DEBUG },
+ { {"8"}, LOG_DEBUG },
+ { {"9", "enc", "int"}, LOG_DEBUG },
+};
+
+static void log_func_gnutls(int level, const char *message) {
+ assert_se(message);
+
+ if (0 <= level && level < (int) ELEMENTSOF(gnutls_log_map)) {
+ if (gnutls_log_map[level].enabled)
+ log_internal(gnutls_log_map[level].level, 0, NULL, 0, NULL, "gnutls %d/%s: %s", level, gnutls_log_map[level].names[1], message);
+ } else {
+ log_debug("Received GNUTLS message with unknown level %d.", level);
+ log_internal(LOG_DEBUG, 0, NULL, 0, NULL, "gnutls: %s", message);
+ }
+}
+
+static void log_reset_gnutls_level(void) {
+ int i;
+
+ for (i = ELEMENTSOF(gnutls_log_map) - 1; i >= 0; i--)
+ if (gnutls_log_map[i].enabled) {
+ log_debug("Setting gnutls log level to %d", i);
+ gnutls_global_set_log_level(i);
+ break;
+ }
+}
+
+static int log_enable_gnutls_category(const char *cat) {
+ unsigned i;
+
+ if (streq(cat, "all")) {
+ for (i = 0; i < ELEMENTSOF(gnutls_log_map); i++)
+ gnutls_log_map[i].enabled = true;
+ log_reset_gnutls_level();
+ return 0;
+ } else
+ for (i = 0; i < ELEMENTSOF(gnutls_log_map); i++)
+ if (strv_contains((char**)gnutls_log_map[i].names, cat)) {
+ gnutls_log_map[i].enabled = true;
+ log_reset_gnutls_level();
+ return 0;
+ }
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "No such log category: %s", cat);
+}
+
+int setup_gnutls_logger(char **categories) {
+ char **cat;
+ int r;
+
+ gnutls_global_set_log_function(log_func_gnutls);
+
+ if (categories) {
+ STRV_FOREACH(cat, categories) {
+ r = log_enable_gnutls_category(*cat);
+ if (r < 0)
+ return r;
+ }
+ } else
+ log_reset_gnutls_level();
+
+ return 0;
+}
+
+static int verify_cert_authorized(gnutls_session_t session) {
+ unsigned status;
+ gnutls_certificate_type_t type;
+ gnutls_datum_t out;
+ int r;
+
+ r = gnutls_certificate_verify_peers2(session, &status);
+ if (r < 0)
+ return log_error_errno(r, "gnutls_certificate_verify_peers2 failed: %m");
+
+ type = gnutls_certificate_type_get(session);
+ r = gnutls_certificate_verification_status_print(status, type, &out, 0);
+ if (r < 0)
+ return log_error_errno(r, "gnutls_certificate_verification_status_print failed: %m");
+
+ log_debug("Certificate status: %s", out.data);
+ gnutls_free(out.data);
+
+ return status == 0 ? 0 : -EPERM;
+}
+
+static int get_client_cert(gnutls_session_t session, gnutls_x509_crt_t *client_cert) {
+ const gnutls_datum_t *pcert;
+ unsigned listsize;
+ gnutls_x509_crt_t cert;
+ int r;
+
+ assert(session);
+ assert(client_cert);
+
+ pcert = gnutls_certificate_get_peers(session, &listsize);
+ if (!pcert || !listsize)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Failed to retrieve certificate chain");
+
+ r = gnutls_x509_crt_init(&cert);
+ if (r < 0) {
+ log_error("Failed to initialize client certificate");
+ return r;
+ }
+
+ /* Note that by passing values between 0 and listsize here, you
+ can get access to the CA's certs */
+ r = gnutls_x509_crt_import(cert, &pcert[0], GNUTLS_X509_FMT_DER);
+ if (r < 0) {
+ log_error("Failed to import client certificate");
+ gnutls_x509_crt_deinit(cert);
+ return r;
+ }
+
+ *client_cert = cert;
+ return 0;
+}
+
+static int get_auth_dn(gnutls_x509_crt_t client_cert, char **buf) {
+ size_t len = 0;
+ int r;
+
+ assert(buf);
+ assert(*buf == NULL);
+
+ r = gnutls_x509_crt_get_dn(client_cert, NULL, &len);
+ if (r != GNUTLS_E_SHORT_MEMORY_BUFFER) {
+ log_error("gnutls_x509_crt_get_dn failed");
+ return r;
+ }
+
+ *buf = malloc(len);
+ if (!*buf)
+ return log_oom();
+
+ gnutls_x509_crt_get_dn(client_cert, *buf, &len);
+ return 0;
+}
+
+static void gnutls_x509_crt_deinitp(gnutls_x509_crt_t *p) {
+ gnutls_x509_crt_deinit(*p);
+}
+
+int check_permissions(struct MHD_Connection *connection, int *code, char **hostname) {
+ const union MHD_ConnectionInfo *ci;
+ gnutls_session_t session;
+ _cleanup_(gnutls_x509_crt_deinitp) gnutls_x509_crt_t client_cert = NULL;
+ _cleanup_free_ char *buf = NULL;
+ int r;
+
+ assert(connection);
+ assert(code);
+
+ *code = 0;
+
+ ci = MHD_get_connection_info(connection,
+ MHD_CONNECTION_INFO_GNUTLS_SESSION);
+ if (!ci) {
+ log_error("MHD_get_connection_info failed: session is unencrypted");
+ *code = mhd_respond(connection, MHD_HTTP_FORBIDDEN,
+ "Encrypted connection is required");
+ return -EPERM;
+ }
+ session = ci->tls_session;
+ assert(session);
+
+ r = get_client_cert(session, &client_cert);
+ if (r < 0) {
+ *code = mhd_respond(connection, MHD_HTTP_UNAUTHORIZED,
+ "Authorization through certificate is required");
+ return -EPERM;
+ }
+
+ r = get_auth_dn(client_cert, &buf);
+ if (r < 0) {
+ *code = mhd_respond(connection, MHD_HTTP_UNAUTHORIZED,
+ "Failed to determine distinguished name from certificate");
+ return -EPERM;
+ }
+
+ log_debug("Connection from %s", buf);
+
+ if (hostname)
+ *hostname = TAKE_PTR(buf);
+
+ r = verify_cert_authorized(session);
+ if (r < 0) {
+ log_warning("Client is not authorized");
+ *code = mhd_respond(connection, MHD_HTTP_UNAUTHORIZED,
+ "Client certificate not signed by recognized authority");
+ }
+ return r;
+}
+
+#else
+int check_permissions(struct MHD_Connection *connection, int *code, char **hostname) {
+ return -EPERM;
+}
+
+int setup_gnutls_logger(char **categories) {
+ if (categories)
+ log_notice("Ignoring specified gnutls logging categories — gnutls not available.");
+ return 0;
+}
+#endif
diff --git a/src/journal-remote/microhttpd-util.h b/src/journal-remote/microhttpd-util.h
new file mode 100644
index 0000000..ba51d84
--- /dev/null
+++ b/src/journal-remote/microhttpd-util.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <microhttpd.h>
+#include <stdarg.h>
+
+#include "macro.h"
+
+/* Those defines are added when options are renamed. If the old names
+ * are not '#define'd, then they are not deprecated yet and there are
+ * enum elements with the same name. Hence let's check for the *old* name,
+ * and define the new name by the value of the old name. */
+
+/* Renamed in µhttpd 0.9.51 */
+#ifndef MHD_USE_PIPE_FOR_SHUTDOWN
+# define MHD_USE_ITC MHD_USE_PIPE_FOR_SHUTDOWN
+#endif
+
+/* Renamed in µhttpd 0.9.52 */
+#ifndef MHD_USE_EPOLL_LINUX_ONLY
+# define MHD_USE_EPOLL MHD_USE_EPOLL_LINUX_ONLY
+#endif
+
+/* Renamed in µhttpd 0.9.52 */
+#ifndef MHD_USE_SSL
+# define MHD_USE_TLS MHD_USE_SSL
+#endif
+
+/* Renamed in µhttpd 0.9.53 */
+#ifndef MHD_USE_POLL_INTERNALLY
+# define MHD_USE_POLL_INTERNAL_THREAD MHD_USE_POLL_INTERNALLY
+#endif
+
+/* Both the old and new names are defines, check for the new one. */
+
+/* Compatibility with libmicrohttpd < 0.9.38 */
+#ifndef MHD_HTTP_NOT_ACCEPTABLE
+# define MHD_HTTP_NOT_ACCEPTABLE MHD_HTTP_METHOD_NOT_ACCEPTABLE
+#endif
+
+/* Renamed in µhttpd 0.9.53 */
+#ifndef MHD_HTTP_PAYLOAD_TOO_LARGE
+# define MHD_HTTP_PAYLOAD_TOO_LARGE MHD_HTTP_REQUEST_ENTITY_TOO_LARGE
+#endif
+
+#if MHD_VERSION < 0x00094203
+# define MHD_create_response_from_fd_at_offset64 MHD_create_response_from_fd_at_offset
+#endif
+
+void microhttpd_logger(void *arg, const char *fmt, va_list ap) _printf_(2, 0);
+
+/* respond_oom() must be usable with return, hence this form. */
+#define respond_oom(connection) log_oom(), mhd_respond_oom(connection)
+
+int mhd_respondf(struct MHD_Connection *connection,
+ int error,
+ unsigned code,
+ const char *format, ...) _printf_(4,5);
+
+int mhd_respond(struct MHD_Connection *connection,
+ unsigned code,
+ const char *message);
+
+int mhd_respond_oom(struct MHD_Connection *connection);
+
+int check_permissions(struct MHD_Connection *connection, int *code, char **hostname);
+
+/* Set gnutls internal logging function to a callback which uses our
+ * own logging framework.
+ *
+ * gnutls categories are additionally filtered by our internal log
+ * level, so it should be set fairly high to capture all potentially
+ * interesting events without overwhelming detail.
+ */
+int setup_gnutls_logger(char **categories);
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(struct MHD_Daemon*, MHD_stop_daemon);
+DEFINE_TRIVIAL_CLEANUP_FUNC(struct MHD_Response*, MHD_destroy_response);
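check_permissions() queues its own error response on failure and passes the MHD status back through *code, so a request handler only has to return that value. A minimal caller sketch (hypothetical handler code, not part of this change):

    int code = MHD_NO;
    _cleanup_free_ char *hostname = NULL;

    if (check_permissions(connection, &code, &hostname) < 0)
            return code; /* error response was already queued */

    /* proceed; on success hostname (if requested) holds the client certificate DN */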
diff --git a/src/journal/audit-type.c b/src/journal/audit-type.c
new file mode 100644
index 0000000..7b3dc1e
--- /dev/null
+++ b/src/journal/audit-type.c
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include "audit-type.h"
+#include "missing_audit.h"
+
+#include "audit_type-to-name.h"
diff --git a/src/journal/audit-type.h b/src/journal/audit-type.h
new file mode 100644
index 0000000..98e5c39
--- /dev/null
+++ b/src/journal/audit-type.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <alloca.h>
+#include <stdio.h>
+
+#include "alloc-util.h"
+#include "macro.h"
+
+const char *audit_type_to_string(int type);
+int audit_type_from_string(const char *s);
+
+/* This is inspired by DNS TYPEnnn formatting */
+#define audit_type_name_alloca(type) \
+ ({ \
+ const char *_s_; \
+ _s_ = audit_type_to_string(type); \
+ if (!_s_) { \
+ _s_ = newa(char, STRLEN("AUDIT") + DECIMAL_STR_MAX(int)); \
+ sprintf((char*) _s_, "AUDIT%04i", type); \
+ } \
+ _s_; \
+ })
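audit_type_name_alloca() falls back to a synthesized name when the numeric type is not in the generated table, so callers always get something printable. For illustration (assuming AUDIT_SYSCALL is in the generated list and 9999 is not):

    const char *a = audit_type_name_alloca(AUDIT_SYSCALL); /* "SYSCALL" */
    const char *b = audit_type_name_alloca(9999);          /* "AUDIT9999" */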
diff --git a/src/journal/audit_type-to-name.awk b/src/journal/audit_type-to-name.awk
new file mode 100644
index 0000000..44fc702
--- /dev/null
+++ b/src/journal/audit_type-to-name.awk
@@ -0,0 +1,9 @@
+BEGIN{
+ print "const char *audit_type_to_string(int type) {\n\tswitch(type) {"
+}
+{
+ printf " case AUDIT_%s: return \"%s\";\n", $1, $1
+}
+END{
+ print " default: return NULL;\n\t}\n}\n"
+}
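Given a list of audit type names (one per line, name in the first column), the script generates the lookup function included by audit-type.c above. For an input containing, say, SYSCALL and PATH, the generated audit_type-to-name.h reads roughly:

    const char *audit_type_to_string(int type) {
            switch(type) {
            case AUDIT_SYSCALL: return "SYSCALL";
            case AUDIT_PATH: return "PATH";
            default: return NULL;
            }
    }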
diff --git a/src/journal/cat.c b/src/journal/cat.c
new file mode 100644
index 0000000..9900bd2
--- /dev/null
+++ b/src/journal/cat.c
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "fd-util.h"
+#include "main-func.h"
+#include "parse-util.h"
+#include "pretty-print.h"
+#include "string-util.h"
+#include "syslog-util.h"
+#include "util.h"
+
+static const char *arg_identifier = NULL;
+static int arg_priority = LOG_INFO;
+static int arg_stderr_priority = -1;
+static bool arg_level_prefix = true;
+
+static int help(void) {
+ _cleanup_free_ char *link = NULL;
+ int r;
+
+ r = terminal_urlify_man("systemd-cat", "1", &link);
+ if (r < 0)
+ return log_oom();
+
+ printf("%s [OPTIONS...] {COMMAND} ...\n\n"
+ "Execute process with stdout/stderr connected to the journal.\n\n"
+ " -h --help Show this help\n"
+ " --version Show package version\n"
+ " -t --identifier=STRING Set syslog identifier\n"
+ " -p --priority=PRIORITY Set priority value (0..7)\n"
+ " --stderr-priority=PRIORITY Set priority value (0..7) used for stderr\n"
+ " --level-prefix=BOOL Control whether level prefix shall be parsed\n"
+ "\nSee the %s for details.\n"
+ , program_invocation_short_name
+ , link
+ );
+
+ return 0;
+}
+
+static int parse_argv(int argc, char *argv[]) {
+
+ enum {
+ ARG_VERSION = 0x100,
+ ARG_STDERR_PRIORITY,
+ ARG_LEVEL_PREFIX
+ };
+
+ static const struct option options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "version", no_argument, NULL, ARG_VERSION },
+ { "identifier", required_argument, NULL, 't' },
+ { "priority", required_argument, NULL, 'p' },
+ { "stderr-priority", required_argument, NULL, ARG_STDERR_PRIORITY },
+ { "level-prefix", required_argument, NULL, ARG_LEVEL_PREFIX },
+ {}
+ };
+
+ int c;
+
+ assert(argc >= 0);
+ assert(argv);
+
+ while ((c = getopt_long(argc, argv, "+ht:p:", options, NULL)) >= 0)
+
+ switch (c) {
+
+ case 'h':
+ help();
+ return 0;
+
+ case ARG_VERSION:
+ return version();
+
+ case 't':
+ if (isempty(optarg))
+ arg_identifier = NULL;
+ else
+ arg_identifier = optarg;
+ break;
+
+ case 'p':
+ arg_priority = log_level_from_string(optarg);
+ if (arg_priority < 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Failed to parse priority value.");
+ break;
+
+ case ARG_STDERR_PRIORITY:
+ arg_stderr_priority = log_level_from_string(optarg);
+ if (arg_stderr_priority < 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Failed to parse stderr priority value.");
+ break;
+
+ case ARG_LEVEL_PREFIX: {
+ int k;
+
+ k = parse_boolean(optarg);
+ if (k < 0)
+ return log_error_errno(k, "Failed to parse level prefix value.");
+
+ arg_level_prefix = k;
+ break;
+ }
+
+ case '?':
+ return -EINVAL;
+
+ default:
+ assert_not_reached("Unhandled option");
+ }
+
+ return 1;
+}
+
+static int run(int argc, char *argv[]) {
+ _cleanup_close_ int outfd = -1, errfd = -1, saved_stderr = -1;
+ int r;
+
+ log_parse_environment();
+ log_open();
+
+ r = parse_argv(argc, argv);
+ if (r <= 0)
+ return r;
+
+ outfd = sd_journal_stream_fd(arg_identifier, arg_priority, arg_level_prefix);
+ if (outfd < 0)
+ return log_error_errno(outfd, "Failed to create stream fd: %m");
+
+ if (arg_stderr_priority >= 0 && arg_stderr_priority != arg_priority) {
+ errfd = sd_journal_stream_fd(arg_identifier, arg_stderr_priority, arg_level_prefix);
+ if (errfd < 0)
+ return log_error_errno(errfd, "Failed to create stream fd: %m");
+ }
+
+ saved_stderr = fcntl(STDERR_FILENO, F_DUPFD_CLOEXEC, 3);
+
+ r = rearrange_stdio(STDIN_FILENO, outfd, errfd < 0 ? outfd : errfd); /* Invalidates fd on success + error! */
+ TAKE_FD(outfd);
+ TAKE_FD(errfd);
+ if (r < 0)
+ return log_error_errno(r, "Failed to rearrange stdout/stderr: %m");
+
+ if (argc <= optind)
+ (void) execl("/bin/cat", "/bin/cat", NULL);
+ else
+ (void) execvp(argv[optind], argv + optind);
+ r = -errno;
+
+ /* Let's try to restore a working stderr, so we can print the error message */
+ if (saved_stderr >= 0)
+ (void) dup3(saved_stderr, STDERR_FILENO, 0);
+
+ return log_error_errno(r, "Failed to execute process: %m");
+}
+
+DEFINE_MAIN_FUNCTION(run);
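When no COMMAND is given, the program execs /bin/cat, so it also works as a pipe target. Two illustrative invocations (identifier and message text are arbitrary):

    systemd-cat -t backup-script -p warning echo "disk nearly full"
    echo "hello journal" | systemd-cat -t demo

Standard output goes to the journal at the requested priority; with --stderr-priority= stderr is routed to the second sd_journal_stream_fd() stream at its own level.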
diff --git a/src/journal/catalog.c b/src/journal/catalog.c
new file mode 100644
index 0000000..4062f12
--- /dev/null
+++ b/src/journal/catalog.c
@@ -0,0 +1,743 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <locale.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "sd-id128.h"
+
+#include "alloc-util.h"
+#include "catalog.h"
+#include "conf-files.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "hashmap.h"
+#include "log.h"
+#include "mkdir.h"
+#include "path-util.h"
+#include "siphash24.h"
+#include "sparse-endian.h"
+#include "strbuf.h"
+#include "string-util.h"
+#include "strv.h"
+#include "tmpfile-util.h"
+#include "util.h"
+
+const char * const catalog_file_dirs[] = {
+ "/usr/local/lib/systemd/catalog/",
+ "/usr/lib/systemd/catalog/",
+ NULL
+};
+
+#define CATALOG_SIGNATURE (uint8_t[]) { 'R', 'H', 'H', 'H', 'K', 'S', 'L', 'P' }
+
+typedef struct CatalogHeader {
+ uint8_t signature[8]; /* "RHHHKSLP" */
+ le32_t compatible_flags;
+ le32_t incompatible_flags;
+ le64_t header_size;
+ le64_t n_items;
+ le64_t catalog_item_size;
+} CatalogHeader;
+
+typedef struct CatalogItem {
+ sd_id128_t id;
+ char language[32]; /* One byte is used for termination, so the maximum allowed
+ * length of the string is actually 31 bytes. */
+ le64_t offset;
+} CatalogItem;
+
+static void catalog_hash_func(const CatalogItem *i, struct siphash *state) {
+ siphash24_compress(&i->id, sizeof(i->id), state);
+ siphash24_compress(i->language, strlen(i->language), state);
+}
+
+static int catalog_compare_func(const CatalogItem *a, const CatalogItem *b) {
+ unsigned k;
+ int r;
+
+ for (k = 0; k < ELEMENTSOF(b->id.bytes); k++) {
+ r = CMP(a->id.bytes[k], b->id.bytes[k]);
+ if (r != 0)
+ return r;
+ }
+
+ return strcmp(a->language, b->language);
+}
+
+DEFINE_HASH_OPS(catalog_hash_ops, CatalogItem, catalog_hash_func, catalog_compare_func);
+
+static bool next_header(const char **s) {
+ const char *e;
+
+ e = strchr(*s, '\n');
+
+ /* Unexpected end */
+ if (!e)
+ return false;
+
+ /* End of headers */
+ if (e == *s)
+ return false;
+
+ *s = e + 1;
+ return true;
+}
+
+static const char *skip_header(const char *s) {
+ while (next_header(&s))
+ ;
+ return s;
+}
+
+static char *combine_entries(const char *one, const char *two) {
+ const char *b1, *b2;
+ size_t l1, l2, n;
+ char *dest, *p;
+
+ /* Find split point of headers to body */
+ b1 = skip_header(one);
+ b2 = skip_header(two);
+
+ l1 = strlen(one);
+ l2 = strlen(two);
+ dest = new(char, l1 + l2 + 1);
+ if (!dest) {
+ log_oom();
+ return NULL;
+ }
+
+ p = dest;
+
+ /* Headers from @one */
+ n = b1 - one;
+ p = mempcpy(p, one, n);
+
+ /* Headers from @two, these will only be found if not present above */
+ n = b2 - two;
+ p = mempcpy(p, two, n);
+
+ /* Body from @one */
+ n = l1 - (b1 - one);
+ if (n > 0) {
+ memcpy(p, b1, n);
+ p += n;
+
+ /* Body from @two */
+ } else {
+ n = l2 - (b2 - two);
+ memcpy(p, b2, n);
+ p += n;
+ }
+
+ assert(p - dest <= (ptrdiff_t)(l1 + l2));
+ p[0] = '\0';
+ return dest;
+}
+
+static int finish_item(
+ Hashmap *h,
+ sd_id128_t id,
+ const char *language,
+ char *payload, size_t payload_size) {
+
+ _cleanup_free_ CatalogItem *i = NULL;
+ _cleanup_free_ char *prev = NULL, *combined = NULL;
+
+ assert(h);
+ assert(payload);
+ assert(payload_size > 0);
+
+ i = new0(CatalogItem, 1);
+ if (!i)
+ return log_oom();
+
+ i->id = id;
+ if (language) {
+ assert(strlen(language) > 1 && strlen(language) < 32);
+ strcpy(i->language, language);
+ }
+
+ prev = hashmap_get(h, i);
+ if (prev) {
+ /* Already have such an item, combine them */
+ combined = combine_entries(payload, prev);
+ if (!combined)
+ return log_oom();
+
+ if (hashmap_update(h, i, combined) < 0)
+ return log_oom();
+ combined = NULL;
+ } else {
+ /* A new item */
+ combined = memdup(payload, payload_size + 1);
+ if (!combined)
+ return log_oom();
+
+ if (hashmap_put(h, i, combined) < 0)
+ return log_oom();
+ i = NULL;
+ combined = NULL;
+ }
+
+ return 0;
+}
+
+int catalog_file_lang(const char* filename, char **lang) {
+ char *beg, *end, *_lang;
+
+ end = endswith(filename, ".catalog");
+ if (!end)
+ return 0;
+
+ beg = end - 1;
+ while (beg > filename && !IN_SET(*beg, '.', '/') && end - beg < 32)
+ beg--;
+
+ if (*beg != '.' || end <= beg + 1)
+ return 0;
+
+ _lang = strndup(beg + 1, end - beg - 1);
+ if (!_lang)
+ return -ENOMEM;
+
+ *lang = _lang;
+ return 1;
+}
+
+static int catalog_entry_lang(const char* filename, int line,
+ const char* t, const char* deflang, char **lang) {
+ size_t c;
+
+ c = strlen(t);
+ if (c < 2)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] Language too short.",
+ filename, line);
+ if (c > 31)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] language too long.", filename,
+ line);
+
+ if (deflang) {
+ if (streq(t, deflang)) {
+ log_warning("[%s:%u] language specified unnecessarily",
+ filename, line);
+ return 0;
+ } else
+ log_warning("[%s:%u] language differs from default for file",
+ filename, line);
+ }
+
+ *lang = strdup(t);
+ if (!*lang)
+ return -ENOMEM;
+
+ return 0;
+}
+
+int catalog_import_file(Hashmap *h, const char *path) {
+ _cleanup_fclose_ FILE *f = NULL;
+ _cleanup_free_ char *payload = NULL;
+ size_t payload_size = 0, payload_allocated = 0;
+ unsigned n = 0;
+ sd_id128_t id;
+ _cleanup_free_ char *deflang = NULL, *lang = NULL;
+ bool got_id = false, empty_line = true;
+ int r;
+
+ assert(h);
+ assert(path);
+
+ f = fopen(path, "re");
+ if (!f)
+ return log_error_errno(errno, "Failed to open file %s: %m", path);
+
+ r = catalog_file_lang(path, &deflang);
+ if (r < 0)
+ log_error_errno(r, "Failed to determine language for file %s: %m", path);
+ if (r == 1)
+ log_debug("File %s has language %s.", path, deflang);
+
+ for (;;) {
+ _cleanup_free_ char *line = NULL;
+ size_t line_len;
+
+ r = read_line(f, LONG_LINE_MAX, &line);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read file %s: %m", path);
+ if (r == 0)
+ break;
+
+ n++;
+
+ if (isempty(line)) {
+ empty_line = true;
+ continue;
+ }
+
+ if (strchr(COMMENTS, line[0]))
+ continue;
+
+ if (empty_line &&
+ strlen(line) >= 2+1+32 &&
+ line[0] == '-' &&
+ line[1] == '-' &&
+ line[2] == ' ' &&
+ IN_SET(line[2+1+32], ' ', '\0')) {
+
+ bool with_language;
+ sd_id128_t jd;
+
+ /* New entry */
+
+ with_language = line[2+1+32] != '\0';
+ line[2+1+32] = '\0';
+
+ if (sd_id128_from_string(line + 2 + 1, &jd) >= 0) {
+
+ if (got_id) {
+ if (payload_size == 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] No payload text.",
+ path,
+ n);
+
+ r = finish_item(h, id, lang ?: deflang, payload, payload_size);
+ if (r < 0)
+ return r;
+
+ lang = mfree(lang);
+ payload_size = 0;
+ }
+
+ if (with_language) {
+ char *t;
+
+ t = strstrip(line + 2 + 1 + 32 + 1);
+ r = catalog_entry_lang(path, n, t, deflang, &lang);
+ if (r < 0)
+ return r;
+ }
+
+ got_id = true;
+ empty_line = false;
+ id = jd;
+
+ continue;
+ }
+ }
+
+ /* Payload */
+ if (!got_id)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] Got payload before ID.",
+ path, n);
+
+ line_len = strlen(line);
+ if (!GREEDY_REALLOC(payload, payload_allocated,
+ payload_size + (empty_line ? 1 : 0) + line_len + 1 + 1))
+ return log_oom();
+
+ if (empty_line)
+ payload[payload_size++] = '\n';
+ memcpy(payload + payload_size, line, line_len);
+ payload_size += line_len;
+ payload[payload_size++] = '\n';
+ payload[payload_size] = '\0';
+
+ empty_line = false;
+ }
+
+ if (got_id) {
+ if (payload_size == 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "[%s:%u] No payload text.",
+ path, n);
+
+ r = finish_item(h, id, lang ?: deflang, payload, payload_size);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int64_t write_catalog(const char *database, struct strbuf *sb,
+ CatalogItem *items, size_t n) {
+ CatalogHeader header;
+ _cleanup_fclose_ FILE *w = NULL;
+ int r;
+ _cleanup_free_ char *d = NULL, *p = NULL;
+ size_t k;
+
+ d = dirname_malloc(database);
+ if (!d)
+ return log_oom();
+
+ r = mkdir_p(d, 0775);
+ if (r < 0)
+ return log_error_errno(r, "Recursive mkdir %s: %m", d);
+
+ r = fopen_temporary(database, &w, &p);
+ if (r < 0)
+ return log_error_errno(r, "Failed to open database for writing: %s: %m",
+ database);
+
+ zero(header);
+ memcpy(header.signature, CATALOG_SIGNATURE, sizeof(header.signature));
+ header.header_size = htole64(ALIGN_TO(sizeof(CatalogHeader), 8));
+ header.catalog_item_size = htole64(sizeof(CatalogItem));
+ header.n_items = htole64(n);
+
+ r = -EIO;
+
+ k = fwrite(&header, 1, sizeof(header), w);
+ if (k != sizeof(header)) {
+ log_error("%s: failed to write header.", p);
+ goto error;
+ }
+
+ k = fwrite(items, 1, n * sizeof(CatalogItem), w);
+ if (k != n * sizeof(CatalogItem)) {
+ log_error("%s: failed to write database.", p);
+ goto error;
+ }
+
+ k = fwrite(sb->buf, 1, sb->len, w);
+ if (k != sb->len) {
+ log_error("%s: failed to write strings.", p);
+ goto error;
+ }
+
+ r = fflush_and_check(w);
+ if (r < 0) {
+ log_error_errno(r, "%s: failed to write database: %m", p);
+ goto error;
+ }
+
+ fchmod(fileno(w), 0644);
+
+ if (rename(p, database) < 0) {
+ r = log_error_errno(errno, "rename (%s -> %s) failed: %m", p, database);
+ goto error;
+ }
+
+ return ftello(w);
+
+error:
+ (void) unlink(p);
+ return r;
+}
+
+int catalog_update(const char* database, const char* root, const char* const* dirs) {
+ _cleanup_strv_free_ char **files = NULL;
+ char **f;
+ _cleanup_(strbuf_cleanupp) struct strbuf *sb = NULL;
+ _cleanup_hashmap_free_free_free_ Hashmap *h = NULL;
+ _cleanup_free_ CatalogItem *items = NULL;
+ ssize_t offset;
+ char *payload;
+ CatalogItem *i;
+ Iterator j;
+ unsigned n;
+ int r;
+ int64_t sz;
+
+ h = hashmap_new(&catalog_hash_ops);
+ sb = strbuf_new();
+ if (!h || !sb)
+ return log_oom();
+
+ r = conf_files_list_strv(&files, ".catalog", root, 0, dirs);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get catalog files: %m");
+
+ STRV_FOREACH(f, files) {
+ log_debug("Reading file '%s'", *f);
+ r = catalog_import_file(h, *f);
+ if (r < 0)
+ return log_error_errno(r, "Failed to import file '%s': %m", *f);
+ }
+
+ if (hashmap_size(h) <= 0) {
+ log_info("No items in catalog.");
+ return 0;
+ } else
+ log_debug("Found %u items in catalog.", hashmap_size(h));
+
+ items = new(CatalogItem, hashmap_size(h));
+ if (!items)
+ return log_oom();
+
+ n = 0;
+ HASHMAP_FOREACH_KEY(payload, i, h, j) {
+ log_debug("Found " SD_ID128_FORMAT_STR ", language %s",
+ SD_ID128_FORMAT_VAL(i->id),
+ isempty(i->language) ? "C" : i->language);
+
+ offset = strbuf_add_string(sb, payload, strlen(payload));
+ if (offset < 0)
+ return log_oom();
+
+ i->offset = htole64((uint64_t) offset);
+ items[n++] = *i;
+ }
+
+ assert(n == hashmap_size(h));
+ typesafe_qsort(items, n, catalog_compare_func);
+
+ strbuf_complete(sb);
+
+ sz = write_catalog(database, sb, items, n);
+ if (sz < 0)
+ return log_error_errno(sz, "Failed to write %s: %m", database);
+
+ log_debug("%s: wrote %u items, with %zu bytes of strings, %"PRIi64" total size.",
+ database, n, sb->len, sz);
+ return 0;
+}
+
+static int open_mmap(const char *database, int *_fd, struct stat *_st, void **_p) {
+ const CatalogHeader *h;
+ int fd;
+ void *p;
+ struct stat st;
+
+ assert(_fd);
+ assert(_st);
+ assert(_p);
+
+ fd = open(database, O_RDONLY|O_CLOEXEC);
+ if (fd < 0)
+ return -errno;
+
+ if (fstat(fd, &st) < 0) {
+ safe_close(fd);
+ return -errno;
+ }
+
+ if (st.st_size < (off_t) sizeof(CatalogHeader)) {
+ safe_close(fd);
+ return -EINVAL;
+ }
+
+ p = mmap(NULL, PAGE_ALIGN(st.st_size), PROT_READ, MAP_SHARED, fd, 0);
+ if (p == MAP_FAILED) {
+ safe_close(fd);
+ return -errno;
+ }
+
+ h = p;
+ if (memcmp(h->signature, CATALOG_SIGNATURE, sizeof(h->signature)) != 0 ||
+ le64toh(h->header_size) < sizeof(CatalogHeader) ||
+ le64toh(h->catalog_item_size) < sizeof(CatalogItem) ||
+ h->incompatible_flags != 0 ||
+ le64toh(h->n_items) <= 0 ||
+ st.st_size < (off_t) (le64toh(h->header_size) + le64toh(h->catalog_item_size) * le64toh(h->n_items))) {
+ safe_close(fd);
+ munmap(p, st.st_size);
+ return -EBADMSG;
+ }
+
+ *_fd = fd;
+ *_st = st;
+ *_p = p;
+
+ return 0;
+}
+
+static const char *find_id(void *p, sd_id128_t id) {
+ CatalogItem *f = NULL, key = { .id = id };
+ const CatalogHeader *h = p;
+ const char *loc;
+
+ loc = setlocale(LC_MESSAGES, NULL);
+ if (!isempty(loc) && !STR_IN_SET(loc, "C", "POSIX")) {
+ size_t len;
+
+ len = strcspn(loc, ".@");
+ if (len > sizeof(key.language) - 1)
+ log_debug("LC_MESSAGES value too long, ignoring: \"%.*s\"", (int) len, loc);
+ else {
+ strncpy(key.language, loc, len);
+ key.language[len] = '\0';
+
+ f = bsearch(&key,
+ (const uint8_t*) p + le64toh(h->header_size),
+ le64toh(h->n_items),
+ le64toh(h->catalog_item_size),
+ (comparison_fn_t) catalog_compare_func);
+ if (!f) {
+ char *e;
+
+ e = strchr(key.language, '_');
+ if (e) {
+ *e = 0;
+ f = bsearch(&key,
+ (const uint8_t*) p + le64toh(h->header_size),
+ le64toh(h->n_items),
+ le64toh(h->catalog_item_size),
+ (comparison_fn_t) catalog_compare_func);
+ }
+ }
+ }
+ }
+
+ if (!f) {
+ zero(key.language);
+ f = bsearch(&key,
+ (const uint8_t*) p + le64toh(h->header_size),
+ le64toh(h->n_items),
+ le64toh(h->catalog_item_size),
+ (comparison_fn_t) catalog_compare_func);
+ }
+
+ if (!f)
+ return NULL;
+
+ return (const char*) p +
+ le64toh(h->header_size) +
+ le64toh(h->n_items) * le64toh(h->catalog_item_size) +
+ le64toh(f->offset);
+}
+
+int catalog_get(const char* database, sd_id128_t id, char **_text) {
+ _cleanup_close_ int fd = -1;
+ void *p = NULL;
+ struct stat st = {};
+ char *text = NULL;
+ int r;
+ const char *s;
+
+ assert(_text);
+
+ r = open_mmap(database, &fd, &st, &p);
+ if (r < 0)
+ return r;
+
+ s = find_id(p, id);
+ if (!s) {
+ r = -ENOENT;
+ goto finish;
+ }
+
+ text = strdup(s);
+ if (!text) {
+ r = -ENOMEM;
+ goto finish;
+ }
+
+ *_text = text;
+ r = 0;
+
+finish:
+ if (p)
+ munmap(p, st.st_size);
+
+ return r;
+}
+
+static char *find_header(const char *s, const char *header) {
+
+ for (;;) {
+ const char *v;
+
+ v = startswith(s, header);
+ if (v) {
+ v += strspn(v, WHITESPACE);
+ return strndup(v, strcspn(v, NEWLINE));
+ }
+
+ if (!next_header(&s))
+ return NULL;
+ }
+}
+
+static void dump_catalog_entry(FILE *f, sd_id128_t id, const char *s, bool oneline) {
+ if (oneline) {
+ _cleanup_free_ char *subject = NULL, *defined_by = NULL;
+
+ subject = find_header(s, "Subject:");
+ defined_by = find_header(s, "Defined-By:");
+
+ fprintf(f, SD_ID128_FORMAT_STR " %s: %s\n",
+ SD_ID128_FORMAT_VAL(id),
+ strna(defined_by), strna(subject));
+ } else
+ fprintf(f, "-- " SD_ID128_FORMAT_STR "\n%s\n",
+ SD_ID128_FORMAT_VAL(id), s);
+}
+
+int catalog_list(FILE *f, const char *database, bool oneline) {
+ _cleanup_close_ int fd = -1;
+ void *p = NULL;
+ struct stat st;
+ const CatalogHeader *h;
+ const CatalogItem *items;
+ int r;
+ unsigned n;
+ sd_id128_t last_id;
+ bool last_id_set = false;
+
+ r = open_mmap(database, &fd, &st, &p);
+ if (r < 0)
+ return r;
+
+ h = p;
+ items = (const CatalogItem*) ((const uint8_t*) p + le64toh(h->header_size));
+
+ for (n = 0; n < le64toh(h->n_items); n++) {
+ const char *s;
+
+ if (last_id_set && sd_id128_equal(last_id, items[n].id))
+ continue;
+
+ assert_se(s = find_id(p, items[n].id));
+
+ dump_catalog_entry(f, items[n].id, s, oneline);
+
+ last_id_set = true;
+ last_id = items[n].id;
+ }
+
+ munmap(p, st.st_size);
+
+ return 0;
+}
+
+int catalog_list_items(FILE *f, const char *database, bool oneline, char **items) {
+ char **item;
+ int r = 0;
+
+ STRV_FOREACH(item, items) {
+ sd_id128_t id;
+ int k;
+ _cleanup_free_ char *msg = NULL;
+
+ k = sd_id128_from_string(*item, &id);
+ if (k < 0) {
+ log_error_errno(k, "Failed to parse id128 '%s': %m", *item);
+ if (r == 0)
+ r = k;
+ continue;
+ }
+
+ k = catalog_get(database, id, &msg);
+ if (k < 0) {
+ log_full_errno(k == -ENOENT ? LOG_NOTICE : LOG_ERR, k,
+ "Failed to retrieve catalog entry for '%s': %m", *item);
+ if (r == 0)
+ r = k;
+ continue;
+ }
+
+ dump_catalog_entry(f, id, msg, oneline);
+ }
+
+ return r;
+}
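The database written by write_catalog() is a flat, mmap-friendly file: a CatalogHeader, then n_items CatalogItem records sorted by (id, language) so that find_id() can bsearch them, then the string payloads that the per-item offsets point into. Schematically (offsets relative to the start of the file):

    0                                         CatalogHeader (signature "RHHHKSLP", sizes, n_items)
    header_size                               CatalogItem[n_items] (id, language[32], le64 offset)
    header_size + n_items*catalog_item_size   payload text; entry text = this base + item offset

find_id() tries the current LC_MESSAGES locale first (truncated at '.' or '@'), then the language without its '_REGION' suffix, and finally the default entry with an empty language field.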
diff --git a/src/journal/catalog.h b/src/journal/catalog.h
new file mode 100644
index 0000000..4e6f161
--- /dev/null
+++ b/src/journal/catalog.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <stdbool.h>
+
+#include "sd-id128.h"
+
+#include "hashmap.h"
+#include "strbuf.h"
+
+int catalog_import_file(Hashmap *h, const char *path);
+int catalog_update(const char* database, const char* root, const char* const* dirs);
+int catalog_get(const char* database, sd_id128_t id, char **data);
+int catalog_list(FILE *f, const char* database, bool oneline);
+int catalog_list_items(FILE *f, const char* database, bool oneline, char **items);
+int catalog_file_lang(const char *filename, char **lang);
+extern const char * const catalog_file_dirs[];
+extern const struct hash_ops catalog_hash_ops;
diff --git a/src/journal/compress.c b/src/journal/compress.c
new file mode 100644
index 0000000..e95ce2b
--- /dev/null
+++ b/src/journal/compress.c
@@ -0,0 +1,677 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#if HAVE_XZ
+#include <lzma.h>
+#endif
+
+#if HAVE_LZ4
+#include <lz4.h>
+#include <lz4frame.h>
+#endif
+
+#include "alloc-util.h"
+#include "compress.h"
+#include "fd-util.h"
+#include "io-util.h"
+#include "journal-def.h"
+#include "macro.h"
+#include "sparse-endian.h"
+#include "string-table.h"
+#include "string-util.h"
+#include "util.h"
+
+#if HAVE_LZ4
+DEFINE_TRIVIAL_CLEANUP_FUNC(LZ4F_compressionContext_t, LZ4F_freeCompressionContext);
+DEFINE_TRIVIAL_CLEANUP_FUNC(LZ4F_decompressionContext_t, LZ4F_freeDecompressionContext);
+#endif
+
+#define ALIGN_8(l) ALIGN_TO(l, sizeof(size_t))
+
+static const char* const object_compressed_table[_OBJECT_COMPRESSED_MAX] = {
+ [OBJECT_COMPRESSED_XZ] = "XZ",
+ [OBJECT_COMPRESSED_LZ4] = "LZ4",
+};
+
+DEFINE_STRING_TABLE_LOOKUP(object_compressed, int);
+
+int compress_blob_xz(const void *src, uint64_t src_size,
+ void *dst, size_t dst_alloc_size, size_t *dst_size) {
+#if HAVE_XZ
+ static const lzma_options_lzma opt = {
+ 1u << 20u, NULL, 0, LZMA_LC_DEFAULT, LZMA_LP_DEFAULT,
+ LZMA_PB_DEFAULT, LZMA_MODE_FAST, 128, LZMA_MF_HC3, 4
+ };
+ static const lzma_filter filters[] = {
+ { LZMA_FILTER_LZMA2, (lzma_options_lzma*) &opt },
+ { LZMA_VLI_UNKNOWN, NULL }
+ };
+ lzma_ret ret;
+ size_t out_pos = 0;
+
+ assert(src);
+ assert(src_size > 0);
+ assert(dst);
+ assert(dst_alloc_size > 0);
+ assert(dst_size);
+
+ /* Returns < 0 if we couldn't compress the data or the
+ * compressed result is longer than the original */
+
+ if (src_size < 80)
+ return -ENOBUFS;
+
+ ret = lzma_stream_buffer_encode((lzma_filter*) filters, LZMA_CHECK_NONE, NULL,
+ src, src_size, dst, &out_pos, dst_alloc_size);
+ if (ret != LZMA_OK)
+ return -ENOBUFS;
+
+ *dst_size = out_pos;
+ return 0;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+int compress_blob_lz4(const void *src, uint64_t src_size,
+ void *dst, size_t dst_alloc_size, size_t *dst_size) {
+#if HAVE_LZ4
+ int r;
+
+ assert(src);
+ assert(src_size > 0);
+ assert(dst);
+ assert(dst_alloc_size > 0);
+ assert(dst_size);
+
+ /* Returns < 0 if we couldn't compress the data or the
+ * compressed result is longer than the original */
+
+ if (src_size < 9)
+ return -ENOBUFS;
+
+ r = LZ4_compress_default(src, (char*)dst + 8, src_size, (int) dst_alloc_size - 8);
+ if (r <= 0)
+ return -ENOBUFS;
+
+ *(le64_t*) dst = htole64(src_size);
+ *dst_size = r + 8;
+
+ return 0;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+int decompress_blob_xz(const void *src, uint64_t src_size,
+ void **dst, size_t *dst_alloc_size, size_t* dst_size, size_t dst_max) {
+
+#if HAVE_XZ
+ _cleanup_(lzma_end) lzma_stream s = LZMA_STREAM_INIT;
+ lzma_ret ret;
+ size_t space;
+
+ assert(src);
+ assert(src_size > 0);
+ assert(dst);
+ assert(dst_alloc_size);
+ assert(dst_size);
+ assert(*dst_alloc_size == 0 || *dst);
+
+ ret = lzma_stream_decoder(&s, UINT64_MAX, 0);
+ if (ret != LZMA_OK)
+ return -ENOMEM;
+
+ space = MIN(src_size * 2, dst_max ?: (size_t) -1);
+ if (!greedy_realloc(dst, dst_alloc_size, space, 1))
+ return -ENOMEM;
+
+ s.next_in = src;
+ s.avail_in = src_size;
+
+ s.next_out = *dst;
+ s.avail_out = space;
+
+ for (;;) {
+ size_t used;
+
+ ret = lzma_code(&s, LZMA_FINISH);
+
+ if (ret == LZMA_STREAM_END)
+ break;
+ else if (ret != LZMA_OK)
+ return -ENOMEM;
+
+ if (dst_max > 0 && (space - s.avail_out) >= dst_max)
+ break;
+ else if (dst_max > 0 && space == dst_max)
+ return -ENOBUFS;
+
+ used = space - s.avail_out;
+ space = MIN(2 * space, dst_max ?: (size_t) -1);
+ if (!greedy_realloc(dst, dst_alloc_size, space, 1))
+ return -ENOMEM;
+
+ s.avail_out = space - used;
+ s.next_out = *(uint8_t**)dst + used;
+ }
+
+ *dst_size = space - s.avail_out;
+ return 0;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+int decompress_blob_lz4(const void *src, uint64_t src_size,
+ void **dst, size_t *dst_alloc_size, size_t* dst_size, size_t dst_max) {
+
+#if HAVE_LZ4
+ char* out;
+ int r, size; /* LZ4 uses int for size */
+
+ assert(src);
+ assert(src_size > 0);
+ assert(dst);
+ assert(dst_alloc_size);
+ assert(dst_size);
+ assert(*dst_alloc_size == 0 || *dst);
+
+ if (src_size <= 8)
+ return -EBADMSG;
+
+ size = le64toh( *(le64_t*)src );
+ if (size < 0 || (unsigned) size != le64toh(*(le64_t*)src))
+ return -EFBIG;
+ if ((size_t) size > *dst_alloc_size) {
+ out = realloc(*dst, size);
+ if (!out)
+ return -ENOMEM;
+ *dst = out;
+ *dst_alloc_size = size;
+ } else
+ out = *dst;
+
+ r = LZ4_decompress_safe((char*)src + 8, out, src_size - 8, size);
+ if (r < 0 || r != size)
+ return -EBADMSG;
+
+ *dst_size = size;
+ return 0;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+int decompress_blob(int compression,
+ const void *src, uint64_t src_size,
+ void **dst, size_t *dst_alloc_size, size_t* dst_size, size_t dst_max) {
+ if (compression == OBJECT_COMPRESSED_XZ)
+ return decompress_blob_xz(src, src_size,
+ dst, dst_alloc_size, dst_size, dst_max);
+ else if (compression == OBJECT_COMPRESSED_LZ4)
+ return decompress_blob_lz4(src, src_size,
+ dst, dst_alloc_size, dst_size, dst_max);
+ else
+ return -EBADMSG;
+}
+
+int decompress_startswith_xz(const void *src, uint64_t src_size,
+ void **buffer, size_t *buffer_size,
+ const void *prefix, size_t prefix_len,
+ uint8_t extra) {
+
+#if HAVE_XZ
+ _cleanup_(lzma_end) lzma_stream s = LZMA_STREAM_INIT;
+ lzma_ret ret;
+
+ /* Checks whether the decompressed blob starts with the
+ * mentioned prefix. The byte extra needs to follow the
+ * prefix */
+
+ assert(src);
+ assert(src_size > 0);
+ assert(buffer);
+ assert(buffer_size);
+ assert(prefix);
+ assert(*buffer_size == 0 || *buffer);
+
+ ret = lzma_stream_decoder(&s, UINT64_MAX, 0);
+ if (ret != LZMA_OK)
+ return -EBADMSG;
+
+ if (!(greedy_realloc(buffer, buffer_size, ALIGN_8(prefix_len + 1), 1)))
+ return -ENOMEM;
+
+ s.next_in = src;
+ s.avail_in = src_size;
+
+ s.next_out = *buffer;
+ s.avail_out = *buffer_size;
+
+ for (;;) {
+ ret = lzma_code(&s, LZMA_FINISH);
+
+ if (!IN_SET(ret, LZMA_OK, LZMA_STREAM_END))
+ return -EBADMSG;
+
+ if (*buffer_size - s.avail_out >= prefix_len + 1)
+ return memcmp(*buffer, prefix, prefix_len) == 0 &&
+ ((const uint8_t*) *buffer)[prefix_len] == extra;
+
+ if (ret == LZMA_STREAM_END)
+ return 0;
+
+ s.avail_out += *buffer_size;
+
+ if (!(greedy_realloc(buffer, buffer_size, *buffer_size * 2, 1)))
+ return -ENOMEM;
+
+ s.next_out = *(uint8_t**)buffer + *buffer_size - s.avail_out;
+ }
+
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+int decompress_startswith_lz4(const void *src, uint64_t src_size,
+ void **buffer, size_t *buffer_size,
+ const void *prefix, size_t prefix_len,
+ uint8_t extra) {
+#if HAVE_LZ4
+ /* Checks whether the decompressed blob starts with the
+ * given prefix. The byte 'extra' must immediately follow
+ * the prefix. */
+
+ int r;
+
+ assert(src);
+ assert(src_size > 0);
+ assert(buffer);
+ assert(buffer_size);
+ assert(prefix);
+ assert(*buffer_size == 0 || *buffer);
+
+ if (src_size <= 8)
+ return -EBADMSG;
+
+ if (!(greedy_realloc(buffer, buffer_size, ALIGN_8(prefix_len + 1), 1)))
+ return -ENOMEM;
+
+ r = LZ4_decompress_safe_partial((char*)src + 8, *buffer, src_size - 8,
+ prefix_len + 1, *buffer_size);
+ /* On lz4 < 1.8.3, we might get "failure" (r < 0), or "success" where
+ * just a part of the buffer is decompressed. But if we get a smaller
+ * amount of bytes than requested, we don't know whether there isn't enough
+ * data to fill the requested size or whether we just got a partial answer.
+ */
+ if (r < 0 || (size_t) r < prefix_len + 1) {
+ size_t size;
+
+ if (LZ4_versionNumber() >= 10803)
+ /* We trust that the newer lz4 decompresses the number of bytes we
+ * requested if available in the compressed string. */
+ return 0;
+
+ if (r > 0)
+ /* Compare what we have first, in case of mismatch we can
+ * shortcut the full comparison. */
+ if (memcmp(*buffer, prefix, r) != 0)
+ return 0;
+
+ /* Before version 1.8.3, lz4 always tries to decode a full "sequence",
+ * so in pathological cases might need to decompress the full field. */
+ r = decompress_blob_lz4(src, src_size, buffer, buffer_size, &size, 0);
+ if (r < 0)
+ return r;
+
+ if (size < prefix_len + 1)
+ return 0;
+ }
+
+ return memcmp(*buffer, prefix, prefix_len) == 0 &&
+ ((const uint8_t*) *buffer)[prefix_len] == extra;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+int decompress_startswith(int compression,
+ const void *src, uint64_t src_size,
+ void **buffer, size_t *buffer_size,
+ const void *prefix, size_t prefix_len,
+ uint8_t extra) {
+ if (compression == OBJECT_COMPRESSED_XZ)
+ return decompress_startswith_xz(src, src_size,
+ buffer, buffer_size,
+ prefix, prefix_len,
+ extra);
+ else if (compression == OBJECT_COMPRESSED_LZ4)
+ return decompress_startswith_lz4(src, src_size,
+ buffer, buffer_size,
+ prefix, prefix_len,
+ extra);
+ else
+ return -EBADMSG;
+}
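+
+/*
+ * Illustrative use of decompress_startswith() (hypothetical caller, not part
+ * of this patch): test whether a compressed payload begins with "MESSAGE="
+ * without decompressing all of it. 'payload', 'payload_size', 'buf' and
+ * 'buf_size' are placeholder names for the compressed data and a reusable
+ * scratch buffer:
+ *
+ *     r = decompress_startswith(OBJECT_COMPRESSED_LZ4, payload, payload_size,
+ *                               &buf, &buf_size, "MESSAGE", 7, '=');
+ *
+ * r > 0 means the prefix matched, r == 0 no match, r < 0 an error.
+ */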
+
+int compress_stream_xz(int fdf, int fdt, uint64_t max_bytes) {
+#if HAVE_XZ
+ _cleanup_(lzma_end) lzma_stream s = LZMA_STREAM_INIT;
+ lzma_ret ret;
+ uint8_t buf[BUFSIZ], out[BUFSIZ];
+ lzma_action action = LZMA_RUN;
+
+ assert(fdf >= 0);
+ assert(fdt >= 0);
+
+ ret = lzma_easy_encoder(&s, LZMA_PRESET_DEFAULT, LZMA_CHECK_CRC64);
+ if (ret != LZMA_OK) {
+ log_error("Failed to initialize XZ encoder: code %u", ret);
+ return -EINVAL;
+ }
+
+ for (;;) {
+ if (s.avail_in == 0 && action == LZMA_RUN) {
+ size_t m = sizeof(buf);
+ ssize_t n;
+
+ if (max_bytes != (uint64_t) -1 && (uint64_t) m > max_bytes)
+ m = (size_t) max_bytes;
+
+ n = read(fdf, buf, m);
+ if (n < 0)
+ return -errno;
+ if (n == 0)
+ action = LZMA_FINISH;
+ else {
+ s.next_in = buf;
+ s.avail_in = n;
+
+ if (max_bytes != (uint64_t) -1) {
+ assert(max_bytes >= (uint64_t) n);
+ max_bytes -= n;
+ }
+ }
+ }
+
+ if (s.avail_out == 0) {
+ s.next_out = out;
+ s.avail_out = sizeof(out);
+ }
+
+ ret = lzma_code(&s, action);
+ if (!IN_SET(ret, LZMA_OK, LZMA_STREAM_END)) {
+ log_error("Compression failed: code %u", ret);
+ return -EBADMSG;
+ }
+
+ if (s.avail_out == 0 || ret == LZMA_STREAM_END) {
+ ssize_t n, k;
+
+ n = sizeof(out) - s.avail_out;
+
+ k = loop_write(fdt, out, n, false);
+ if (k < 0)
+ return k;
+
+ if (ret == LZMA_STREAM_END) {
+ log_debug("XZ compression finished (%"PRIu64" -> %"PRIu64" bytes, %.1f%%)",
+ s.total_in, s.total_out,
+ (double) s.total_out / s.total_in * 100);
+
+ return 0;
+ }
+ }
+ }
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+#define LZ4_BUFSIZE (512*1024u)
+
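+/*
+ * Reader's summary of the function below: compress_stream_lz4() mmaps the
+ * whole input fd and emits a standard LZ4 frame via the LZ4F_* API
+ * (LZ4F_compressBegin/Update/End), so the result is readable by the ordinary
+ * lz4 tooling. The output buffer is flushed to fdt whenever less than one
+ * worst-case block (frame_size) plus 4 bytes remains free, and the function
+ * fails with -EFBIG if the compressed output would exceed max_bytes.
+ * blockSizeID = 5 selects the 256 KiB maximum block size (LZ4F_max256KB in
+ * current lz4 headers).
+ */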
+int compress_stream_lz4(int fdf, int fdt, uint64_t max_bytes) {
+
+#if HAVE_LZ4
+ LZ4F_errorCode_t c;
+ _cleanup_(LZ4F_freeCompressionContextp) LZ4F_compressionContext_t ctx = NULL;
+ _cleanup_free_ char *buf = NULL;
+ char *src = NULL;
+ size_t size, n, total_in = 0, total_out, offset = 0, frame_size;
+ struct stat st;
+ int r;
+ static const LZ4F_compressOptions_t options = {
+ .stableSrc = 1,
+ };
+ static const LZ4F_preferences_t preferences = {
+ .frameInfo.blockSizeID = 5,
+ };
+
+ c = LZ4F_createCompressionContext(&ctx, LZ4F_VERSION);
+ if (LZ4F_isError(c))
+ return -ENOMEM;
+
+ if (fstat(fdf, &st) < 0)
+ return log_debug_errno(errno, "fstat() failed: %m");
+
+ frame_size = LZ4F_compressBound(LZ4_BUFSIZE, &preferences);
+ size = frame_size + 64*1024; /* add some space for header and trailer */
+ buf = malloc(size);
+ if (!buf)
+ return -ENOMEM;
+
+ n = offset = total_out = LZ4F_compressBegin(ctx, buf, size, &preferences);
+ if (LZ4F_isError(n))
+ return -EINVAL;
+
+ src = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fdf, 0);
+ if (src == MAP_FAILED)
+ return -errno;
+
+ log_debug("Buffer size is %zu bytes, header size %zu bytes.", size, n);
+
+ while (total_in < (size_t) st.st_size) {
+ ssize_t k;
+
+ k = MIN(LZ4_BUFSIZE, st.st_size - total_in);
+ n = LZ4F_compressUpdate(ctx, buf + offset, size - offset,
+ src + total_in, k, &options);
+ if (LZ4F_isError(n)) {
+ r = -ENOTRECOVERABLE;
+ goto cleanup;
+ }
+
+ total_in += k;
+ offset += n;
+ total_out += n;
+
+ if (max_bytes != (uint64_t) -1 && total_out > (size_t) max_bytes) {
+ log_debug("Compressed stream longer than %"PRIu64" bytes", max_bytes);
+ r = -EFBIG;
+ goto cleanup;
+ }
+
+ if (size - offset < frame_size + 4) {
+ k = loop_write(fdt, buf, offset, false);
+ if (k < 0) {
+ r = k;
+ goto cleanup;
+ }
+ offset = 0;
+ }
+ }
+
+ n = LZ4F_compressEnd(ctx, buf + offset, size - offset, &options);
+ if (LZ4F_isError(n)) {
+ r = -ENOTRECOVERABLE;
+ goto cleanup;
+ }
+
+ offset += n;
+ total_out += n;
+ r = loop_write(fdt, buf, offset, false);
+ if (r < 0)
+ goto cleanup;
+
+ log_debug("LZ4 compression finished (%zu -> %zu bytes, %.1f%%)",
+ total_in, total_out,
+ (double) total_out / total_in * 100);
+ cleanup:
+ munmap(src, st.st_size);
+ return r;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+int decompress_stream_xz(int fdf, int fdt, uint64_t max_bytes) {
+
+#if HAVE_XZ
+ _cleanup_(lzma_end) lzma_stream s = LZMA_STREAM_INIT;
+ lzma_ret ret;
+
+ uint8_t buf[BUFSIZ], out[BUFSIZ];
+ lzma_action action = LZMA_RUN;
+
+ assert(fdf >= 0);
+ assert(fdt >= 0);
+
+ ret = lzma_stream_decoder(&s, UINT64_MAX, 0);
+ if (ret != LZMA_OK) {
+ log_debug("Failed to initialize XZ decoder: code %u", ret);
+ return -ENOMEM;
+ }
+
+ for (;;) {
+ if (s.avail_in == 0 && action == LZMA_RUN) {
+ ssize_t n;
+
+ n = read(fdf, buf, sizeof(buf));
+ if (n < 0)
+ return -errno;
+ if (n == 0)
+ action = LZMA_FINISH;
+ else {
+ s.next_in = buf;
+ s.avail_in = n;
+ }
+ }
+
+ if (s.avail_out == 0) {
+ s.next_out = out;
+ s.avail_out = sizeof(out);
+ }
+
+ ret = lzma_code(&s, action);
+ if (!IN_SET(ret, LZMA_OK, LZMA_STREAM_END)) {
+ log_debug("Decompression failed: code %u", ret);
+ return -EBADMSG;
+ }
+
+ if (s.avail_out == 0 || ret == LZMA_STREAM_END) {
+ ssize_t n, k;
+
+ n = sizeof(out) - s.avail_out;
+
+ if (max_bytes != (uint64_t) -1) {
+ if (max_bytes < (uint64_t) n)
+ return -EFBIG;
+
+ max_bytes -= n;
+ }
+
+ k = loop_write(fdt, out, n, false);
+ if (k < 0)
+ return k;
+
+ if (ret == LZMA_STREAM_END) {
+ log_debug("XZ decompression finished (%"PRIu64" -> %"PRIu64" bytes, %.1f%%)",
+ s.total_in, s.total_out,
+ (double) s.total_out / s.total_in * 100);
+
+ return 0;
+ }
+ }
+ }
+#else
+ log_debug("Cannot decompress file. Compiled without XZ support.");
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+int decompress_stream_lz4(int in, int out, uint64_t max_bytes) {
+#if HAVE_LZ4
+ size_t c;
+ _cleanup_(LZ4F_freeDecompressionContextp) LZ4F_decompressionContext_t ctx = NULL;
+ _cleanup_free_ char *buf = NULL;
+ char *src;
+ struct stat st;
+ int r = 0;
+ size_t total_in = 0, total_out = 0;
+
+ c = LZ4F_createDecompressionContext(&ctx, LZ4F_VERSION);
+ if (LZ4F_isError(c))
+ return -ENOMEM;
+
+ if (fstat(in, &st) < 0)
+ return log_debug_errno(errno, "fstat() failed: %m");
+
+ buf = malloc(LZ4_BUFSIZE);
+ if (!buf)
+ return -ENOMEM;
+
+ src = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, in, 0);
+ if (src == MAP_FAILED)
+ return -errno;
+
+ while (total_in < (size_t) st.st_size) {
+ size_t produced = LZ4_BUFSIZE;
+ size_t used = st.st_size - total_in;
+
+ c = LZ4F_decompress(ctx, buf, &produced, src + total_in, &used, NULL);
+ if (LZ4F_isError(c)) {
+ r = -EBADMSG;
+ goto cleanup;
+ }
+
+ total_in += used;
+ total_out += produced;
+
+ if (max_bytes != (uint64_t) -1 && total_out > (size_t) max_bytes) {
+ log_debug("Decompressed stream longer than %"PRIu64" bytes", max_bytes);
+ r = -EFBIG;
+ goto cleanup;
+ }
+
+ r = loop_write(out, buf, produced, false);
+ if (r < 0)
+ goto cleanup;
+ }
+
+ log_debug("LZ4 decompression finished (%zu -> %zu bytes, %.1f%%)",
+ total_in, total_out,
+ total_in > 0 ? (double) total_out / total_in * 100 : 0.0);
+ cleanup:
+ munmap(src, st.st_size);
+ return r;
+#else
+ log_debug("Cannot decompress file. Compiled without LZ4 support.");
+ return -EPROTONOSUPPORT;
+#endif
+}
+
+int decompress_stream(const char *filename, int fdf, int fdt, uint64_t max_bytes) {
+
+ if (endswith(filename, ".lz4"))
+ return decompress_stream_lz4(fdf, fdt, max_bytes);
+ else if (endswith(filename, ".xz"))
+ return decompress_stream_xz(fdf, fdt, max_bytes);
+ else
+ return -EPROTONOSUPPORT;
+}
diff --git a/src/journal/compress.h b/src/journal/compress.h
new file mode 100644
index 0000000..5641148
--- /dev/null
+++ b/src/journal/compress.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <unistd.h>
+
+#include "journal-def.h"
+
+const char* object_compressed_to_string(int compression);
+int object_compressed_from_string(const char *compression);
+
+int compress_blob_xz(const void *src, uint64_t src_size,
+ void *dst, size_t dst_alloc_size, size_t *dst_size);
+int compress_blob_lz4(const void *src, uint64_t src_size,
+ void *dst, size_t dst_alloc_size, size_t *dst_size);
+
+static inline int compress_blob(const void *src, uint64_t src_size,
+ void *dst, size_t dst_alloc_size, size_t *dst_size) {
+ int r;
+#if HAVE_LZ4
+ r = compress_blob_lz4(src, src_size, dst, dst_alloc_size, dst_size);
+ if (r == 0)
+ return OBJECT_COMPRESSED_LZ4;
+#else
+ r = compress_blob_xz(src, src_size, dst, dst_alloc_size, dst_size);
+ if (r == 0)
+ return OBJECT_COMPRESSED_XZ;
+#endif
+ return r;
+}
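+
+/*
+ * Usage note (illustrative, not part of this patch): on success compress_blob()
+ * returns the OBJECT_COMPRESSED_XZ or OBJECT_COMPRESSED_LZ4 flag describing
+ * which codec was used, so a caller can record it in the object header;
+ * 'data', 'len', 'dst' and 'o' below are placeholder names:
+ *
+ *     r = compress_blob(data, len, dst, dst_alloc_size, &compressed_len);
+ *     if (r > 0)
+ *             o->object.flags |= r;
+ *
+ * A negative return value is an errno-style error.
+ */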
+
+int decompress_blob_xz(const void *src, uint64_t src_size,
+ void **dst, size_t *dst_alloc_size, size_t* dst_size, size_t dst_max);
+int decompress_blob_lz4(const void *src, uint64_t src_size,
+ void **dst, size_t *dst_alloc_size, size_t* dst_size, size_t dst_max);
+int decompress_blob(int compression,
+ const void *src, uint64_t src_size,
+ void **dst, size_t *dst_alloc_size, size_t* dst_size, size_t dst_max);
+
+int decompress_startswith_xz(const void *src, uint64_t src_size,
+ void **buffer, size_t *buffer_size,
+ const void *prefix, size_t prefix_len,
+ uint8_t extra);
+int decompress_startswith_lz4(const void *src, uint64_t src_size,
+ void **buffer, size_t *buffer_size,
+ const void *prefix, size_t prefix_len,
+ uint8_t extra);
+int decompress_startswith(int compression,
+ const void *src, uint64_t src_size,
+ void **buffer, size_t *buffer_size,
+ const void *prefix, size_t prefix_len,
+ uint8_t extra);
+
+int compress_stream_xz(int fdf, int fdt, uint64_t max_bytes);
+int compress_stream_lz4(int fdf, int fdt, uint64_t max_bytes);
+
+int decompress_stream_xz(int fdf, int fdt, uint64_t max_size);
+int decompress_stream_lz4(int fdf, int fdt, uint64_t max_size);
+
+#if HAVE_LZ4
+# define compress_stream compress_stream_lz4
+# define COMPRESSED_EXT ".lz4"
+#else
+# define compress_stream compress_stream_xz
+# define COMPRESSED_EXT ".xz"
+#endif
+
+int decompress_stream(const char *filename, int fdf, int fdt, uint64_t max_bytes);
diff --git a/src/journal/fsprg.c b/src/journal/fsprg.c
new file mode 100644
index 0000000..6d062de
--- /dev/null
+++ b/src/journal/fsprg.c
@@ -0,0 +1,378 @@
+/* SPDX-License-Identifier: LGPL-2.1+
+ *
+ * fsprg v0.1 - (seekable) forward-secure pseudorandom generator
+ * Copyright © 2012 B. Poettering
+ * Contact: fsprg@point-at-infinity.org
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+/*
+ * See "Practical Secure Logging: Seekable Sequential Key Generators"
+ * by G. A. Marson, B. Poettering for details:
+ *
+ * http://eprint.iacr.org/2013/397
+ */
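+
+/*
+ * High-level sketch of the scheme as implemented below (reader's summary; see
+ * the paper above for the precise definitions): the master key consists of two
+ * primes p and q with p, q = 3 (mod 4); the public part is n = p*q. The state
+ * is a triple (n, x, epoch), where x is a quadratic residue mod n. Evolving
+ * the state squares x mod n and increments the epoch, which is easy to do
+ * forward but infeasible to reverse without knowing p and q. Seeking directly
+ * to an epoch uses p and q to compute x^(2^epoch) via exponentiation mod p and
+ * mod q plus CRT recombination. Per-epoch keys are derived by hashing the
+ * serialized state (det_randomize()).
+ */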
+
+#include <gcrypt.h>
+#include <string.h>
+
+#include "fsprg.h"
+#include "gcrypt-util.h"
+
+#define ISVALID_SECPAR(secpar) (((secpar) % 16 == 0) && ((secpar) >= 16) && ((secpar) <= 16384))
+#define VALIDATE_SECPAR(secpar) assert(ISVALID_SECPAR(secpar));
+
+#define RND_HASH GCRY_MD_SHA256
+#define RND_GEN_P 0x01
+#define RND_GEN_Q 0x02
+#define RND_GEN_X 0x03
+
+#pragma GCC diagnostic ignored "-Wpointer-arith"
+/* TODO: remove void* arithmetic and this work-around */
+
+/******************************************************************************/
+
+static void mpi_export(void *buf, size_t buflen, const gcry_mpi_t x) {
+ unsigned len;
+ size_t nwritten;
+
+ assert(gcry_mpi_cmp_ui(x, 0) >= 0);
+ len = (gcry_mpi_get_nbits(x) + 7) / 8;
+ assert(len <= buflen);
+ memzero(buf, buflen);
+ gcry_mpi_print(GCRYMPI_FMT_USG, buf + (buflen - len), len, &nwritten, x);
+ assert(nwritten == len);
+}
+
+static gcry_mpi_t mpi_import(const void *buf, size_t buflen) {
+ gcry_mpi_t h;
+ unsigned len;
+
+ assert_se(gcry_mpi_scan(&h, GCRYMPI_FMT_USG, buf, buflen, NULL) == 0);
+ len = (gcry_mpi_get_nbits(h) + 7) / 8;
+ assert(len <= buflen);
+ assert(gcry_mpi_cmp_ui(h, 0) >= 0);
+
+ return h;
+}
+
+static void uint64_export(void *buf, size_t buflen, uint64_t x) {
+ assert(buflen == 8);
+ ((uint8_t*) buf)[0] = (x >> 56) & 0xff;
+ ((uint8_t*) buf)[1] = (x >> 48) & 0xff;
+ ((uint8_t*) buf)[2] = (x >> 40) & 0xff;
+ ((uint8_t*) buf)[3] = (x >> 32) & 0xff;
+ ((uint8_t*) buf)[4] = (x >> 24) & 0xff;
+ ((uint8_t*) buf)[5] = (x >> 16) & 0xff;
+ ((uint8_t*) buf)[6] = (x >> 8) & 0xff;
+ ((uint8_t*) buf)[7] = (x >> 0) & 0xff;
+}
+
+_pure_ static uint64_t uint64_import(const void *buf, size_t buflen) {
+ assert(buflen == 8);
+ return
+ (uint64_t)(((uint8_t*) buf)[0]) << 56 |
+ (uint64_t)(((uint8_t*) buf)[1]) << 48 |
+ (uint64_t)(((uint8_t*) buf)[2]) << 40 |
+ (uint64_t)(((uint8_t*) buf)[3]) << 32 |
+ (uint64_t)(((uint8_t*) buf)[4]) << 24 |
+ (uint64_t)(((uint8_t*) buf)[5]) << 16 |
+ (uint64_t)(((uint8_t*) buf)[6]) << 8 |
+ (uint64_t)(((uint8_t*) buf)[7]) << 0;
+}
+
+/* deterministically generate from seed/idx a string of buflen pseudorandom bytes */
+static void det_randomize(void *buf, size_t buflen, const void *seed, size_t seedlen, uint32_t idx) {
+ gcry_md_hd_t hd, hd2;
+ size_t olen, cpylen;
+ uint32_t ctr;
+
+ olen = gcry_md_get_algo_dlen(RND_HASH);
+ gcry_md_open(&hd, RND_HASH, 0);
+ gcry_md_write(hd, seed, seedlen);
+ gcry_md_putc(hd, (idx >> 24) & 0xff);
+ gcry_md_putc(hd, (idx >> 16) & 0xff);
+ gcry_md_putc(hd, (idx >> 8) & 0xff);
+ gcry_md_putc(hd, (idx >> 0) & 0xff);
+
+ for (ctr = 0; buflen; ctr++) {
+ gcry_md_copy(&hd2, hd);
+ gcry_md_putc(hd2, (ctr >> 24) & 0xff);
+ gcry_md_putc(hd2, (ctr >> 16) & 0xff);
+ gcry_md_putc(hd2, (ctr >> 8) & 0xff);
+ gcry_md_putc(hd2, (ctr >> 0) & 0xff);
+ gcry_md_final(hd2);
+ cpylen = (buflen < olen) ? buflen : olen;
+ memcpy(buf, gcry_md_read(hd2, RND_HASH), cpylen);
+ gcry_md_close(hd2);
+ buf += cpylen;
+ buflen -= cpylen;
+ }
+ gcry_md_close(hd);
+}
+
+/* deterministically generate from seed/idx a prime of length `bits' that is 3 (mod 4) */
+static gcry_mpi_t genprime3mod4(int bits, const void *seed, size_t seedlen, uint32_t idx) {
+ size_t buflen = bits / 8;
+ uint8_t buf[buflen];
+ gcry_mpi_t p;
+
+ assert(bits % 8 == 0);
+ assert(buflen > 0);
+
+ det_randomize(buf, buflen, seed, seedlen, idx);
+ buf[0] |= 0xc0; /* set upper two bits, so that n=pq has maximum size */
+ buf[buflen - 1] |= 0x03; /* set lower two bits, to have result 3 (mod 4) */
+
+ p = mpi_import(buf, buflen);
+ while (gcry_prime_check(p, 0))
+ gcry_mpi_add_ui(p, p, 4);
+
+ return p;
+}
+
+/* deterministically generate from seed/idx a quadratic residue (mod n) */
+static gcry_mpi_t gensquare(const gcry_mpi_t n, const void *seed, size_t seedlen, uint32_t idx, unsigned secpar) {
+ size_t buflen = secpar / 8;
+ uint8_t buf[buflen];
+ gcry_mpi_t x;
+
+ det_randomize(buf, buflen, seed, seedlen, idx);
+ buf[0] &= 0x7f; /* clear upper bit, so that we have x < n */
+ x = mpi_import(buf, buflen);
+ assert(gcry_mpi_cmp(x, n) < 0);
+ gcry_mpi_mulm(x, x, x, n);
+ return x;
+}
+
+/* compute 2^m (mod phi(p)), for a prime p */
+static gcry_mpi_t twopowmodphi(uint64_t m, const gcry_mpi_t p) {
+ gcry_mpi_t phi, r;
+ int n;
+
+ phi = gcry_mpi_new(0);
+ gcry_mpi_sub_ui(phi, p, 1);
+
+ /* count number of used bits in m */
+ for (n = 0; (1ULL << n) <= m; n++)
+ ;
+
+ r = gcry_mpi_new(0);
+ gcry_mpi_set_ui(r, 1);
+ while (n) { /* square and multiply algorithm for fast exponentiation */
+ n--;
+ gcry_mpi_mulm(r, r, r, phi);
+ if (m & ((uint64_t)1 << n)) {
+ gcry_mpi_add(r, r, r);
+ if (gcry_mpi_cmp(r, phi) >= 0)
+ gcry_mpi_sub(r, r, phi);
+ }
+ }
+
+ gcry_mpi_release(phi);
+ return r;
+}
+
+/* Decompose $x \in Z_n$ into $(xp,xq) \in Z_p \times Z_q$ using Chinese Remainder Theorem */
+static void CRT_decompose(gcry_mpi_t *xp, gcry_mpi_t *xq, const gcry_mpi_t x, const gcry_mpi_t p, const gcry_mpi_t q) {
+ *xp = gcry_mpi_new(0);
+ *xq = gcry_mpi_new(0);
+ gcry_mpi_mod(*xp, x, p);
+ gcry_mpi_mod(*xq, x, q);
+}
+
+/* Compose $(xp,xq) \in Z_p \times Z_q$ into $x \in Z_n$ using Chinese Remainder Theorem */
+static void CRT_compose(gcry_mpi_t *x, const gcry_mpi_t xp, const gcry_mpi_t xq, const gcry_mpi_t p, const gcry_mpi_t q) {
+ gcry_mpi_t a, u;
+
+ a = gcry_mpi_new(0);
+ u = gcry_mpi_new(0);
+ *x = gcry_mpi_new(0);
+ gcry_mpi_subm(a, xq, xp, q);
+ gcry_mpi_invm(u, p, q);
+ gcry_mpi_mulm(a, a, u, q); /* a = (xq - xp) / p (mod q) */
+ gcry_mpi_mul(*x, p, a);
+ gcry_mpi_add(*x, *x, xp); /* x = p * ((xq - xp) / p mod q) + xp */
+ gcry_mpi_release(a);
+ gcry_mpi_release(u);
+}
+
+/******************************************************************************/
+
+size_t FSPRG_mskinbytes(unsigned _secpar) {
+ VALIDATE_SECPAR(_secpar);
+ return 2 + 2 * (_secpar / 2) / 8; /* to store header,p,q */
+}
+
+size_t FSPRG_mpkinbytes(unsigned _secpar) {
+ VALIDATE_SECPAR(_secpar);
+ return 2 + _secpar / 8; /* to store header,n */
+}
+
+size_t FSPRG_stateinbytes(unsigned _secpar) {
+ VALIDATE_SECPAR(_secpar);
+ return 2 + 2 * _secpar / 8 + 8; /* to store header,n,x,epoch */
+}
+
+static void store_secpar(void *buf, uint16_t secpar) {
+ secpar = secpar / 16 - 1;
+ ((uint8_t*) buf)[0] = (secpar >> 8) & 0xff;
+ ((uint8_t*) buf)[1] = (secpar >> 0) & 0xff;
+}
+
+static uint16_t read_secpar(const void *buf) {
+ uint16_t secpar;
+ secpar =
+ (uint16_t)(((uint8_t*) buf)[0]) << 8 |
+ (uint16_t)(((uint8_t*) buf)[1]) << 0;
+ return 16 * (secpar + 1);
+}
+
+void FSPRG_GenMK(void *msk, void *mpk, const void *seed, size_t seedlen, unsigned _secpar) {
+ uint8_t iseed[FSPRG_RECOMMENDED_SEEDLEN];
+ gcry_mpi_t n, p, q;
+ uint16_t secpar;
+
+ VALIDATE_SECPAR(_secpar);
+ secpar = _secpar;
+
+ initialize_libgcrypt(false);
+
+ if (!seed) {
+ gcry_randomize(iseed, FSPRG_RECOMMENDED_SEEDLEN, GCRY_STRONG_RANDOM);
+ seed = iseed;
+ seedlen = FSPRG_RECOMMENDED_SEEDLEN;
+ }
+
+ p = genprime3mod4(secpar / 2, seed, seedlen, RND_GEN_P);
+ q = genprime3mod4(secpar / 2, seed, seedlen, RND_GEN_Q);
+
+ if (msk) {
+ store_secpar(msk + 0, secpar);
+ mpi_export(msk + 2 + 0 * (secpar / 2) / 8, (secpar / 2) / 8, p);
+ mpi_export(msk + 2 + 1 * (secpar / 2) / 8, (secpar / 2) / 8, q);
+ }
+
+ if (mpk) {
+ n = gcry_mpi_new(0);
+ gcry_mpi_mul(n, p, q);
+ assert(gcry_mpi_get_nbits(n) == secpar);
+
+ store_secpar(mpk + 0, secpar);
+ mpi_export(mpk + 2, secpar / 8, n);
+
+ gcry_mpi_release(n);
+ }
+
+ gcry_mpi_release(p);
+ gcry_mpi_release(q);
+}
+
+void FSPRG_GenState0(void *state, const void *mpk, const void *seed, size_t seedlen) {
+ gcry_mpi_t n, x;
+ uint16_t secpar;
+
+ initialize_libgcrypt(false);
+
+ secpar = read_secpar(mpk + 0);
+ n = mpi_import(mpk + 2, secpar / 8);
+ x = gensquare(n, seed, seedlen, RND_GEN_X, secpar);
+
+ memcpy(state, mpk, 2 + secpar / 8);
+ mpi_export(state + 2 + 1 * secpar / 8, secpar / 8, x);
+ memzero(state + 2 + 2 * secpar / 8, 8);
+
+ gcry_mpi_release(n);
+ gcry_mpi_release(x);
+}
+
+void FSPRG_Evolve(void *state) {
+ gcry_mpi_t n, x;
+ uint16_t secpar;
+ uint64_t epoch;
+
+ initialize_libgcrypt(false);
+
+ secpar = read_secpar(state + 0);
+ n = mpi_import(state + 2 + 0 * secpar / 8, secpar / 8);
+ x = mpi_import(state + 2 + 1 * secpar / 8, secpar / 8);
+ epoch = uint64_import(state + 2 + 2 * secpar / 8, 8);
+
+ gcry_mpi_mulm(x, x, x, n);
+ epoch++;
+
+ mpi_export(state + 2 + 1 * secpar / 8, secpar / 8, x);
+ uint64_export(state + 2 + 2 * secpar / 8, 8, epoch);
+
+ gcry_mpi_release(n);
+ gcry_mpi_release(x);
+}
+
+uint64_t FSPRG_GetEpoch(const void *state) {
+ uint16_t secpar;
+ secpar = read_secpar(state + 0);
+ return uint64_import(state + 2 + 2 * secpar / 8, 8);
+}
+
+void FSPRG_Seek(void *state, uint64_t epoch, const void *msk, const void *seed, size_t seedlen) {
+ gcry_mpi_t p, q, n, x, xp, xq, kp, kq, xm;
+ uint16_t secpar;
+
+ initialize_libgcrypt(false);
+
+ secpar = read_secpar(msk + 0);
+ p = mpi_import(msk + 2 + 0 * (secpar / 2) / 8, (secpar / 2) / 8);
+ q = mpi_import(msk + 2 + 1 * (secpar / 2) / 8, (secpar / 2) / 8);
+
+ n = gcry_mpi_new(0);
+ gcry_mpi_mul(n, p, q);
+
+ x = gensquare(n, seed, seedlen, RND_GEN_X, secpar);
+ CRT_decompose(&xp, &xq, x, p, q); /* split (mod n) into (mod p) and (mod q) using CRT */
+
+ kp = twopowmodphi(epoch, p); /* compute 2^epoch (mod phi(p)) */
+ kq = twopowmodphi(epoch, q); /* compute 2^epoch (mod phi(q)) */
+
+ gcry_mpi_powm(xp, xp, kp, p); /* compute x^(2^epoch) (mod p) */
+ gcry_mpi_powm(xq, xq, kq, q); /* compute x^(2^epoch) (mod q) */
+
+ CRT_compose(&xm, xp, xq, p, q); /* combine (mod p) and (mod q) to (mod n) using CRT */
+
+ store_secpar(state + 0, secpar);
+ mpi_export(state + 2 + 0 * secpar / 8, secpar / 8, n);
+ mpi_export(state + 2 + 1 * secpar / 8, secpar / 8, xm);
+ uint64_export(state + 2 + 2 * secpar / 8, 8, epoch);
+
+ gcry_mpi_release(p);
+ gcry_mpi_release(q);
+ gcry_mpi_release(n);
+ gcry_mpi_release(x);
+ gcry_mpi_release(xp);
+ gcry_mpi_release(xq);
+ gcry_mpi_release(kp);
+ gcry_mpi_release(kq);
+ gcry_mpi_release(xm);
+}
+
+void FSPRG_GetKey(const void *state, void *key, size_t keylen, uint32_t idx) {
+ uint16_t secpar;
+
+ initialize_libgcrypt(false);
+
+ secpar = read_secpar(state + 0);
+ det_randomize(key, keylen, state + 2, 2 * secpar / 8 + 8, idx);
+}
diff --git a/src/journal/fsprg.h b/src/journal/fsprg.h
new file mode 100644
index 0000000..3341267
--- /dev/null
+++ b/src/journal/fsprg.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+/*
+ * fsprg v0.1 - (seekable) forward-secure pseudorandom generator
+ * Copyright © 2012 B. Poettering
+ * Contact: fsprg@point-at-infinity.org
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
+ * 02110-1301 USA
+ */
+
+#include <inttypes.h>
+#include <sys/types.h>
+
+#include "macro.h"
+#include "util.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define FSPRG_RECOMMENDED_SECPAR 1536
+#define FSPRG_RECOMMENDED_SEEDLEN (96/8)
+
+size_t FSPRG_mskinbytes(unsigned secpar) _const_;
+size_t FSPRG_mpkinbytes(unsigned secpar) _const_;
+size_t FSPRG_stateinbytes(unsigned secpar) _const_;
+
+/* Set up msk and mpk. Providing seed != NULL makes this algorithm deterministic. */
+void FSPRG_GenMK(void *msk, void *mpk, const void *seed, size_t seedlen, unsigned secpar);
+
+/* Initialize the state deterministically from the seed. */
+/* Note: if only one GenState0 is run per GenMK, it is safe to use
+ the same seed for both GenMK and GenState0.
+*/
+void FSPRG_GenState0(void *state, const void *mpk, const void *seed, size_t seedlen);
+
+void FSPRG_Evolve(void *state);
+
+uint64_t FSPRG_GetEpoch(const void *state) _pure_;
+
+/* Seek to any arbitrary state (by providing msk together with seed from GenState0). */
+void FSPRG_Seek(void *state, uint64_t epoch, const void *msk, const void *seed, size_t seedlen);
+
+void FSPRG_GetKey(const void *state, void *key, size_t keylen, uint32_t idx);
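+
+/* Typical call sequence (illustrative sketch only; buffer allocation and the
+ * sizes returned by the FSPRG_*inbytes() helpers are the caller's
+ * responsibility):
+ *
+ *     FSPRG_GenMK(msk, mpk, seed, seedlen, FSPRG_RECOMMENDED_SECPAR);
+ *     FSPRG_GenState0(state, mpk, seed, seedlen);
+ *     for (;;) {
+ *             FSPRG_GetKey(state, key, keylen, 0);
+ *             ... use key ...
+ *             FSPRG_Evolve(state);
+ *     }
+ *
+ * A verifier holding msk and the seed can jump straight to any epoch with
+ * FSPRG_Seek(state, epoch, msk, seed, seedlen). */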
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/src/journal/generate-audit_type-list.sh b/src/journal/generate-audit_type-list.sh
new file mode 100755
index 0000000..2445a02
--- /dev/null
+++ b/src/journal/generate-audit_type-list.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+set -eu
+
+cpp="$1"
+shift
+
+includes=""
+for i in "$@"; do
+ includes="$includes -include $i"
+done
+
+$cpp -dM $includes - </dev/null | \
+ grep -vE 'AUDIT_.*(FIRST|LAST)_' | \
+ sed -r -n 's/^#define\s+AUDIT_(\w+)\s+([0-9]{4})\s*$/\1\t\2/p' | \
+ sort -k2
diff --git a/src/journal/journal-authenticate.c b/src/journal/journal-authenticate.c
new file mode 100644
index 0000000..8a9ce8a
--- /dev/null
+++ b/src/journal/journal-authenticate.c
@@ -0,0 +1,534 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <sys/mman.h>
+
+#include "fd-util.h"
+#include "fsprg.h"
+#include "gcrypt-util.h"
+#include "hexdecoct.h"
+#include "journal-authenticate.h"
+#include "journal-def.h"
+#include "journal-file.h"
+
+static uint64_t journal_file_tag_seqnum(JournalFile *f) {
+ uint64_t r;
+
+ assert(f);
+
+ r = le64toh(f->header->n_tags) + 1;
+ f->header->n_tags = htole64(r);
+
+ return r;
+}
+
+int journal_file_append_tag(JournalFile *f) {
+ Object *o;
+ uint64_t p;
+ int r;
+
+ assert(f);
+
+ if (!f->seal)
+ return 0;
+
+ if (!f->hmac_running)
+ return 0;
+
+ assert(f->hmac);
+
+ r = journal_file_append_object(f, OBJECT_TAG, sizeof(struct TagObject), &o, &p);
+ if (r < 0)
+ return r;
+
+ o->tag.seqnum = htole64(journal_file_tag_seqnum(f));
+ o->tag.epoch = htole64(FSPRG_GetEpoch(f->fsprg_state));
+
+ log_debug("Writing tag %"PRIu64" for epoch %"PRIu64"",
+ le64toh(o->tag.seqnum),
+ FSPRG_GetEpoch(f->fsprg_state));
+
+ /* Add the tag object itself, so that we can protect its
+ * header. This will exclude the actual hash value in it */
+ r = journal_file_hmac_put_object(f, OBJECT_TAG, o, p);
+ if (r < 0)
+ return r;
+
+ /* Get the HMAC tag and store it in the object */
+ memcpy(o->tag.tag, gcry_md_read(f->hmac, 0), TAG_LENGTH);
+ f->hmac_running = false;
+
+ return 0;
+}
+
+int journal_file_hmac_start(JournalFile *f) {
+ uint8_t key[256 / 8]; /* Let's pass 256 bit from FSPRG to HMAC */
+ assert(f);
+
+ if (!f->seal)
+ return 0;
+
+ if (f->hmac_running)
+ return 0;
+
+ /* Prepare HMAC for next cycle */
+ gcry_md_reset(f->hmac);
+ FSPRG_GetKey(f->fsprg_state, key, sizeof(key), 0);
+ gcry_md_setkey(f->hmac, key, sizeof(key));
+
+ f->hmac_running = true;
+
+ return 0;
+}
+
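+/* Worked example (illustrative numbers): with fss_start_usec = S and
+ * fss_interval_usec = I, a record timestamped anywhere in [S + 2*I, S + 3*I)
+ * falls into epoch 2, since the epoch is simply (realtime - S) / I with
+ * integer division. */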
+static int journal_file_get_epoch(JournalFile *f, uint64_t realtime, uint64_t *epoch) {
+ uint64_t t;
+
+ assert(f);
+ assert(epoch);
+ assert(f->seal);
+
+ if (f->fss_start_usec == 0 ||
+ f->fss_interval_usec == 0)
+ return -EOPNOTSUPP;
+
+ if (realtime < f->fss_start_usec)
+ return -ESTALE;
+
+ t = realtime - f->fss_start_usec;
+ t = t / f->fss_interval_usec;
+
+ *epoch = t;
+ return 0;
+}
+
+static int journal_file_fsprg_need_evolve(JournalFile *f, uint64_t realtime) {
+ uint64_t goal, epoch;
+ int r;
+ assert(f);
+
+ if (!f->seal)
+ return 0;
+
+ r = journal_file_get_epoch(f, realtime, &goal);
+ if (r < 0)
+ return r;
+
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+ if (epoch > goal)
+ return -ESTALE;
+
+ return epoch != goal;
+}
+
+int journal_file_fsprg_evolve(JournalFile *f, uint64_t realtime) {
+ uint64_t goal, epoch;
+ int r;
+
+ assert(f);
+
+ if (!f->seal)
+ return 0;
+
+ r = journal_file_get_epoch(f, realtime, &goal);
+ if (r < 0)
+ return r;
+
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+ if (epoch < goal)
+ log_debug("Evolving FSPRG key from epoch %"PRIu64" to %"PRIu64".", epoch, goal);
+
+ for (;;) {
+ if (epoch > goal)
+ return -ESTALE;
+ if (epoch == goal)
+ return 0;
+
+ FSPRG_Evolve(f->fsprg_state);
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+ }
+}
+
+int journal_file_fsprg_seek(JournalFile *f, uint64_t goal) {
+ void *msk;
+ uint64_t epoch;
+
+ assert(f);
+
+ if (!f->seal)
+ return 0;
+
+ assert(f->fsprg_seed);
+
+ if (f->fsprg_state) {
+ /* Cheaper... */
+
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+ if (goal == epoch)
+ return 0;
+
+ if (goal == epoch+1) {
+ FSPRG_Evolve(f->fsprg_state);
+ return 0;
+ }
+ } else {
+ f->fsprg_state_size = FSPRG_stateinbytes(FSPRG_RECOMMENDED_SECPAR);
+ f->fsprg_state = malloc(f->fsprg_state_size);
+
+ if (!f->fsprg_state)
+ return -ENOMEM;
+ }
+
+ log_debug("Seeking FSPRG key to %"PRIu64".", goal);
+
+ msk = alloca(FSPRG_mskinbytes(FSPRG_RECOMMENDED_SECPAR));
+ FSPRG_GenMK(msk, NULL, f->fsprg_seed, f->fsprg_seed_size, FSPRG_RECOMMENDED_SECPAR);
+ FSPRG_Seek(f->fsprg_state, goal, msk, f->fsprg_seed, f->fsprg_seed_size);
+ return 0;
+}
+
+int journal_file_maybe_append_tag(JournalFile *f, uint64_t realtime) {
+ int r;
+
+ assert(f);
+
+ if (!f->seal)
+ return 0;
+
+ if (realtime <= 0)
+ realtime = now(CLOCK_REALTIME);
+
+ r = journal_file_fsprg_need_evolve(f, realtime);
+ if (r <= 0)
+ return 0;
+
+ r = journal_file_append_tag(f);
+ if (r < 0)
+ return r;
+
+ r = journal_file_fsprg_evolve(f, realtime);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+int journal_file_hmac_put_object(JournalFile *f, ObjectType type, Object *o, uint64_t p) {
+ int r;
+
+ assert(f);
+
+ if (!f->seal)
+ return 0;
+
+ r = journal_file_hmac_start(f);
+ if (r < 0)
+ return r;
+
+ if (!o) {
+ r = journal_file_move_to_object(f, type, p, &o);
+ if (r < 0)
+ return r;
+ } else {
+ if (type > OBJECT_UNUSED && o->object.type != type)
+ return -EBADMSG;
+ }
+
+ gcry_md_write(f->hmac, o, offsetof(ObjectHeader, payload));
+
+ switch (o->object.type) {
+
+ case OBJECT_DATA:
+ /* All but hash and payload are mutable */
+ gcry_md_write(f->hmac, &o->data.hash, sizeof(o->data.hash));
+ gcry_md_write(f->hmac, o->data.payload, le64toh(o->object.size) - offsetof(DataObject, payload));
+ break;
+
+ case OBJECT_FIELD:
+ /* Same here */
+ gcry_md_write(f->hmac, &o->field.hash, sizeof(o->field.hash));
+ gcry_md_write(f->hmac, o->field.payload, le64toh(o->object.size) - offsetof(FieldObject, payload));
+ break;
+
+ case OBJECT_ENTRY:
+ /* All */
+ gcry_md_write(f->hmac, &o->entry.seqnum, le64toh(o->object.size) - offsetof(EntryObject, seqnum));
+ break;
+
+ case OBJECT_FIELD_HASH_TABLE:
+ case OBJECT_DATA_HASH_TABLE:
+ case OBJECT_ENTRY_ARRAY:
+ /* Nothing: everything is mutable */
+ break;
+
+ case OBJECT_TAG:
+ /* All but the tag itself */
+ gcry_md_write(f->hmac, &o->tag.seqnum, sizeof(o->tag.seqnum));
+ gcry_md_write(f->hmac, &o->tag.epoch, sizeof(o->tag.epoch));
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+int journal_file_hmac_put_header(JournalFile *f) {
+ int r;
+
+ assert(f);
+
+ if (!f->seal)
+ return 0;
+
+ r = journal_file_hmac_start(f);
+ if (r < 0)
+ return r;
+
+ /* All but state+reserved, boot_id, arena_size,
+ * tail_object_offset, n_objects, n_entries,
+ * tail_entry_seqnum, head_entry_seqnum, entry_array_offset,
+ * head_entry_realtime, tail_entry_realtime,
+ * tail_entry_monotonic, n_data, n_fields, n_tags,
+ * n_entry_arrays. */
+
+ gcry_md_write(f->hmac, f->header->signature, offsetof(Header, state) - offsetof(Header, signature));
+ gcry_md_write(f->hmac, &f->header->file_id, offsetof(Header, boot_id) - offsetof(Header, file_id));
+ gcry_md_write(f->hmac, &f->header->seqnum_id, offsetof(Header, arena_size) - offsetof(Header, seqnum_id));
+ gcry_md_write(f->hmac, &f->header->data_hash_table_offset, offsetof(Header, tail_object_offset) - offsetof(Header, data_hash_table_offset));
+
+ return 0;
+}
+
+int journal_file_fss_load(JournalFile *f) {
+ int r, fd = -1;
+ char *p = NULL;
+ struct stat st;
+ FSSHeader *m = NULL;
+ sd_id128_t machine;
+
+ assert(f);
+
+ if (!f->seal)
+ return 0;
+
+ r = sd_id128_get_machine(&machine);
+ if (r < 0)
+ return r;
+
+ if (asprintf(&p, "/var/log/journal/" SD_ID128_FORMAT_STR "/fss",
+ SD_ID128_FORMAT_VAL(machine)) < 0)
+ return -ENOMEM;
+
+ fd = open(p, O_RDWR|O_CLOEXEC|O_NOCTTY, 0600);
+ if (fd < 0) {
+ if (errno != ENOENT)
+ log_error_errno(errno, "Failed to open %s: %m", p);
+
+ r = -errno;
+ goto finish;
+ }
+
+ if (fstat(fd, &st) < 0) {
+ r = -errno;
+ goto finish;
+ }
+
+ if (st.st_size < (off_t) sizeof(FSSHeader)) {
+ r = -ENODATA;
+ goto finish;
+ }
+
+ m = mmap(NULL, PAGE_ALIGN(sizeof(FSSHeader)), PROT_READ, MAP_SHARED, fd, 0);
+ if (m == MAP_FAILED) {
+ m = NULL;
+ r = -errno;
+ goto finish;
+ }
+
+ if (memcmp(m->signature, FSS_HEADER_SIGNATURE, 8) != 0) {
+ r = -EBADMSG;
+ goto finish;
+ }
+
+ if (m->incompatible_flags != 0) {
+ r = -EPROTONOSUPPORT;
+ goto finish;
+ }
+
+ if (le64toh(m->header_size) < sizeof(FSSHeader)) {
+ r = -EBADMSG;
+ goto finish;
+ }
+
+ if (le64toh(m->fsprg_state_size) != FSPRG_stateinbytes(le16toh(m->fsprg_secpar))) {
+ r = -EBADMSG;
+ goto finish;
+ }
+
+ f->fss_file_size = le64toh(m->header_size) + le64toh(m->fsprg_state_size);
+ if ((uint64_t) st.st_size < f->fss_file_size) {
+ r = -ENODATA;
+ goto finish;
+ }
+
+ if (!sd_id128_equal(machine, m->machine_id)) {
+ r = -EHOSTDOWN;
+ goto finish;
+ }
+
+ if (le64toh(m->start_usec) <= 0 ||
+ le64toh(m->interval_usec) <= 0) {
+ r = -EBADMSG;
+ goto finish;
+ }
+
+ f->fss_file = mmap(NULL, PAGE_ALIGN(f->fss_file_size), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if (f->fss_file == MAP_FAILED) {
+ f->fss_file = NULL;
+ r = -errno;
+ goto finish;
+ }
+
+ f->fss_start_usec = le64toh(f->fss_file->start_usec);
+ f->fss_interval_usec = le64toh(f->fss_file->interval_usec);
+
+ f->fsprg_state = (uint8_t*) f->fss_file + le64toh(f->fss_file->header_size);
+ f->fsprg_state_size = le64toh(f->fss_file->fsprg_state_size);
+
+ r = 0;
+
+finish:
+ if (m)
+ munmap(m, PAGE_ALIGN(sizeof(FSSHeader)));
+
+ safe_close(fd);
+ free(p);
+
+ return r;
+}
+
+int journal_file_hmac_setup(JournalFile *f) {
+ gcry_error_t e;
+
+ if (!f->seal)
+ return 0;
+
+ initialize_libgcrypt(true);
+
+ e = gcry_md_open(&f->hmac, GCRY_MD_SHA256, GCRY_MD_FLAG_HMAC);
+ if (e != 0)
+ return -EOPNOTSUPP;
+
+ return 0;
+}
+
+int journal_file_append_first_tag(JournalFile *f) {
+ int r;
+ uint64_t p;
+
+ if (!f->seal)
+ return 0;
+
+ log_debug("Calculating first tag...");
+
+ r = journal_file_hmac_put_header(f);
+ if (r < 0)
+ return r;
+
+ p = le64toh(f->header->field_hash_table_offset);
+ if (p < offsetof(Object, hash_table.items))
+ return -EINVAL;
+ p -= offsetof(Object, hash_table.items);
+
+ r = journal_file_hmac_put_object(f, OBJECT_FIELD_HASH_TABLE, NULL, p);
+ if (r < 0)
+ return r;
+
+ p = le64toh(f->header->data_hash_table_offset);
+ if (p < offsetof(Object, hash_table.items))
+ return -EINVAL;
+ p -= offsetof(Object, hash_table.items);
+
+ r = journal_file_hmac_put_object(f, OBJECT_DATA_HASH_TABLE, NULL, p);
+ if (r < 0)
+ return r;
+
+ r = journal_file_append_tag(f);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
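+/* Accepted key string format (derived from the parsing below; the digits are
+ * only a schematic example): a hex-encoded FSPRG seed in which '-' separators
+ * are ignored, then '/', then two hex numbers: the start time expressed in
+ * multiples of the interval, and the interval itself in microseconds, e.g.
+ * "aaaaaa-bbbbbb-cccccc-dddddd/1f-35a4e900". fss_start_usec is reconstructed
+ * as start * interval. */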
+int journal_file_parse_verification_key(JournalFile *f, const char *key) {
+ uint8_t *seed;
+ size_t seed_size, c;
+ const char *k;
+ int r;
+ unsigned long long start, interval;
+
+ seed_size = FSPRG_RECOMMENDED_SEEDLEN;
+ seed = malloc(seed_size);
+ if (!seed)
+ return -ENOMEM;
+
+ k = key;
+ for (c = 0; c < seed_size; c++) {
+ int x, y;
+
+ while (*k == '-')
+ k++;
+
+ x = unhexchar(*k);
+ if (x < 0) {
+ free(seed);
+ return -EINVAL;
+ }
+ k++;
+ y = unhexchar(*k);
+ if (y < 0) {
+ free(seed);
+ return -EINVAL;
+ }
+ k++;
+
+ seed[c] = (uint8_t) (x * 16 + y);
+ }
+
+ if (*k != '/') {
+ free(seed);
+ return -EINVAL;
+ }
+ k++;
+
+ r = sscanf(k, "%llx-%llx", &start, &interval);
+ if (r != 2) {
+ free(seed);
+ return -EINVAL;
+ }
+
+ f->fsprg_seed = seed;
+ f->fsprg_seed_size = seed_size;
+
+ f->fss_start_usec = start * interval;
+ f->fss_interval_usec = interval;
+
+ return 0;
+}
+
+bool journal_file_next_evolve_usec(JournalFile *f, usec_t *u) {
+ uint64_t epoch;
+
+ assert(f);
+ assert(u);
+
+ if (!f->seal)
+ return false;
+
+ epoch = FSPRG_GetEpoch(f->fsprg_state);
+
+ *u = (usec_t) (f->fss_start_usec + f->fss_interval_usec * epoch + f->fss_interval_usec);
+
+ return true;
+}
diff --git a/src/journal/journal-authenticate.h b/src/journal/journal-authenticate.h
new file mode 100644
index 0000000..2ef0133
--- /dev/null
+++ b/src/journal/journal-authenticate.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <stdbool.h>
+
+#include "journal-file.h"
+
+int journal_file_append_tag(JournalFile *f);
+int journal_file_maybe_append_tag(JournalFile *f, uint64_t realtime);
+int journal_file_append_first_tag(JournalFile *f);
+
+int journal_file_hmac_setup(JournalFile *f);
+int journal_file_hmac_start(JournalFile *f);
+int journal_file_hmac_put_header(JournalFile *f);
+int journal_file_hmac_put_object(JournalFile *f, ObjectType type, Object *o, uint64_t p);
+
+int journal_file_fss_load(JournalFile *f);
+int journal_file_parse_verification_key(JournalFile *f, const char *key);
+
+int journal_file_fsprg_evolve(JournalFile *f, uint64_t realtime);
+int journal_file_fsprg_seek(JournalFile *f, uint64_t epoch);
+
+bool journal_file_next_evolve_usec(JournalFile *f, usec_t *u);
diff --git a/src/journal/journal-def.h b/src/journal/journal-def.h
new file mode 100644
index 0000000..d68ce38
--- /dev/null
+++ b/src/journal/journal-def.h
@@ -0,0 +1,230 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "sd-id128.h"
+
+#include "macro.h"
+#include "sparse-endian.h"
+
+/*
+ * If you change this file you probably should also change its documentation:
+ *
+ * http://www.freedesktop.org/wiki/Software/systemd/journal-files
+ */
+
+typedef struct Header Header;
+
+typedef struct ObjectHeader ObjectHeader;
+typedef union Object Object;
+
+typedef struct DataObject DataObject;
+typedef struct FieldObject FieldObject;
+typedef struct EntryObject EntryObject;
+typedef struct HashTableObject HashTableObject;
+typedef struct EntryArrayObject EntryArrayObject;
+typedef struct TagObject TagObject;
+
+typedef struct EntryItem EntryItem;
+typedef struct HashItem HashItem;
+
+typedef struct FSSHeader FSSHeader;
+
+/* Object types */
+typedef enum ObjectType {
+ OBJECT_UNUSED, /* also serves as "any type" or "additional context" */
+ OBJECT_DATA,
+ OBJECT_FIELD,
+ OBJECT_ENTRY,
+ OBJECT_DATA_HASH_TABLE,
+ OBJECT_FIELD_HASH_TABLE,
+ OBJECT_ENTRY_ARRAY,
+ OBJECT_TAG,
+ _OBJECT_TYPE_MAX
+} ObjectType;
+
+/* Object flags */
+enum {
+ OBJECT_COMPRESSED_XZ = 1 << 0,
+ OBJECT_COMPRESSED_LZ4 = 1 << 1,
+ _OBJECT_COMPRESSED_MAX
+};
+
+#define OBJECT_COMPRESSION_MASK (OBJECT_COMPRESSED_XZ | OBJECT_COMPRESSED_LZ4)
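+
+/* These flags, stored in ObjectHeader.flags, mark an object's payload as
+ * compressed with the corresponding codec (see compress.c for the on-disk
+ * framing). decompress_blob() treats anything other than exactly one of them
+ * being set as -EBADMSG. */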
+
+struct ObjectHeader {
+ uint8_t type;
+ uint8_t flags;
+ uint8_t reserved[6];
+ le64_t size;
+ uint8_t payload[];
+} _packed_;
+
+#define DataObject__contents { \
+ ObjectHeader object; \
+ le64_t hash; \
+ le64_t next_hash_offset; \
+ le64_t next_field_offset; \
+ le64_t entry_offset; /* the first array entry we store inline */ \
+ le64_t entry_array_offset; \
+ le64_t n_entries; \
+ uint8_t payload[]; \
+ }
+
+struct DataObject DataObject__contents;
+struct DataObject__packed DataObject__contents _packed_;
+assert_cc(sizeof(struct DataObject) == sizeof(struct DataObject__packed));
+
+struct FieldObject {
+ ObjectHeader object;
+ le64_t hash;
+ le64_t next_hash_offset;
+ le64_t head_data_offset;
+ uint8_t payload[];
+} _packed_;
+
+struct EntryItem {
+ le64_t object_offset;
+ le64_t hash;
+} _packed_;
+
+#define EntryObject__contents { \
+ ObjectHeader object; \
+ le64_t seqnum; \
+ le64_t realtime; \
+ le64_t monotonic; \
+ sd_id128_t boot_id; \
+ le64_t xor_hash; \
+ EntryItem items[]; \
+ }
+
+struct EntryObject EntryObject__contents;
+struct EntryObject__packed EntryObject__contents _packed_;
+assert_cc(sizeof(struct EntryObject) == sizeof(struct EntryObject__packed));
+
+
+struct HashItem {
+ le64_t head_hash_offset;
+ le64_t tail_hash_offset;
+} _packed_;
+
+struct HashTableObject {
+ ObjectHeader object;
+ HashItem items[];
+} _packed_;
+
+struct EntryArrayObject {
+ ObjectHeader object;
+ le64_t next_entry_array_offset;
+ le64_t items[];
+} _packed_;
+
+#define TAG_LENGTH (256/8)
+
+struct TagObject {
+ ObjectHeader object;
+ le64_t seqnum;
+ le64_t epoch;
+ uint8_t tag[TAG_LENGTH]; /* SHA-256 HMAC */
+} _packed_;
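+
+/* Reader's cross-reference: the tag field holds the HMAC-SHA256 computed in
+ * journal-authenticate.c (journal_file_hmac_*()) over the header fields and
+ * objects hashed since the previous tag, keyed with an FSPRG-derived key, see
+ * journal_file_hmac_start(). */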
+
+union Object {
+ ObjectHeader object;
+ DataObject data;
+ FieldObject field;
+ EntryObject entry;
+ HashTableObject hash_table;
+ EntryArrayObject entry_array;
+ TagObject tag;
+};
+
+enum {
+ STATE_OFFLINE = 0,
+ STATE_ONLINE = 1,
+ STATE_ARCHIVED = 2,
+ _STATE_MAX
+};
+
+/* Header flags */
+enum {
+ HEADER_INCOMPATIBLE_COMPRESSED_XZ = 1 << 0,
+ HEADER_INCOMPATIBLE_COMPRESSED_LZ4 = 1 << 1,
+};
+
+#define HEADER_INCOMPATIBLE_ANY (HEADER_INCOMPATIBLE_COMPRESSED_XZ|HEADER_INCOMPATIBLE_COMPRESSED_LZ4)
+
+#if HAVE_XZ && HAVE_LZ4
+# define HEADER_INCOMPATIBLE_SUPPORTED HEADER_INCOMPATIBLE_ANY
+#elif HAVE_XZ
+# define HEADER_INCOMPATIBLE_SUPPORTED HEADER_INCOMPATIBLE_COMPRESSED_XZ
+#elif HAVE_LZ4
+# define HEADER_INCOMPATIBLE_SUPPORTED HEADER_INCOMPATIBLE_COMPRESSED_LZ4
+#else
+# define HEADER_INCOMPATIBLE_SUPPORTED 0
+#endif
+
+enum {
+ HEADER_COMPATIBLE_SEALED = 1
+};
+
+#define HEADER_COMPATIBLE_ANY HEADER_COMPATIBLE_SEALED
+#if HAVE_GCRYPT
+# define HEADER_COMPATIBLE_SUPPORTED HEADER_COMPATIBLE_SEALED
+#else
+# define HEADER_COMPATIBLE_SUPPORTED 0
+#endif
+
+#define HEADER_SIGNATURE ((char[]) { 'L', 'P', 'K', 'S', 'H', 'H', 'R', 'H' })
+
+#define struct_Header__contents { \
+ uint8_t signature[8]; /* "LPKSHHRH" */ \
+ le32_t compatible_flags; \
+ le32_t incompatible_flags; \
+ uint8_t state; \
+ uint8_t reserved[7]; \
+ sd_id128_t file_id; \
+ sd_id128_t machine_id; \
+ sd_id128_t boot_id; /* last writer */ \
+ sd_id128_t seqnum_id; \
+ le64_t header_size; \
+ le64_t arena_size; \
+ le64_t data_hash_table_offset; \
+ le64_t data_hash_table_size; \
+ le64_t field_hash_table_offset; \
+ le64_t field_hash_table_size; \
+ le64_t tail_object_offset; \
+ le64_t n_objects; \
+ le64_t n_entries; \
+ le64_t tail_entry_seqnum; \
+ le64_t head_entry_seqnum; \
+ le64_t entry_array_offset; \
+ le64_t head_entry_realtime; \
+ le64_t tail_entry_realtime; \
+ le64_t tail_entry_monotonic; \
+ /* Added in 187 */ \
+ le64_t n_data; \
+ le64_t n_fields; \
+ /* Added in 189 */ \
+ le64_t n_tags; \
+ le64_t n_entry_arrays; \
+ }
+
+struct Header struct_Header__contents;
+struct Header__packed struct_Header__contents _packed_;
+assert_cc(sizeof(struct Header) == sizeof(struct Header__packed));
+assert_cc(sizeof(struct Header) == 240);
+
+#define FSS_HEADER_SIGNATURE ((char[]) { 'K', 'S', 'H', 'H', 'R', 'H', 'L', 'P' })
+
+struct FSSHeader {
+ uint8_t signature[8]; /* "KSHHRHLP" */
+ le32_t compatible_flags;
+ le32_t incompatible_flags;
+ sd_id128_t machine_id;
+ sd_id128_t boot_id; /* last writer */
+ le64_t header_size;
+ le64_t start_usec;
+ le64_t interval_usec;
+ le16_t fsprg_secpar;
+ le16_t reserved[3];
+ le64_t fsprg_state_size;
+} _packed_;
diff --git a/src/journal/journal-file.c b/src/journal/journal-file.c
new file mode 100644
index 0000000..56827f9
--- /dev/null
+++ b/src/journal/journal-file.c
@@ -0,0 +1,3908 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/fs.h>
+#include <pthread.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/statvfs.h>
+#include <sys/uio.h>
+#include <unistd.h>
+
+#include "sd-event.h"
+
+#include "alloc-util.h"
+#include "btrfs-util.h"
+#include "chattr-util.h"
+#include "compress.h"
+#include "fd-util.h"
+#include "fs-util.h"
+#include "journal-authenticate.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "lookup3.h"
+#include "parse-util.h"
+#include "path-util.h"
+#include "random-util.h"
+#include "set.h"
+#include "stat-util.h"
+#include "string-util.h"
+#include "strv.h"
+#include "xattr-util.h"
+
+#define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
+#define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
+
+#define DEFAULT_COMPRESS_THRESHOLD (512ULL)
+#define MIN_COMPRESS_THRESHOLD (8ULL)
+
+/* This is the minimum journal file size */
+#define JOURNAL_FILE_SIZE_MIN (512ULL*1024ULL) /* 512 KiB */
+
+/* These are the lower and upper bounds if we deduce the max_use value
+ * from the file system size */
+#define DEFAULT_MAX_USE_LOWER (1ULL*1024ULL*1024ULL) /* 1 MiB */
+#define DEFAULT_MAX_USE_UPPER (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */
+
+/* This is the default minimal use limit, how much we'll use even if keep_free suggests otherwise. */
+#define DEFAULT_MIN_USE (1ULL*1024ULL*1024ULL) /* 1 MiB */
+
+/* This is the upper bound if we deduce max_size from max_use */
+#define DEFAULT_MAX_SIZE_UPPER (128ULL*1024ULL*1024ULL) /* 128 MiB */
+
+/* This is the upper bound if we deduce the keep_free value from the
+ * file system size */
+#define DEFAULT_KEEP_FREE_UPPER (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */
+
+/* This is the keep_free value when we can't determine the system
+ * size */
+#define DEFAULT_KEEP_FREE (1024ULL*1024ULL) /* 1 MiB */
+
+/* This is the default maximum number of journal files to keep around. */
+#define DEFAULT_N_MAX_FILES (100)
+
+/* n_data was the first entry we added after the initial file format design */
+#define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
+
+/* How many entries to keep in the entry array chain cache at max */
+#define CHAIN_CACHE_MAX 20
+
+/* How much to increase the journal file size at once each time we allocate something new. */
+#define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL) /* 8 MiB */
+
+/* Reread fstat() of the file for detecting deletions at least this often */
+#define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
+
+/* The mmap context to use for the header: we pick the one right above the last defined object type */
+#define CONTEXT_HEADER _OBJECT_TYPE_MAX
+
+#ifdef __clang__
+# pragma GCC diagnostic ignored "-Waddress-of-packed-member"
+#endif
+
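+/*
+ * Offline state machine (reader's sketch of the functions below): a journal is
+ * taken offline either synchronously or via a short-lived "journal-offline"
+ * thread. The normal path is OFFLINE_JOINED -> OFFLINE_SYNCING -> (fsync) ->
+ * OFFLINE_OFFLINING -> (write header state, fsync) -> OFFLINE_DONE, and
+ * joining the thread returns the state to OFFLINE_JOINED. A concurrent
+ * journal_file_set_offline() may bounce SYNCING/OFFLINING to the
+ * OFFLINE_AGAIN_FROM_* states to restart the cycle, while
+ * journal_file_set_online() may flip an in-flight offline to OFFLINE_CANCEL,
+ * which the offline thread then completes without touching the header.
+ */
+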
+/* This may be called from a separate thread to prevent blocking the caller for the duration of fsync().
+ * As a result we use atomic operations on f->offline_state for inter-thread communications with
+ * journal_file_set_offline() and journal_file_set_online(). */
+static void journal_file_set_offline_internal(JournalFile *f) {
+ assert(f);
+ assert(f->fd >= 0);
+ assert(f->header);
+
+ for (;;) {
+ switch (f->offline_state) {
+ case OFFLINE_CANCEL:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_DONE))
+ continue;
+ return;
+
+ case OFFLINE_AGAIN_FROM_SYNCING:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_SYNCING))
+ continue;
+ break;
+
+ case OFFLINE_AGAIN_FROM_OFFLINING:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_SYNCING))
+ continue;
+ break;
+
+ case OFFLINE_SYNCING:
+ (void) fsync(f->fd);
+
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_OFFLINING))
+ continue;
+
+ f->header->state = f->archive ? STATE_ARCHIVED : STATE_OFFLINE;
+ (void) fsync(f->fd);
+ break;
+
+ case OFFLINE_OFFLINING:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_DONE))
+ continue;
+ _fallthrough_;
+ case OFFLINE_DONE:
+ return;
+
+ case OFFLINE_JOINED:
+ log_debug("Unexpected offline state OFFLINE_JOINED in journal_file_set_offline_internal()");
+ return;
+ }
+ }
+}
+
+static void * journal_file_set_offline_thread(void *arg) {
+ JournalFile *f = arg;
+
+ (void) pthread_setname_np(pthread_self(), "journal-offline");
+
+ journal_file_set_offline_internal(f);
+
+ return NULL;
+}
+
+static int journal_file_set_offline_thread_join(JournalFile *f) {
+ int r;
+
+ assert(f);
+
+ if (f->offline_state == OFFLINE_JOINED)
+ return 0;
+
+ r = pthread_join(f->offline_thread, NULL);
+ if (r)
+ return -r;
+
+ f->offline_state = OFFLINE_JOINED;
+
+ if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
+ return -EIO;
+
+ return 0;
+}
+
+/* Trigger a restart if the offline thread is mid-flight in a restartable state. */
+static bool journal_file_set_offline_try_restart(JournalFile *f) {
+ for (;;) {
+ switch (f->offline_state) {
+ case OFFLINE_AGAIN_FROM_SYNCING:
+ case OFFLINE_AGAIN_FROM_OFFLINING:
+ return true;
+
+ case OFFLINE_CANCEL:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_AGAIN_FROM_SYNCING))
+ continue;
+ return true;
+
+ case OFFLINE_SYNCING:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_AGAIN_FROM_SYNCING))
+ continue;
+ return true;
+
+ case OFFLINE_OFFLINING:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_AGAIN_FROM_OFFLINING))
+ continue;
+ return true;
+
+ default:
+ return false;
+ }
+ }
+}
+
+/* Sets a journal offline.
+ *
+ * If wait is false then an offline is dispatched in a separate thread for a
+ * subsequent journal_file_set_offline() or journal_file_set_online() of the
+ * same journal to synchronize with.
+ *
+ * If wait is true, then either an existing offline thread will be restarted
+ * and joined, or if none exists the offline is simply performed in this
+ * context without involving another thread.
+ */
+int journal_file_set_offline(JournalFile *f, bool wait) {
+ bool restarted;
+ int r;
+
+ assert(f);
+
+ if (!f->writable)
+ return -EPERM;
+
+ if (f->fd < 0 || !f->header)
+ return -EINVAL;
+
+ /* An offlining journal is implicitly online and may modify f->header->state;
+ * we must also join any potentially lingering offline thread when not online. */
+ if (!journal_file_is_offlining(f) && f->header->state != STATE_ONLINE)
+ return journal_file_set_offline_thread_join(f);
+
+ /* Restart an in-flight offline thread and wait if needed, or join a lingering done one. */
+ restarted = journal_file_set_offline_try_restart(f);
+ if ((restarted && wait) || !restarted) {
+ r = journal_file_set_offline_thread_join(f);
+ if (r < 0)
+ return r;
+ }
+
+ if (restarted)
+ return 0;
+
+ /* Initiate a new offline. */
+ f->offline_state = OFFLINE_SYNCING;
+
+ if (wait) /* Without using a thread if waiting. */
+ journal_file_set_offline_internal(f);
+ else {
+ sigset_t ss, saved_ss;
+ int k;
+
+ assert_se(sigfillset(&ss) >= 0);
+
+ r = pthread_sigmask(SIG_BLOCK, &ss, &saved_ss);
+ if (r > 0)
+ return -r;
+
+ r = pthread_create(&f->offline_thread, NULL, journal_file_set_offline_thread, f);
+
+ k = pthread_sigmask(SIG_SETMASK, &saved_ss, NULL);
+ if (r > 0) {
+ f->offline_state = OFFLINE_JOINED;
+ return -r;
+ }
+ if (k > 0)
+ return -k;
+ }
+
+ return 0;
+}
+
+static int journal_file_set_online(JournalFile *f) {
+ bool wait = true;
+
+ assert(f);
+
+ if (!f->writable)
+ return -EPERM;
+
+ if (f->fd < 0 || !f->header)
+ return -EINVAL;
+
+ while (wait) {
+ switch (f->offline_state) {
+ case OFFLINE_JOINED:
+ /* No offline thread, no need to wait. */
+ wait = false;
+ break;
+
+ case OFFLINE_SYNCING:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_CANCEL))
+ continue;
+ /* Canceled syncing prior to offlining, no need to wait. */
+ wait = false;
+ break;
+
+ case OFFLINE_AGAIN_FROM_SYNCING:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_CANCEL))
+ continue;
+ /* Canceled restart from syncing, no need to wait. */
+ wait = false;
+ break;
+
+ case OFFLINE_AGAIN_FROM_OFFLINING:
+ if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_CANCEL))
+ continue;
+ /* Canceled restart from offlining, must wait for offlining to complete however. */
+ _fallthrough_;
+ default: {
+ int r;
+
+ r = journal_file_set_offline_thread_join(f);
+ if (r < 0)
+ return r;
+
+ wait = false;
+ break;
+ }
+ }
+ }
+
+ if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
+ return -EIO;
+
+ switch (f->header->state) {
+ case STATE_ONLINE:
+ return 0;
+
+ case STATE_OFFLINE:
+ f->header->state = STATE_ONLINE;
+ (void) fsync(f->fd);
+ return 0;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+bool journal_file_is_offlining(JournalFile *f) {
+ assert(f);
+
+ __sync_synchronize();
+
+ if (IN_SET(f->offline_state, OFFLINE_DONE, OFFLINE_JOINED))
+ return false;
+
+ return true;
+}
+
+JournalFile* journal_file_close(JournalFile *f) {
+ assert(f);
+
+#if HAVE_GCRYPT
+ /* Write the final tag */
+ if (f->seal && f->writable) {
+ int r;
+
+ r = journal_file_append_tag(f);
+ if (r < 0)
+ log_error_errno(r, "Failed to append tag when closing journal: %m");
+ }
+#endif
+
+ if (f->post_change_timer) {
+ if (sd_event_source_get_enabled(f->post_change_timer, NULL) > 0)
+ journal_file_post_change(f);
+
+ (void) sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_OFF);
+ sd_event_source_unref(f->post_change_timer);
+ }
+
+ journal_file_set_offline(f, true);
+
+ if (f->mmap && f->cache_fd)
+ mmap_cache_free_fd(f->mmap, f->cache_fd);
+
+ if (f->fd >= 0 && f->defrag_on_close) {
+
+ /* Be friendly to btrfs: turn COW back on again now,
+ * and defragment the file. We won't write to the file
+ * ever again, hence remove all fragmentation, and
+ * reenable all the good bits COW usually provides
+ * (such as data checksumming). */
+
+ (void) chattr_fd(f->fd, 0, FS_NOCOW_FL, NULL);
+ (void) btrfs_defrag_fd(f->fd);
+ }
+
+ if (f->close_fd)
+ safe_close(f->fd);
+ free(f->path);
+
+ mmap_cache_unref(f->mmap);
+
+ ordered_hashmap_free_free(f->chain_cache);
+
+#if HAVE_XZ || HAVE_LZ4
+ free(f->compress_buffer);
+#endif
+
+#if HAVE_GCRYPT
+ if (f->fss_file)
+ munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size));
+ else
+ free(f->fsprg_state);
+
+ free(f->fsprg_seed);
+
+ if (f->hmac)
+ gcry_md_close(f->hmac);
+#endif
+
+ return mfree(f);
+}
+
+static int journal_file_init_header(JournalFile *f, JournalFile *template) {
+ Header h = {};
+ ssize_t k;
+ int r;
+
+ assert(f);
+
+ memcpy(h.signature, HEADER_SIGNATURE, 8);
+ h.header_size = htole64(ALIGN64(sizeof(h)));
+
+ h.incompatible_flags |= htole32(
+ f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ |
+ f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4);
+
+ h.compatible_flags = htole32(
+ f->seal * HEADER_COMPATIBLE_SEALED);
+
+ r = sd_id128_randomize(&h.file_id);
+ if (r < 0)
+ return r;
+
+ if (template) {
+ h.seqnum_id = template->header->seqnum_id;
+ h.tail_entry_seqnum = template->header->tail_entry_seqnum;
+ } else
+ h.seqnum_id = h.file_id;
+
+ k = pwrite(f->fd, &h, sizeof(h), 0);
+ if (k < 0)
+ return -errno;
+
+ if (k != sizeof(h))
+ return -EIO;
+
+ return 0;
+}
+
+static int journal_file_refresh_header(JournalFile *f) {
+ sd_id128_t boot_id;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ r = sd_id128_get_machine(&f->header->machine_id);
+ if (IN_SET(r, -ENOENT, -ENOMEDIUM))
+ /* We don't have a machine-id, let's continue without */
+ zero(f->header->machine_id);
+ else if (r < 0)
+ return r;
+
+ r = sd_id128_get_boot(&boot_id);
+ if (r < 0)
+ return r;
+
+ f->header->boot_id = boot_id;
+
+ r = journal_file_set_online(f);
+
+ /* Sync the online state to disk */
+ (void) fsync(f->fd);
+
+ /* We likely just created a new file, also sync the directory this file is located in. */
+ (void) fsync_directory_of_file(f->fd);
+
+ return r;
+}
+
+static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
+ const uint32_t any = compatible ? HEADER_COMPATIBLE_ANY : HEADER_INCOMPATIBLE_ANY,
+ supported = compatible ? HEADER_COMPATIBLE_SUPPORTED : HEADER_INCOMPATIBLE_SUPPORTED;
+ const char *type = compatible ? "compatible" : "incompatible";
+ uint32_t flags;
+
+ flags = le32toh(compatible ? f->header->compatible_flags : f->header->incompatible_flags);
+
+ if (flags & ~supported) {
+ if (flags & ~any)
+ log_debug("Journal file %s has unknown %s flags 0x%"PRIx32,
+ f->path, type, flags & ~any);
+ flags = (flags & any) & ~supported;
+ if (flags) {
+ const char* strv[3];
+ unsigned n = 0;
+ _cleanup_free_ char *t = NULL;
+
+ if (compatible && (flags & HEADER_COMPATIBLE_SEALED))
+ strv[n++] = "sealed";
+ if (!compatible && (flags & HEADER_INCOMPATIBLE_COMPRESSED_XZ))
+ strv[n++] = "xz-compressed";
+ if (!compatible && (flags & HEADER_INCOMPATIBLE_COMPRESSED_LZ4))
+ strv[n++] = "lz4-compressed";
+ strv[n] = NULL;
+ assert(n < ELEMENTSOF(strv));
+
+ t = strv_join((char**) strv, ", ");
+ log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
+ f->path, type, n > 1 ? "flags" : "flag", strnull(t));
+ }
+ return true;
+ }
+
+ return false;
+}
+
+static int journal_file_verify_header(JournalFile *f) {
+ uint64_t arena_size, header_size;
+
+ assert(f);
+ assert(f->header);
+
+ if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
+ return -EBADMSG;
+
+ /* In both read and write mode we refuse to open files with incompatible
+ * flags we don't know. */
+ if (warn_wrong_flags(f, false))
+ return -EPROTONOSUPPORT;
+
+ /* When opened for writing we additionally refuse files with unknown compatible flags. */
+ if (f->writable && warn_wrong_flags(f, true))
+ return -EPROTONOSUPPORT;
+
+ if (f->header->state >= _STATE_MAX)
+ return -EBADMSG;
+
+ header_size = le64toh(f->header->header_size);
+
+ /* The first addition was n_data, so check that we are at least this large */
+ if (header_size < HEADER_SIZE_MIN)
+ return -EBADMSG;
+
+ if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
+ return -EBADMSG;
+
+ arena_size = le64toh(f->header->arena_size);
+
+ if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size)
+ return -ENODATA;
+
+ if (le64toh(f->header->tail_object_offset) > header_size + arena_size)
+ return -ENODATA;
+
+ if (!VALID64(le64toh(f->header->data_hash_table_offset)) ||
+ !VALID64(le64toh(f->header->field_hash_table_offset)) ||
+ !VALID64(le64toh(f->header->tail_object_offset)) ||
+ !VALID64(le64toh(f->header->entry_array_offset)))
+ return -ENODATA;
+
+ if (f->writable) {
+ sd_id128_t machine_id;
+ uint8_t state;
+ int r;
+
+ r = sd_id128_get_machine(&machine_id);
+ if (r < 0)
+ return r;
+
+ if (!sd_id128_equal(machine_id, f->header->machine_id))
+ return -EHOSTDOWN;
+
+ state = f->header->state;
+
+ if (state == STATE_ARCHIVED)
+ return -ESHUTDOWN; /* Already archived */
+ else if (state == STATE_ONLINE)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
+ "Journal file %s is already online. Assuming unclean closing.",
+ f->path);
+ else if (state != STATE_OFFLINE)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
+ "Journal file %s has unknown state %i.",
+ f->path, state);
+
+ if (f->header->field_hash_table_size == 0 || f->header->data_hash_table_size == 0)
+ return -EBADMSG;
+
+ /* Don't permit appending to files from the future, since otherwise the realtime timestamps of the
+ * entries in the file wouldn't be strictly ordered anymore, and we can't have that since it breaks
+ * bisection. */
+ if (le64toh(f->header->tail_entry_realtime) > now(CLOCK_REALTIME))
+ return log_debug_errno(SYNTHETIC_ERRNO(ETXTBSY),
+ "Journal file %s is from the future, refusing to append new data to it that'd be older.",
+ f->path);
+ }
+
+ f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header);
+ f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header);
+
+ f->seal = JOURNAL_HEADER_SEALED(f->header);
+
+ return 0;
+}
+
+static int journal_file_fstat(JournalFile *f) {
+ int r;
+
+ assert(f);
+ assert(f->fd >= 0);
+
+ if (fstat(f->fd, &f->last_stat) < 0)
+ return -errno;
+
+ f->last_stat_usec = now(CLOCK_MONOTONIC);
+
+ /* Refuse dealing with files that aren't regular */
+ r = stat_verify_regular(&f->last_stat);
+ if (r < 0)
+ return r;
+
+ /* Refuse appending to files that are already deleted */
+ if (f->last_stat.st_nlink <= 0)
+ return -EIDRM;
+
+ return 0;
+}
+
+static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
+ uint64_t old_size, new_size;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ /* We assume that this file is not sparse, and we know that
+ * for sure, since we always call posix_fallocate()
+ * ourselves */
+
+ if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
+ return -EIO;
+
+ old_size =
+ le64toh(f->header->header_size) +
+ le64toh(f->header->arena_size);
+
+ new_size = PAGE_ALIGN(offset + size);
+ if (new_size < le64toh(f->header->header_size))
+ new_size = le64toh(f->header->header_size);
+
+ if (new_size <= old_size) {
+
+ /* We already pre-allocated enough space, but before
+ * we write to it, let's check with fstat() if the
+ * file got deleted, in order to make sure we don't throw
+ * away the data immediately. Don't check fstat() for
+ * all writes though, but only once every 10s. */
+
+ if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
+ return 0;
+
+ return journal_file_fstat(f);
+ }
+
+ /* Allocate more space. */
+
+ if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
+ return -E2BIG;
+
+ if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
+ struct statvfs svfs;
+
+ if (fstatvfs(f->fd, &svfs) >= 0) {
+ uint64_t available;
+
+ available = LESS_BY((uint64_t) svfs.f_bfree * (uint64_t) svfs.f_bsize, f->metrics.keep_free);
+
+ if (new_size - old_size > available)
+ return -E2BIG;
+ }
+ }
+
+ /* Increase by larger blocks at once */
+ new_size = DIV_ROUND_UP(new_size, FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
+ if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
+ new_size = f->metrics.max_size;
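+
+ /* Illustrative (assuming FILE_SIZE_INCREASE is 8 MiB, as defined earlier in this file): a
+ * required new_size of 10 MiB is rounded up to 16 MiB here, so the file grows in large steps
+ * rather than one page at a time, unless metrics.max_size caps it lower. */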
+
+ /* Note that the glibc fallocate() fallback is very
+ inefficient, hence we try to minimize the allocation area
+ as much as we can. */
+ r = posix_fallocate(f->fd, old_size, new_size - old_size);
+ if (r != 0)
+ return -r;
+
+ f->header->arena_size = htole64(new_size - le64toh(f->header->header_size));
+
+ return journal_file_fstat(f);
+}
+
+static unsigned type_to_context(ObjectType type) {
+ /* One context for each type, plus one catch-all for the rest */
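+ /* For example, OBJECT_DATA and OBJECT_ENTRY each get their own mmap window context, while
+ * OBJECT_UNUSED and any out-of-range type share the catch-all context 0. */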
+ assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS);
+ assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS);
+ return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0;
+}
+
+static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_always, uint64_t offset, uint64_t size, void **ret, size_t *ret_size) {
+ int r;
+
+ assert(f);
+ assert(ret);
+
+ if (size <= 0)
+ return -EINVAL;
+
+ /* Avoid SIGBUS on invalid accesses */
+ if (offset + size > (uint64_t) f->last_stat.st_size) {
+ /* Hmm, out of range? Let's refresh the fstat() data
+ * first, before we trust that check. */
+
+ r = journal_file_fstat(f);
+ if (r < 0)
+ return r;
+
+ if (offset + size > (uint64_t) f->last_stat.st_size)
+ return -EADDRNOTAVAIL;
+ }
+
+ return mmap_cache_get(f->mmap, f->cache_fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret, ret_size);
+}
+
+static uint64_t minimum_header_size(Object *o) {
+
+ static const uint64_t table[] = {
+ [OBJECT_DATA] = sizeof(DataObject),
+ [OBJECT_FIELD] = sizeof(FieldObject),
+ [OBJECT_ENTRY] = sizeof(EntryObject),
+ [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
+ [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
+ [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
+ [OBJECT_TAG] = sizeof(TagObject),
+ };
+
+ if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
+ return sizeof(ObjectHeader);
+
+ return table[o->object.type];
+}
+
+/* Lightweight object checks. We want this to be fast, so that we won't
+ * slow down every journal_file_move_to_object() call too much. */
+static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) {
+ assert(f);
+ assert(o);
+
+ switch (o->object.type) {
+
+ case OBJECT_DATA: {
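+ /* A data object's entry_offset and n_entries must either both be zero (nothing linked to
+ * it yet) or both be non-zero, hence the XOR check below. */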
+ if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Bad n_entries: %" PRIu64 ": %" PRIu64,
+ le64toh(o->data.n_entries),
+ offset);
+
+ if (le64toh(o->object.size) - offsetof(DataObject, payload) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Bad object size (<= %zu): %" PRIu64 ": %" PRIu64,
+ offsetof(DataObject, payload),
+ le64toh(o->object.size),
+ offset);
+
+ if (!VALID64(le64toh(o->data.next_hash_offset)) ||
+ !VALID64(le64toh(o->data.next_field_offset)) ||
+ !VALID64(le64toh(o->data.entry_offset)) ||
+ !VALID64(le64toh(o->data.entry_array_offset)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid offset, next_hash_offset=" OFSfmt ", next_field_offset=" OFSfmt ", entry_offset=" OFSfmt ", entry_array_offset=" OFSfmt ": %" PRIu64,
+ le64toh(o->data.next_hash_offset),
+ le64toh(o->data.next_field_offset),
+ le64toh(o->data.entry_offset),
+ le64toh(o->data.entry_array_offset),
+ offset);
+
+ break;
+ }
+
+ case OBJECT_FIELD:
+ if (le64toh(o->object.size) - offsetof(FieldObject, payload) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Bad field size (<= %zu): %" PRIu64 ": %" PRIu64,
+ offsetof(FieldObject, payload),
+ le64toh(o->object.size),
+ offset);
+
+ if (!VALID64(le64toh(o->field.next_hash_offset)) ||
+ !VALID64(le64toh(o->field.head_data_offset)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid offset, next_hash_offset=" OFSfmt ", head_data_offset=" OFSfmt ": %" PRIu64,
+ le64toh(o->field.next_hash_offset),
+ le64toh(o->field.head_data_offset),
+ offset);
+ break;
+
+ case OBJECT_ENTRY:
+ if ((le64toh(o->object.size) - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Bad entry size (<= %zu): %" PRIu64 ": %" PRIu64,
+ offsetof(EntryObject, items),
+ le64toh(o->object.size),
+ offset);
+
+ if ((le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid number items in entry: %" PRIu64 ": %" PRIu64,
+ (le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem),
+ offset);
+
+ if (le64toh(o->entry.seqnum) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid entry seqnum: %" PRIx64 ": %" PRIu64,
+ le64toh(o->entry.seqnum),
+ offset);
+
+ if (!VALID_REALTIME(le64toh(o->entry.realtime)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid entry realtime timestamp: %" PRIu64 ": %" PRIu64,
+ le64toh(o->entry.realtime),
+ offset);
+
+ if (!VALID_MONOTONIC(le64toh(o->entry.monotonic)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid entry monotonic timestamp: %" PRIu64 ": %" PRIu64,
+ le64toh(o->entry.monotonic),
+ offset);
+
+ break;
+
+ case OBJECT_DATA_HASH_TABLE:
+ case OBJECT_FIELD_HASH_TABLE:
+ if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 ||
+ (le64toh(o->object.size) - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid %s hash table size: %" PRIu64 ": %" PRIu64,
+ o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
+ le64toh(o->object.size),
+ offset);
+
+ break;
+
+ case OBJECT_ENTRY_ARRAY:
+ if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 ||
+ (le64toh(o->object.size) - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid object entry array size: %" PRIu64 ": %" PRIu64,
+ le64toh(o->object.size),
+ offset);
+
+ if (!VALID64(le64toh(o->entry_array.next_entry_array_offset)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid object entry array next_entry_array_offset: " OFSfmt ": %" PRIu64,
+ le64toh(o->entry_array.next_entry_array_offset),
+ offset);
+
+ break;
+
+ case OBJECT_TAG:
+ if (le64toh(o->object.size) != sizeof(TagObject))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid object tag size: %" PRIu64 ": %" PRIu64,
+ le64toh(o->object.size),
+ offset);
+
+ if (!VALID_EPOCH(le64toh(o->tag.epoch)))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid object tag epoch: %" PRIu64 ": %" PRIu64,
+ le64toh(o->tag.epoch), offset);
+
+ break;
+ }
+
+ return 0;
+}
+
+int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
+ int r;
+ void *t;
+ size_t tsize;
+ Object *o;
+ uint64_t s;
+
+ assert(f);
+ assert(ret);
+
+ /* Objects may only be located at offsets that are multiples of 64 bits */
+ if (!VALID64(offset))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to object at non-64bit boundary: %" PRIu64,
+ offset);
+
+ /* Objects may not be located in the file header */
+ if (offset < le64toh(f->header->header_size))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to object located in file header: %" PRIu64,
+ offset);
+
+ r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t, &tsize);
+ if (r < 0)
+ return r;
+
+ o = (Object*) t;
+ s = le64toh(o->object.size);
+
+ if (s == 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to uninitialized object: %" PRIu64,
+ offset);
+ if (s < sizeof(ObjectHeader))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to overly short object: %" PRIu64,
+ offset);
+
+ if (o->object.type <= OBJECT_UNUSED)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to object with invalid type: %" PRIu64,
+ offset);
+
+ if (s < minimum_header_size(o))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to truncated object: %" PRIu64,
+ offset);
+
+ if (type > OBJECT_UNUSED && o->object.type != type)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Attempt to move to object of unexpected type: %" PRIu64,
+ offset);
+
+ if (s > tsize) {
+ r = journal_file_move_to(f, type, false, offset, s, &t, NULL);
+ if (r < 0)
+ return r;
+
+ o = (Object*) t;
+ }
+
+ r = journal_file_check_object(f, offset, o);
+ if (r < 0)
+ return r;
+
+ *ret = o;
+ return 0;
+}
+
+static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) {
+ uint64_t r;
+
+ assert(f);
+ assert(f->header);
+
+ r = le64toh(f->header->tail_entry_seqnum) + 1;
+
+ if (seqnum) {
+ /* If an external seqnum counter was passed, we update
+ * both the local and the external one, and set it to
+ * the maximum of both */
+
+ if (*seqnum + 1 > r)
+ r = *seqnum + 1;
+
+ *seqnum = r;
+ }
+
+ f->header->tail_entry_seqnum = htole64(r);
+
+ if (f->header->head_entry_seqnum == 0)
+ f->header->head_entry_seqnum = htole64(r);
+
+ return r;
+}
+
+int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret, uint64_t *offset) {
+ int r;
+ uint64_t p;
+ Object *tail, *o;
+ void *t;
+
+ assert(f);
+ assert(f->header);
+ assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
+ assert(size >= sizeof(ObjectHeader));
+ assert(offset);
+ assert(ret);
+
+ r = journal_file_set_online(f);
+ if (r < 0)
+ return r;
+
+ p = le64toh(f->header->tail_object_offset);
+ if (p == 0)
+ p = le64toh(f->header->header_size);
+ else {
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
+ if (r < 0)
+ return r;
+
+ p += ALIGN64(le64toh(tail->object.size));
+ }
+
+ r = journal_file_allocate(f, p, size);
+ if (r < 0)
+ return r;
+
+ r = journal_file_move_to(f, type, false, p, size, &t, NULL);
+ if (r < 0)
+ return r;
+
+ o = (Object*) t;
+
+ zero(o->object);
+ o->object.type = type;
+ o->object.size = htole64(size);
+
+ f->header->tail_object_offset = htole64(p);
+ f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);
+
+ *ret = o;
+ *offset = p;
+
+ return 0;
+}
+
+static int journal_file_setup_data_hash_table(JournalFile *f) {
+ uint64_t s, p;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ /* We estimate that we need 1 hash table entry per 768 bytes
+ of journal file and we want to make sure we never get
+ beyond 75% fill level. Calculate the hash table size for
+ the maximum file size based on these metrics. */
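+
+ /* Illustrative arithmetic (not from the original sources), assuming HashItem is 16 bytes (two
+ * 64-bit offsets): a max_size of 128 MiB yields 128 MiB * 4 / 768 / 3 ≈ 233k buckets, i.e.
+ * roughly 3.6 MiB reserved for the data hash table. */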
+
+ s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
+ if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
+ s = DEFAULT_DATA_HASH_TABLE_SIZE;
+
+ log_debug("Reserving %"PRIu64" entries in hash table.", s / sizeof(HashItem));
+
+ r = journal_file_append_object(f,
+ OBJECT_DATA_HASH_TABLE,
+ offsetof(Object, hash_table.items) + s,
+ &o, &p);
+ if (r < 0)
+ return r;
+
+ memzero(o->hash_table.items, s);
+
+ f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
+ f->header->data_hash_table_size = htole64(s);
+
+ return 0;
+}
+
+static int journal_file_setup_field_hash_table(JournalFile *f) {
+ uint64_t s, p;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ /* We use a fixed-size hash table for the fields, as their
+ * number should only grow very slowly */
+
+ s = DEFAULT_FIELD_HASH_TABLE_SIZE;
+ r = journal_file_append_object(f,
+ OBJECT_FIELD_HASH_TABLE,
+ offsetof(Object, hash_table.items) + s,
+ &o, &p);
+ if (r < 0)
+ return r;
+
+ memzero(o->hash_table.items, s);
+
+ f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
+ f->header->field_hash_table_size = htole64(s);
+
+ return 0;
+}
+
+int journal_file_map_data_hash_table(JournalFile *f) {
+ uint64_t s, p;
+ void *t;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ if (f->data_hash_table)
+ return 0;
+
+ p = le64toh(f->header->data_hash_table_offset);
+ s = le64toh(f->header->data_hash_table_size);
+
+ r = journal_file_move_to(f,
+ OBJECT_DATA_HASH_TABLE,
+ true,
+ p, s,
+ &t, NULL);
+ if (r < 0)
+ return r;
+
+ f->data_hash_table = t;
+ return 0;
+}
+
+int journal_file_map_field_hash_table(JournalFile *f) {
+ uint64_t s, p;
+ void *t;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ if (f->field_hash_table)
+ return 0;
+
+ p = le64toh(f->header->field_hash_table_offset);
+ s = le64toh(f->header->field_hash_table_size);
+
+ r = journal_file_move_to(f,
+ OBJECT_FIELD_HASH_TABLE,
+ true,
+ p, s,
+ &t, NULL);
+ if (r < 0)
+ return r;
+
+ f->field_hash_table = t;
+ return 0;
+}
+
+static int journal_file_link_field(
+ JournalFile *f,
+ Object *o,
+ uint64_t offset,
+ uint64_t hash) {
+
+ uint64_t p, h, m;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(f->field_hash_table);
+ assert(o);
+ assert(offset > 0);
+
+ if (o->object.type != OBJECT_FIELD)
+ return -EINVAL;
+
+ m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+
+ /* This might alter the window we are looking at */
+ o->field.next_hash_offset = o->field.head_data_offset = 0;
+
+ h = hash % m;
+ p = le64toh(f->field_hash_table[h].tail_hash_offset);
+ if (p == 0)
+ f->field_hash_table[h].head_hash_offset = htole64(offset);
+ else {
+ r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
+ if (r < 0)
+ return r;
+
+ o->field.next_hash_offset = htole64(offset);
+ }
+
+ f->field_hash_table[h].tail_hash_offset = htole64(offset);
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
+ f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);
+
+ return 0;
+}
+
+static int journal_file_link_data(
+ JournalFile *f,
+ Object *o,
+ uint64_t offset,
+ uint64_t hash) {
+
+ uint64_t p, h, m;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(f->data_hash_table);
+ assert(o);
+ assert(offset > 0);
+
+ if (o->object.type != OBJECT_DATA)
+ return -EINVAL;
+
+ m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+
+ /* This might alter the window we are looking at */
+ o->data.next_hash_offset = o->data.next_field_offset = 0;
+ o->data.entry_offset = o->data.entry_array_offset = 0;
+ o->data.n_entries = 0;
+
+ h = hash % m;
+ p = le64toh(f->data_hash_table[h].tail_hash_offset);
+ if (p == 0)
+ /* Only entry in this hash bucket so far, the easy case */
+ f->data_hash_table[h].head_hash_offset = htole64(offset);
+ else {
+ /* Move back to the previous data object, to patch in
+ * the pointer */
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ o->data.next_hash_offset = htole64(offset);
+ }
+
+ f->data_hash_table[h].tail_hash_offset = htole64(offset);
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
+ f->header->n_data = htole64(le64toh(f->header->n_data) + 1);
+
+ return 0;
+}
+
+int journal_file_find_field_object_with_hash(
+ JournalFile *f,
+ const void *field, uint64_t size, uint64_t hash,
+ Object **ret, uint64_t *offset) {
+
+ uint64_t p, osize, h, m;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(field && size > 0);
+
+ /* If the field hash table is empty, we can't find anything */
+ if (le64toh(f->header->field_hash_table_size) <= 0)
+ return 0;
+
+ /* Map the field hash table, if it isn't mapped yet. */
+ r = journal_file_map_field_hash_table(f);
+ if (r < 0)
+ return r;
+
+ osize = offsetof(Object, field.payload) + size;
+
+ m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+
+ h = hash % m;
+ p = le64toh(f->field_hash_table[h].head_hash_offset);
+
+ while (p > 0) {
+ Object *o;
+
+ r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
+ if (r < 0)
+ return r;
+
+ if (le64toh(o->field.hash) == hash &&
+ le64toh(o->object.size) == osize &&
+ memcmp(o->field.payload, field, size) == 0) {
+
+ if (ret)
+ *ret = o;
+ if (offset)
+ *offset = p;
+
+ return 1;
+ }
+
+ p = le64toh(o->field.next_hash_offset);
+ }
+
+ return 0;
+}
+
+int journal_file_find_field_object(
+ JournalFile *f,
+ const void *field, uint64_t size,
+ Object **ret, uint64_t *offset) {
+
+ uint64_t hash;
+
+ assert(f);
+ assert(field && size > 0);
+
+ hash = hash64(field, size);
+
+ return journal_file_find_field_object_with_hash(f,
+ field, size, hash,
+ ret, offset);
+}
+
+int journal_file_find_data_object_with_hash(
+ JournalFile *f,
+ const void *data, uint64_t size, uint64_t hash,
+ Object **ret, uint64_t *offset) {
+
+ uint64_t p, osize, h, m;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(data || size == 0);
+
+ /* If there's no data hash table, then there's no entry. */
+ if (le64toh(f->header->data_hash_table_size) <= 0)
+ return 0;
+
+ /* Map the data hash table, if it isn't mapped yet. */
+ r = journal_file_map_data_hash_table(f);
+ if (r < 0)
+ return r;
+
+ osize = offsetof(Object, data.payload) + size;
+
+ m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+ if (m <= 0)
+ return -EBADMSG;
+
+ h = hash % m;
+ p = le64toh(f->data_hash_table[h].head_hash_offset);
+
+ while (p > 0) {
+ Object *o;
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ if (le64toh(o->data.hash) != hash)
+ goto next;
+
+ if (o->object.flags & OBJECT_COMPRESSION_MASK) {
+#if HAVE_XZ || HAVE_LZ4
+ uint64_t l;
+ size_t rsize = 0;
+
+ l = le64toh(o->object.size);
+ if (l <= offsetof(Object, data.payload))
+ return -EBADMSG;
+
+ l -= offsetof(Object, data.payload);
+
+ r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
+ o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0);
+ if (r < 0)
+ return r;
+
+ if (rsize == size &&
+ memcmp(f->compress_buffer, data, size) == 0) {
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = p;
+
+ return 1;
+ }
+#else
+ return -EPROTONOSUPPORT;
+#endif
+ } else if (le64toh(o->object.size) == osize &&
+ memcmp(o->data.payload, data, size) == 0) {
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = p;
+
+ return 1;
+ }
+
+ next:
+ p = le64toh(o->data.next_hash_offset);
+ }
+
+ return 0;
+}
+
+int journal_file_find_data_object(
+ JournalFile *f,
+ const void *data, uint64_t size,
+ Object **ret, uint64_t *offset) {
+
+ uint64_t hash;
+
+ assert(f);
+ assert(data || size == 0);
+
+ hash = hash64(data, size);
+
+ return journal_file_find_data_object_with_hash(f,
+ data, size, hash,
+ ret, offset);
+}
+
+static int journal_file_append_field(
+ JournalFile *f,
+ const void *field, uint64_t size,
+ Object **ret, uint64_t *offset) {
+
+ uint64_t hash, p;
+ uint64_t osize;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(field && size > 0);
+
+ hash = hash64(field, size);
+
+ r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p);
+ if (r < 0)
+ return r;
+ else if (r > 0) {
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = p;
+
+ return 0;
+ }
+
+ osize = offsetof(Object, field.payload) + size;
+ r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
+ if (r < 0)
+ return r;
+
+ o->field.hash = htole64(hash);
+ memcpy(o->field.payload, field, size);
+
+ r = journal_file_link_field(f, o, p, hash);
+ if (r < 0)
+ return r;
+
+ /* The linking might have altered the window, so let's
+ * refresh our pointer */
+ r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
+ if (r < 0)
+ return r;
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p);
+ if (r < 0)
+ return r;
+#endif
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = p;
+
+ return 0;
+}
+
+static int journal_file_append_data(
+ JournalFile *f,
+ const void *data, uint64_t size,
+ Object **ret, uint64_t *offset) {
+
+ uint64_t hash, p;
+ uint64_t osize;
+ Object *o;
+ int r, compression = 0;
+ const void *eq;
+
+ assert(f);
+ assert(data || size == 0);
+
+ hash = hash64(data, size);
+
+ r = journal_file_find_data_object_with_hash(f, data, size, hash, &o, &p);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = p;
+
+ return 0;
+ }
+
+ osize = offsetof(Object, data.payload) + size;
+ r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
+ if (r < 0)
+ return r;
+
+ o->data.hash = htole64(hash);
+
+#if HAVE_XZ || HAVE_LZ4
+ if (JOURNAL_FILE_COMPRESS(f) && size >= f->compress_threshold_bytes) {
+ size_t rsize = 0;
+
+ compression = compress_blob(data, size, o->data.payload, size - 1, &rsize);
+
+ if (compression >= 0) {
+ o->object.size = htole64(offsetof(Object, data.payload) + rsize);
+ o->object.flags |= compression;
+
+ log_debug("Compressed data object %"PRIu64" -> %zu using %s",
+ size, rsize, object_compressed_to_string(compression));
+ } else
+ /* Compression didn't work, we don't really care why, let's continue without compression */
+ compression = 0;
+ }
+#endif
+
+ if (compression == 0)
+ memcpy_safe(o->data.payload, data, size);
+
+ r = journal_file_link_data(f, o, p, hash);
+ if (r < 0)
+ return r;
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
+ if (r < 0)
+ return r;
+#endif
+
+ /* The linking might have altered the window, so let's
+ * refresh our pointer */
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ if (!data)
+ eq = NULL;
+ else
+ eq = memchr(data, '=', size);
+ if (eq && eq > data) {
+ Object *fo = NULL;
+ uint64_t fp;
+
+ /* Create field object ... */
+ r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);
+ if (r < 0)
+ return r;
+
+ /* ... and link it in. */
+ o->data.next_field_offset = fo->field.head_data_offset;
+ fo->field.head_data_offset = le64toh(p);
+ }
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = p;
+
+ return 0;
+}
+
+uint64_t journal_file_entry_n_items(Object *o) {
+ assert(o);
+
+ if (o->object.type != OBJECT_ENTRY)
+ return 0;
+
+ return (le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem);
+}
+
+uint64_t journal_file_entry_array_n_items(Object *o) {
+ assert(o);
+
+ if (o->object.type != OBJECT_ENTRY_ARRAY)
+ return 0;
+
+ return (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / sizeof(uint64_t);
+}
+
+uint64_t journal_file_hash_table_n_items(Object *o) {
+ assert(o);
+
+ if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE))
+ return 0;
+
+ return (le64toh(o->object.size) - offsetof(Object, hash_table.items)) / sizeof(HashItem);
+}
+
+static int link_entry_into_array(JournalFile *f,
+ le64_t *first,
+ le64_t *idx,
+ uint64_t p) {
+ int r;
+ uint64_t n = 0, ap = 0, q, i, a, hidx;
+ Object *o;
+
+ assert(f);
+ assert(f->header);
+ assert(first);
+ assert(idx);
+ assert(p > 0);
+
+ a = le64toh(*first);
+ i = hidx = le64toh(*idx);
+ while (a > 0) {
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+
+ n = journal_file_entry_array_n_items(o);
+ if (i < n) {
+ o->entry_array.items[i] = htole64(p);
+ *idx = htole64(hidx + 1);
+ return 0;
+ }
+
+ i -= n;
+ ap = a;
+ a = le64toh(o->entry_array.next_entry_array_offset);
+ }
+
+ if (hidx > n)
+ n = (hidx+1) * 2;
+ else
+ n = n * 2;
+
+ if (n < 4)
+ n = 4;
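+
+ /* Illustrative sizing (not from the original sources): the first array in a chain gets 4 items;
+ * once the chain is full, each new array is sized to hold roughly twice the number of entries
+ * linked so far (e.g. 4, then 8, then 26, ...), so the number of entry array objects grows only
+ * logarithmically with the number of entries. */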
+
+ r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
+ offsetof(Object, entry_array.items) + n * sizeof(uint64_t),
+ &o, &q);
+ if (r < 0)
+ return r;
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
+ if (r < 0)
+ return r;
+#endif
+
+ o->entry_array.items[i] = htole64(p);
+
+ if (ap == 0)
+ *first = htole64(q);
+ else {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
+ if (r < 0)
+ return r;
+
+ o->entry_array.next_entry_array_offset = htole64(q);
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
+ f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);
+
+ *idx = htole64(hidx + 1);
+
+ return 0;
+}
+
+static int link_entry_into_array_plus_one(JournalFile *f,
+ le64_t *extra,
+ le64_t *first,
+ le64_t *idx,
+ uint64_t p) {
+
+ int r;
+
+ assert(f);
+ assert(extra);
+ assert(first);
+ assert(idx);
+ assert(p > 0);
+
+ if (*idx == 0)
+ *extra = htole64(p);
+ else {
+ le64_t i;
+
+ i = htole64(le64toh(*idx) - 1);
+ r = link_entry_into_array(f, first, &i, p);
+ if (r < 0)
+ return r;
+ }
+
+ *idx = htole64(le64toh(*idx) + 1);
+ return 0;
+}
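+
+ /* In other words (illustrative): the first entry referencing an object is stored inline in the
+ * "extra" field, and only the second and later entries go into the entry array chain, with
+ * logical index i mapping to array index i-1. That way no entry array needs to be allocated for
+ * data values referenced by a single entry only. */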
+
+static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) {
+ uint64_t p;
+ int r;
+ assert(f);
+ assert(o);
+ assert(offset > 0);
+
+ p = le64toh(o->entry.items[i].object_offset);
+ if (p == 0)
+ return -EINVAL;
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ return link_entry_into_array_plus_one(f,
+ &o->data.entry_offset,
+ &o->data.entry_array_offset,
+ &o->data.n_entries,
+ offset);
+}
+
+static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
+ uint64_t n, i;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(o);
+ assert(offset > 0);
+
+ if (o->object.type != OBJECT_ENTRY)
+ return -EINVAL;
+
+ __sync_synchronize();
+
+ /* Link up the entry itself */
+ r = link_entry_into_array(f,
+ &f->header->entry_array_offset,
+ &f->header->n_entries,
+ offset);
+ if (r < 0)
+ return r;
+
+ /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
+
+ if (f->header->head_entry_realtime == 0)
+ f->header->head_entry_realtime = o->entry.realtime;
+
+ f->header->tail_entry_realtime = o->entry.realtime;
+ f->header->tail_entry_monotonic = o->entry.monotonic;
+
+ /* Link up the items */
+ n = journal_file_entry_n_items(o);
+ for (i = 0; i < n; i++) {
+ r = journal_file_link_entry_item(f, o, offset, i);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int journal_file_append_entry_internal(
+ JournalFile *f,
+ const dual_timestamp *ts,
+ const sd_id128_t *boot_id,
+ uint64_t xor_hash,
+ const EntryItem items[], unsigned n_items,
+ uint64_t *seqnum,
+ Object **ret, uint64_t *offset) {
+ uint64_t np;
+ uint64_t osize;
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(f->header);
+ assert(items || n_items == 0);
+ assert(ts);
+
+ osize = offsetof(Object, entry.items) + (n_items * sizeof(EntryItem));
+
+ r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
+ if (r < 0)
+ return r;
+
+ o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
+ memcpy_safe(o->entry.items, items, n_items * sizeof(EntryItem));
+ o->entry.realtime = htole64(ts->realtime);
+ o->entry.monotonic = htole64(ts->monotonic);
+ o->entry.xor_hash = htole64(xor_hash);
+ o->entry.boot_id = boot_id ? *boot_id : f->header->boot_id;
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
+ if (r < 0)
+ return r;
+#endif
+
+ r = journal_file_link_entry(f, o, np);
+ if (r < 0)
+ return r;
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = np;
+
+ return 0;
+}
+
+void journal_file_post_change(JournalFile *f) {
+ assert(f);
+
+ if (f->fd < 0)
+ return;
+
+ /* inotify does not generate IN_MODIFY events for file
+ * modifications done via mmap(). Hence, after each access we
+ * trigger IN_MODIFY explicitly by truncating the journal file
+ * to its current size. */
+
+ __sync_synchronize();
+
+ if (ftruncate(f->fd, f->last_stat.st_size) < 0)
+ log_debug_errno(errno, "Failed to truncate file to its own size: %m");
+}
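+
+ /* Illustrative sketch (not part of the original file) of how a reader might observe these
+ * ftruncate()-triggered wakeups, assuming plain inotify(7) usage:
+ *
+ *     int ifd = inotify_init1(IN_CLOEXEC);
+ *     (void) inotify_add_watch(ifd, "/run/log/journal/.../system.journal", IN_MODIFY);
+ *     // poll()ing ifd now wakes up after every journal_file_post_change() call,
+ *     // even though the writer only modified the file through its mmap()ed window.
+ */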
+
+static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
+ assert(userdata);
+
+ journal_file_post_change(userdata);
+
+ return 1;
+}
+
+static void schedule_post_change(JournalFile *f) {
+ uint64_t now;
+ int r;
+
+ assert(f);
+ assert(f->post_change_timer);
+
+ r = sd_event_source_get_enabled(f->post_change_timer, NULL);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to get ftruncate timer state: %m");
+ goto fail;
+ }
+ if (r > 0)
+ return;
+
+ r = sd_event_now(sd_event_source_get_event(f->post_change_timer), CLOCK_MONOTONIC, &now);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to get clock's now for scheduling ftruncate: %m");
+ goto fail;
+ }
+
+ r = sd_event_source_set_time(f->post_change_timer, now + f->post_change_timer_period);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
+ goto fail;
+ }
+
+ r = sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_ONESHOT);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
+ goto fail;
+ }
+
+ return;
+
+fail:
+ /* On failure, let's simply post the change immediately. */
+ journal_file_post_change(f);
+}
+
+/* Enable coalesced change posting in a timer on the provided sd_event instance */
+int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
+ _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
+ int r;
+
+ assert(f);
+ assert_return(!f->post_change_timer, -EINVAL);
+ assert(e);
+ assert(t);
+
+ r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
+ if (r < 0)
+ return r;
+
+ r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
+ if (r < 0)
+ return r;
+
+ f->post_change_timer = TAKE_PTR(timer);
+ f->post_change_timer_period = t;
+
+ return r;
+}
+
+static int entry_item_cmp(const EntryItem *a, const EntryItem *b) {
+ return CMP(le64toh(a->object_offset), le64toh(b->object_offset));
+}
+
+int journal_file_append_entry(
+ JournalFile *f,
+ const dual_timestamp *ts,
+ const sd_id128_t *boot_id,
+ const struct iovec iovec[], unsigned n_iovec,
+ uint64_t *seqnum,
+ Object **ret, uint64_t *offset) {
+
+ unsigned i;
+ EntryItem *items;
+ int r;
+ uint64_t xor_hash = 0;
+ struct dual_timestamp _ts;
+
+ assert(f);
+ assert(f->header);
+ assert(iovec || n_iovec == 0);
+
+ if (ts) {
+ if (!VALID_REALTIME(ts->realtime))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid realtime timestamp %" PRIu64 ", refusing entry.",
+ ts->realtime);
+ if (!VALID_MONOTONIC(ts->monotonic))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "Invalid monotomic timestamp %" PRIu64 ", refusing entry.",
+ ts->monotonic);
+ } else {
+ dual_timestamp_get(&_ts);
+ ts = &_ts;
+ }
+
+#if HAVE_GCRYPT
+ r = journal_file_maybe_append_tag(f, ts->realtime);
+ if (r < 0)
+ return r;
+#endif
+
+ /* alloca() can't take 0, hence let's allocate at least one */
+ items = newa(EntryItem, MAX(1u, n_iovec));
+
+ for (i = 0; i < n_iovec; i++) {
+ uint64_t p;
+ Object *o;
+
+ r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
+ if (r < 0)
+ return r;
+
+ xor_hash ^= le64toh(o->data.hash);
+ items[i].object_offset = htole64(p);
+ items[i].hash = o->data.hash;
+ }
+
+ /* Order by the position on disk, in order to improve seek
+ * times for rotating media. */
+ typesafe_qsort(items, n_iovec, entry_item_cmp);
+
+ r = journal_file_append_entry_internal(f, ts, boot_id, xor_hash, items, n_iovec, seqnum, ret, offset);
+
+ /* If the memory mapping triggered a SIGBUS then we return an
+ * IO error and ignore the error code passed down to us, since
+ * it is very likely just an effect of a nullified replacement
+ * mapping page */
+
+ if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
+ r = -EIO;
+
+ if (f->post_change_timer)
+ schedule_post_change(f);
+ else
+ journal_file_post_change(f);
+
+ return r;
+}
+
+typedef struct ChainCacheItem {
+ uint64_t first; /* the array at the beginning of the chain */
+ uint64_t array; /* the cached array */
+ uint64_t begin; /* the first item in the cached array */
+ uint64_t total; /* the total number of items in all arrays before this one in the chain */
+ uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
+} ChainCacheItem;
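+
+ /* Illustrative: when the same entry array chain is traversed repeatedly (as happens while
+ * iterating through a journal), this per-file cache lets generic_array_get() and
+ * generic_array_bisect() resume at the array where the previous lookup ended, instead of
+ * re-walking the chain from the first array every time. */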
+
+static void chain_cache_put(
+ OrderedHashmap *h,
+ ChainCacheItem *ci,
+ uint64_t first,
+ uint64_t array,
+ uint64_t begin,
+ uint64_t total,
+ uint64_t last_index) {
+
+ if (!ci) {
+ /* If the array to cache is the first one in the
+ * chain, it's not worth caching anything */
+ if (array == first)
+ return;
+
+ if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
+ ci = ordered_hashmap_steal_first(h);
+ assert(ci);
+ } else {
+ ci = new(ChainCacheItem, 1);
+ if (!ci)
+ return;
+ }
+
+ ci->first = first;
+
+ if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
+ free(ci);
+ return;
+ }
+ } else
+ assert(ci->first == first);
+
+ ci->array = array;
+ ci->begin = begin;
+ ci->total = total;
+ ci->last_index = last_index;
+}
+
+static int generic_array_get(
+ JournalFile *f,
+ uint64_t first,
+ uint64_t i,
+ Object **ret, uint64_t *offset) {
+
+ Object *o;
+ uint64_t p = 0, a, t = 0;
+ int r;
+ ChainCacheItem *ci;
+
+ assert(f);
+
+ a = first;
+
+ /* Try the chain cache first */
+ ci = ordered_hashmap_get(f->chain_cache, &first);
+ if (ci && i > ci->total) {
+ a = ci->array;
+ i -= ci->total;
+ t = ci->total;
+ }
+
+ while (a > 0) {
+ uint64_t k;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+
+ k = journal_file_entry_array_n_items(o);
+ if (i < k) {
+ p = le64toh(o->entry_array.items[i]);
+ goto found;
+ }
+
+ i -= k;
+ t += k;
+ a = le64toh(o->entry_array.next_entry_array_offset);
+ }
+
+ return 0;
+
+found:
+ /* Let's cache this item for the next invocation */
+ chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = p;
+
+ return 1;
+}
+
+static int generic_array_get_plus_one(
+ JournalFile *f,
+ uint64_t extra,
+ uint64_t first,
+ uint64_t i,
+ Object **ret, uint64_t *offset) {
+
+ Object *o;
+
+ assert(f);
+
+ if (i == 0) {
+ int r;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
+ if (r < 0)
+ return r;
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = extra;
+
+ return 1;
+ }
+
+ return generic_array_get(f, first, i-1, ret, offset);
+}
+
+enum {
+ TEST_FOUND,
+ TEST_LEFT,
+ TEST_RIGHT
+};
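+
+ /* Meaning of the test_object() results used for bisection (descriptive note, not from the
+ * original sources): TEST_LEFT means the tested entry sorts before the needle (keep looking
+ * further to the right), TEST_RIGHT means it sorts after the needle (keep looking to the left),
+ * and TEST_FOUND means it matches exactly. */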
+
+static int generic_array_bisect(
+ JournalFile *f,
+ uint64_t first,
+ uint64_t n,
+ uint64_t needle,
+ int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset,
+ uint64_t *idx) {
+
+ uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1;
+ bool subtract_one = false;
+ Object *o, *array = NULL;
+ int r;
+ ChainCacheItem *ci;
+
+ assert(f);
+ assert(test_object);
+
+ /* Start with the first array in the chain */
+ a = first;
+
+ ci = ordered_hashmap_get(f->chain_cache, &first);
+ if (ci && n > ci->total && ci->begin != 0) {
+ /* Ah, we have iterated this bisection array chain
+ * previously! Let's see if we can skip ahead in the
+ * chain, as far as the last time. But we can't jump
+ * backwards in the chain, so let's check that
+ * first. */
+
+ r = test_object(f, ci->begin, needle);
+ if (r < 0)
+ return r;
+
+ if (r == TEST_LEFT) {
+ /* OK, what we are looking for is to the right of
+ * the beginning of this EntryArray, so let's jump
+ * straight to the previously cached array in the
+ * chain */
+
+ a = ci->array;
+ n -= ci->total;
+ t = ci->total;
+ last_index = ci->last_index;
+ }
+ }
+
+ while (a > 0) {
+ uint64_t left, right, k, lp;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
+ if (r < 0)
+ return r;
+
+ k = journal_file_entry_array_n_items(array);
+ right = MIN(k, n);
+ if (right <= 0)
+ return 0;
+
+ i = right - 1;
+ lp = p = le64toh(array->entry_array.items[i]);
+ if (p <= 0)
+ r = -EBADMSG;
+ else
+ r = test_object(f, p, needle);
+ if (r == -EBADMSG) {
+ log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
+ n = i;
+ continue;
+ }
+ if (r < 0)
+ return r;
+
+ if (r == TEST_FOUND)
+ r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
+
+ if (r == TEST_RIGHT) {
+ left = 0;
+ right -= 1;
+
+ if (last_index != (uint64_t) -1) {
+ assert(last_index <= right);
+
+ /* If we cached the last index we
+ * looked at, let's try not to jump
+ * around too wildly and see if we can
+ * narrow the range to look at early on
+ * to the immediate neighbors of the last
+ * index we looked at. */
+
+ if (last_index > 0) {
+ uint64_t x = last_index - 1;
+
+ p = le64toh(array->entry_array.items[x]);
+ if (p <= 0)
+ return -EBADMSG;
+
+ r = test_object(f, p, needle);
+ if (r < 0)
+ return r;
+
+ if (r == TEST_FOUND)
+ r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
+
+ if (r == TEST_RIGHT)
+ right = x;
+ else
+ left = x + 1;
+ }
+
+ if (last_index < right) {
+ uint64_t y = last_index + 1;
+
+ p = le64toh(array->entry_array.items[y]);
+ if (p <= 0)
+ return -EBADMSG;
+
+ r = test_object(f, p, needle);
+ if (r < 0)
+ return r;
+
+ if (r == TEST_FOUND)
+ r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
+
+ if (r == TEST_RIGHT)
+ right = y;
+ else
+ left = y + 1;
+ }
+ }
+
+ for (;;) {
+ if (left == right) {
+ if (direction == DIRECTION_UP)
+ subtract_one = true;
+
+ i = left;
+ goto found;
+ }
+
+ assert(left < right);
+ i = (left + right) / 2;
+
+ p = le64toh(array->entry_array.items[i]);
+ if (p <= 0)
+ r = -EBADMSG;
+ else
+ r = test_object(f, p, needle);
+ if (r == -EBADMSG) {
+ log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
+ right = n = i;
+ continue;
+ }
+ if (r < 0)
+ return r;
+
+ if (r == TEST_FOUND)
+ r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
+
+ if (r == TEST_RIGHT)
+ right = i;
+ else
+ left = i + 1;
+ }
+ }
+
+ if (k >= n) {
+ if (direction == DIRECTION_UP) {
+ i = n;
+ subtract_one = true;
+ goto found;
+ }
+
+ return 0;
+ }
+
+ last_p = lp;
+
+ n -= k;
+ t += k;
+ last_index = (uint64_t) -1;
+ a = le64toh(array->entry_array.next_entry_array_offset);
+ }
+
+ return 0;
+
+found:
+ if (subtract_one && t == 0 && i == 0)
+ return 0;
+
+ /* Let's cache this item for the next invocation */
+ chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : (uint64_t) -1) : i);
+
+ if (subtract_one && i == 0)
+ p = last_p;
+ else if (subtract_one)
+ p = le64toh(array->entry_array.items[i-1]);
+ else
+ p = le64toh(array->entry_array.items[i]);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = p;
+
+ if (idx)
+ *idx = t + i + (subtract_one ? -1 : 0);
+
+ return 1;
+}
+
+static int generic_array_bisect_plus_one(
+ JournalFile *f,
+ uint64_t extra,
+ uint64_t first,
+ uint64_t n,
+ uint64_t needle,
+ int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset,
+ uint64_t *idx) {
+
+ int r;
+ bool step_back = false;
+ Object *o;
+
+ assert(f);
+ assert(test_object);
+
+ if (n <= 0)
+ return 0;
+
+ /* This bisects the array in object 'first', but first checks
+ * the extra entry */
+ r = test_object(f, extra, needle);
+ if (r < 0)
+ return r;
+
+ if (r == TEST_FOUND)
+ r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
+
+ /* If we are looking in DIRECTION_UP, we first need to check
+ whether the actual array contains a matching entry, and if so
+ return the last such one. But if there isn't any we need to
+ return the extra entry instead. Hence remember this, and return
+ it below. */
+ if (r == TEST_LEFT)
+ step_back = direction == DIRECTION_UP;
+
+ if (r == TEST_RIGHT) {
+ if (direction == DIRECTION_DOWN)
+ goto found;
+ else
+ return 0;
+ }
+
+ r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret, offset, idx);
+
+ if (r == 0 && step_back)
+ goto found;
+
+ if (r > 0 && idx)
+ (*idx)++;
+
+ return r;
+
+found:
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
+ if (r < 0)
+ return r;
+
+ if (ret)
+ *ret = o;
+
+ if (offset)
+ *offset = extra;
+
+ if (idx)
+ *idx = 0;
+
+ return 1;
+}
+
+_pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
+ assert(f);
+ assert(p > 0);
+
+ if (p == needle)
+ return TEST_FOUND;
+ else if (p < needle)
+ return TEST_LEFT;
+ else
+ return TEST_RIGHT;
+}
+
+static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(p > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ if (le64toh(o->entry.seqnum) == needle)
+ return TEST_FOUND;
+ else if (le64toh(o->entry.seqnum) < needle)
+ return TEST_LEFT;
+ else
+ return TEST_RIGHT;
+}
+
+int journal_file_move_to_entry_by_seqnum(
+ JournalFile *f,
+ uint64_t seqnum,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+ assert(f);
+ assert(f->header);
+
+ return generic_array_bisect(f,
+ le64toh(f->header->entry_array_offset),
+ le64toh(f->header->n_entries),
+ seqnum,
+ test_object_seqnum,
+ direction,
+ ret, offset, NULL);
+}
+
+static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(p > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ if (le64toh(o->entry.realtime) == needle)
+ return TEST_FOUND;
+ else if (le64toh(o->entry.realtime) < needle)
+ return TEST_LEFT;
+ else
+ return TEST_RIGHT;
+}
+
+int journal_file_move_to_entry_by_realtime(
+ JournalFile *f,
+ uint64_t realtime,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+ assert(f);
+ assert(f->header);
+
+ return generic_array_bisect(f,
+ le64toh(f->header->entry_array_offset),
+ le64toh(f->header->n_entries),
+ realtime,
+ test_object_realtime,
+ direction,
+ ret, offset, NULL);
+}
+
+static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
+ Object *o;
+ int r;
+
+ assert(f);
+ assert(p > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ if (le64toh(o->entry.monotonic) == needle)
+ return TEST_FOUND;
+ else if (le64toh(o->entry.monotonic) < needle)
+ return TEST_LEFT;
+ else
+ return TEST_RIGHT;
+}
+
+static int find_data_object_by_boot_id(
+ JournalFile *f,
+ sd_id128_t boot_id,
+ Object **o,
+ uint64_t *b) {
+
+ char t[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
+
+ sd_id128_to_string(boot_id, t + 9);
+ return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b);
+}
+
+int journal_file_move_to_entry_by_monotonic(
+ JournalFile *f,
+ sd_id128_t boot_id,
+ uint64_t monotonic,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+
+ Object *o;
+ int r;
+
+ assert(f);
+
+ r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ return -ENOENT;
+
+ return generic_array_bisect_plus_one(f,
+ le64toh(o->data.entry_offset),
+ le64toh(o->data.entry_array_offset),
+ le64toh(o->data.n_entries),
+ monotonic,
+ test_object_monotonic,
+ direction,
+ ret, offset, NULL);
+}
+
+void journal_file_reset_location(JournalFile *f) {
+ f->location_type = LOCATION_HEAD;
+ f->current_offset = 0;
+ f->current_seqnum = 0;
+ f->current_realtime = 0;
+ f->current_monotonic = 0;
+ zero(f->current_boot_id);
+ f->current_xor_hash = 0;
+}
+
+void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
+ f->location_type = LOCATION_SEEK;
+ f->current_offset = offset;
+ f->current_seqnum = le64toh(o->entry.seqnum);
+ f->current_realtime = le64toh(o->entry.realtime);
+ f->current_monotonic = le64toh(o->entry.monotonic);
+ f->current_boot_id = o->entry.boot_id;
+ f->current_xor_hash = le64toh(o->entry.xor_hash);
+}
+
+int journal_file_compare_locations(JournalFile *af, JournalFile *bf) {
+ int r;
+
+ assert(af);
+ assert(af->header);
+ assert(bf);
+ assert(bf->header);
+ assert(af->location_type == LOCATION_SEEK);
+ assert(bf->location_type == LOCATION_SEEK);
+
+ /* If contents and timestamps match, these entries are
+ * identical, even if the seqnums do not match */
+ if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
+ af->current_monotonic == bf->current_monotonic &&
+ af->current_realtime == bf->current_realtime &&
+ af->current_xor_hash == bf->current_xor_hash)
+ return 0;
+
+ if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {
+
+ /* If this is from the same seqnum source, compare
+ * seqnums */
+ r = CMP(af->current_seqnum, bf->current_seqnum);
+ if (r != 0)
+ return r;
+
+ /* Wow! This is weird, different data but the same
+ * seqnums? Something is borked, but let's make the
+ * best of it and compare by time. */
+ }
+
+ if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) {
+
+ /* If the boot id matches, compare monotonic time */
+ r = CMP(af->current_monotonic, bf->current_monotonic);
+ if (r != 0)
+ return r;
+ }
+
+ /* Otherwise, compare UTC time */
+ r = CMP(af->current_realtime, bf->current_realtime);
+ if (r != 0)
+ return r;
+
+ /* Finally, compare by contents */
+ return CMP(af->current_xor_hash, bf->current_xor_hash);
+}
+
+static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
+
+ /* Increase or decrease the specified index, in the right direction. */
+
+ if (direction == DIRECTION_DOWN) {
+ if (*i >= n - 1)
+ return 0;
+
+ (*i) ++;
+ } else {
+ if (*i <= 0)
+ return 0;
+
+ (*i) --;
+ }
+
+ return 1;
+}
+
+static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
+
+ /* Consider it an error if either of the two offsets is uninitialized */
+ if (old_offset == 0 || new_offset == 0)
+ return false;
+
+ /* If we go down, the new offset must be larger than the old one. */
+ return direction == DIRECTION_DOWN ?
+ new_offset > old_offset :
+ new_offset < old_offset;
+}
+
+int journal_file_next_entry(
+ JournalFile *f,
+ uint64_t p,
+ direction_t direction,
+ Object **ret, uint64_t *offset) {
+
+ uint64_t i, n, ofs;
+ int r;
+
+ assert(f);
+ assert(f->header);
+
+ n = le64toh(f->header->n_entries);
+ if (n <= 0)
+ return 0;
+
+ if (p == 0)
+ i = direction == DIRECTION_DOWN ? 0 : n - 1;
+ else {
+ r = generic_array_bisect(f,
+ le64toh(f->header->entry_array_offset),
+ le64toh(f->header->n_entries),
+ p,
+ test_object_offset,
+ DIRECTION_DOWN,
+ NULL, NULL,
+ &i);
+ if (r <= 0)
+ return r;
+
+ r = bump_array_index(&i, direction, n);
+ if (r <= 0)
+ return r;
+ }
+
+ /* And jump to it */
+ for (;;) {
+ r = generic_array_get(f,
+ le64toh(f->header->entry_array_offset),
+ i,
+ ret, &ofs);
+ if (r > 0)
+ break;
+ if (r != -EBADMSG)
+ return r;
+
+ /* OK, so this entry is borked. Most likely some entry didn't get synced to disk properly, let's see if
+ * the next one might work for us instead. */
+ log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);
+
+ r = bump_array_index(&i, direction, n);
+ if (r <= 0)
+ return r;
+ }
+
+ /* Ensure our array is properly ordered. */
+ if (p > 0 && !check_properly_ordered(ofs, p, direction))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s: entry array not properly ordered at entry %" PRIu64,
+ f->path, i);
+
+ if (offset)
+ *offset = ofs;
+
+ return 1;
+}
+
+int journal_file_next_entry_for_data(
+ JournalFile *f,
+ Object *o, uint64_t p,
+ uint64_t data_offset,
+ direction_t direction,
+ Object **ret, uint64_t *offset) {
+
+ uint64_t i, n, ofs;
+ Object *d;
+ int r;
+
+ assert(f);
+ assert(p > 0 || !o);
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
+ if (r < 0)
+ return r;
+
+ n = le64toh(d->data.n_entries);
+ if (n <= 0)
+ return n;
+
+ if (!o)
+ i = direction == DIRECTION_DOWN ? 0 : n - 1;
+ else {
+ if (o->object.type != OBJECT_ENTRY)
+ return -EINVAL;
+
+ r = generic_array_bisect_plus_one(f,
+ le64toh(d->data.entry_offset),
+ le64toh(d->data.entry_array_offset),
+ le64toh(d->data.n_entries),
+ p,
+ test_object_offset,
+ DIRECTION_DOWN,
+ NULL, NULL,
+ &i);
+
+ if (r <= 0)
+ return r;
+
+ r = bump_array_index(&i, direction, n);
+ if (r <= 0)
+ return r;
+ }
+
+ for (;;) {
+ r = generic_array_get_plus_one(f,
+ le64toh(d->data.entry_offset),
+ le64toh(d->data.entry_array_offset),
+ i,
+ ret, &ofs);
+ if (r > 0)
+ break;
+ if (r != -EBADMSG)
+ return r;
+
+ log_debug_errno(r, "Data entry item %" PRIu64 " is bad, skipping over it.", i);
+
+ r = bump_array_index(&i, direction, n);
+ if (r <= 0)
+ return r;
+ }
+
+ /* Ensure our array is properly ordered. */
+ if (p > 0 && !check_properly_ordered(ofs, p, direction))
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s data entry array not properly ordered at entry %" PRIu64,
+ f->path, i);
+
+ if (offset)
+ *offset = ofs;
+
+ return 1;
+}
+
+int journal_file_move_to_entry_by_offset_for_data(
+ JournalFile *f,
+ uint64_t data_offset,
+ uint64_t p,
+ direction_t direction,
+ Object **ret, uint64_t *offset) {
+
+ int r;
+ Object *d;
+
+ assert(f);
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
+ if (r < 0)
+ return r;
+
+ return generic_array_bisect_plus_one(f,
+ le64toh(d->data.entry_offset),
+ le64toh(d->data.entry_array_offset),
+ le64toh(d->data.n_entries),
+ p,
+ test_object_offset,
+ direction,
+ ret, offset, NULL);
+}
+
+int journal_file_move_to_entry_by_monotonic_for_data(
+ JournalFile *f,
+ uint64_t data_offset,
+ sd_id128_t boot_id,
+ uint64_t monotonic,
+ direction_t direction,
+ Object **ret, uint64_t *offset) {
+
+ Object *o, *d;
+ int r;
+ uint64_t b, z;
+
+ assert(f);
+
+ /* First, seek by time */
+ r = find_data_object_by_boot_id(f, boot_id, &o, &b);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ return -ENOENT;
+
+ r = generic_array_bisect_plus_one(f,
+ le64toh(o->data.entry_offset),
+ le64toh(o->data.entry_array_offset),
+ le64toh(o->data.n_entries),
+ monotonic,
+ test_object_monotonic,
+ direction,
+ NULL, &z, NULL);
+ if (r <= 0)
+ return r;
+
+ /* And now, continue seeking until we find an entry that
+ * exists in both bisection arrays */
+
+ for (;;) {
+ Object *qo;
+ uint64_t p, q;
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
+ if (r < 0)
+ return r;
+
+ r = generic_array_bisect_plus_one(f,
+ le64toh(d->data.entry_offset),
+ le64toh(d->data.entry_array_offset),
+ le64toh(d->data.n_entries),
+ z,
+ test_object_offset,
+ direction,
+ NULL, &p, NULL);
+ if (r <= 0)
+ return r;
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, b, &o);
+ if (r < 0)
+ return r;
+
+ r = generic_array_bisect_plus_one(f,
+ le64toh(o->data.entry_offset),
+ le64toh(o->data.entry_array_offset),
+ le64toh(o->data.n_entries),
+ p,
+ test_object_offset,
+ direction,
+ &qo, &q, NULL);
+
+ if (r <= 0)
+ return r;
+
+ if (p == q) {
+ if (ret)
+ *ret = qo;
+ if (offset)
+ *offset = q;
+
+ return 1;
+ }
+
+ z = q;
+ }
+}
+
+int journal_file_move_to_entry_by_seqnum_for_data(
+ JournalFile *f,
+ uint64_t data_offset,
+ uint64_t seqnum,
+ direction_t direction,
+ Object **ret, uint64_t *offset) {
+
+ Object *d;
+ int r;
+
+ assert(f);
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
+ if (r < 0)
+ return r;
+
+ return generic_array_bisect_plus_one(f,
+ le64toh(d->data.entry_offset),
+ le64toh(d->data.entry_array_offset),
+ le64toh(d->data.n_entries),
+ seqnum,
+ test_object_seqnum,
+ direction,
+ ret, offset, NULL);
+}
+
+int journal_file_move_to_entry_by_realtime_for_data(
+ JournalFile *f,
+ uint64_t data_offset,
+ uint64_t realtime,
+ direction_t direction,
+ Object **ret, uint64_t *offset) {
+
+ Object *d;
+ int r;
+
+ assert(f);
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
+ if (r < 0)
+ return r;
+
+ return generic_array_bisect_plus_one(f,
+ le64toh(d->data.entry_offset),
+ le64toh(d->data.entry_array_offset),
+ le64toh(d->data.n_entries),
+ realtime,
+ test_object_realtime,
+ direction,
+ ret, offset, NULL);
+}
+
+void journal_file_dump(JournalFile *f) {
+ Object *o;
+ int r;
+ uint64_t p;
+
+ assert(f);
+ assert(f->header);
+
+ journal_file_print_header(f);
+
+ p = le64toh(f->header->header_size);
+ while (p != 0) {
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
+ if (r < 0)
+ goto fail;
+
+ switch (o->object.type) {
+
+ case OBJECT_UNUSED:
+ printf("Type: OBJECT_UNUSED\n");
+ break;
+
+ case OBJECT_DATA:
+ printf("Type: OBJECT_DATA\n");
+ break;
+
+ case OBJECT_FIELD:
+ printf("Type: OBJECT_FIELD\n");
+ break;
+
+ case OBJECT_ENTRY:
+ printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
+ le64toh(o->entry.seqnum),
+ le64toh(o->entry.monotonic),
+ le64toh(o->entry.realtime));
+ break;
+
+ case OBJECT_FIELD_HASH_TABLE:
+ printf("Type: OBJECT_FIELD_HASH_TABLE\n");
+ break;
+
+ case OBJECT_DATA_HASH_TABLE:
+ printf("Type: OBJECT_DATA_HASH_TABLE\n");
+ break;
+
+ case OBJECT_ENTRY_ARRAY:
+ printf("Type: OBJECT_ENTRY_ARRAY\n");
+ break;
+
+ case OBJECT_TAG:
+ printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n",
+ le64toh(o->tag.seqnum),
+ le64toh(o->tag.epoch));
+ break;
+
+ default:
+ printf("Type: unknown (%i)\n", o->object.type);
+ break;
+ }
+
+ if (o->object.flags & OBJECT_COMPRESSION_MASK)
+ printf("Flags: %s\n",
+ object_compressed_to_string(o->object.flags & OBJECT_COMPRESSION_MASK));
+
+ if (p == le64toh(f->header->tail_object_offset))
+ p = 0;
+ else
+ p = p + ALIGN64(le64toh(o->object.size));
+ }
+
+ return;
+fail:
+ log_error("File corrupt");
+}
+
+static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) {
+ const char *x;
+
+ x = format_timestamp(buf, l, t);
+ if (x)
+ return x;
+ return " --- ";
+}
+
+void journal_file_print_header(JournalFile *f) {
+ char a[33], b[33], c[33], d[33];
+ char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX];
+ struct stat st;
+ char bytes[FORMAT_BYTES_MAX];
+
+ assert(f);
+ assert(f->header);
+
+ printf("File Path: %s\n"
+ "File ID: %s\n"
+ "Machine ID: %s\n"
+ "Boot ID: %s\n"
+ "Sequential Number ID: %s\n"
+ "State: %s\n"
+ "Compatible Flags:%s%s\n"
+ "Incompatible Flags:%s%s%s\n"
+ "Header size: %"PRIu64"\n"
+ "Arena size: %"PRIu64"\n"
+ "Data Hash Table Size: %"PRIu64"\n"
+ "Field Hash Table Size: %"PRIu64"\n"
+ "Rotate Suggested: %s\n"
+ "Head Sequential Number: %"PRIu64" (%"PRIx64")\n"
+ "Tail Sequential Number: %"PRIu64" (%"PRIx64")\n"
+ "Head Realtime Timestamp: %s (%"PRIx64")\n"
+ "Tail Realtime Timestamp: %s (%"PRIx64")\n"
+ "Tail Monotonic Timestamp: %s (%"PRIx64")\n"
+ "Objects: %"PRIu64"\n"
+ "Entry Objects: %"PRIu64"\n",
+ f->path,
+ sd_id128_to_string(f->header->file_id, a),
+ sd_id128_to_string(f->header->machine_id, b),
+ sd_id128_to_string(f->header->boot_id, c),
+ sd_id128_to_string(f->header->seqnum_id, d),
+ f->header->state == STATE_OFFLINE ? "OFFLINE" :
+ f->header->state == STATE_ONLINE ? "ONLINE" :
+ f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
+ JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
+ (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
+ JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
+ JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
+ (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
+ le64toh(f->header->header_size),
+ le64toh(f->header->arena_size),
+ le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
+ le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
+ yes_no(journal_file_rotate_suggested(f, 0)),
+ le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
+ le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
+ format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
+ format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
+ format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
+ le64toh(f->header->n_objects),
+ le64toh(f->header->n_entries));
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
+ printf("Data Objects: %"PRIu64"\n"
+ "Data Hash Table Fill: %.1f%%\n",
+ le64toh(f->header->n_data),
+ 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
+ printf("Field Objects: %"PRIu64"\n"
+ "Field Hash Table Fill: %.1f%%\n",
+ le64toh(f->header->n_fields),
+ 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
+ printf("Tag Objects: %"PRIu64"\n",
+ le64toh(f->header->n_tags));
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
+ printf("Entry Array Objects: %"PRIu64"\n",
+ le64toh(f->header->n_entry_arrays));
+
+ if (fstat(f->fd, &st) >= 0)
+ printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (uint64_t) st.st_blocks * 512ULL));
+}
+
+static int journal_file_warn_btrfs(JournalFile *f) {
+ unsigned attrs;
+ int r;
+
+ assert(f);
+
+        /* Before we write anything, check if the COW logic is turned
+         * off on btrfs. Given that our write pattern is quite
+         * unfriendly to COW file systems, turning COW off should
+         * greatly improve performance on such file systems (e.g.
+         * btrfs), at the expense of data integrity features (which
+         * shouldn't be too bad, given that we do our own
+         * checksumming). */
+
+ r = btrfs_is_filesystem(f->fd);
+ if (r < 0)
+ return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");
+ if (!r)
+ return 0;
+
+ r = read_attr_fd(f->fd, &attrs);
+ if (r < 0)
+ return log_warning_errno(r, "Failed to read file attributes: %m");
+
+ if (attrs & FS_NOCOW_FL) {
+ log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
+ return 0;
+ }
+
+ log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
+ "This is likely to slow down journal access substantially, please consider turning "
+ "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
+
+ return 1;
+}
+
+int journal_file_open(
+ int fd,
+ const char *fname,
+ int flags,
+ mode_t mode,
+ bool compress,
+ uint64_t compress_threshold_bytes,
+ bool seal,
+ JournalMetrics *metrics,
+ MMapCache *mmap_cache,
+ Set *deferred_closes,
+ JournalFile *template,
+ JournalFile **ret) {
+
+ bool newly_created = false;
+ JournalFile *f;
+ void *h;
+ int r;
+ char bytes[FORMAT_BYTES_MAX];
+
+ assert(ret);
+ assert(fd >= 0 || fname);
+
+ if (!IN_SET((flags & O_ACCMODE), O_RDONLY, O_RDWR))
+ return -EINVAL;
+
+ if (fname && (flags & O_CREAT) && !endswith(fname, ".journal"))
+ return -EINVAL;
+
+ f = new(JournalFile, 1);
+ if (!f)
+ return -ENOMEM;
+
+ *f = (JournalFile) {
+ .fd = fd,
+ .mode = mode,
+
+ .flags = flags,
+ .prot = prot_from_flags(flags),
+ .writable = (flags & O_ACCMODE) != O_RDONLY,
+
+#if HAVE_LZ4
+ .compress_lz4 = compress,
+#elif HAVE_XZ
+ .compress_xz = compress,
+#endif
+ .compress_threshold_bytes = compress_threshold_bytes == (uint64_t) -1 ?
+ DEFAULT_COMPRESS_THRESHOLD :
+ MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),
+#if HAVE_GCRYPT
+ .seal = seal,
+#endif
+ };
+
+ log_debug("Journal effective settings seal=%s compress=%s compress_threshold_bytes=%s",
+ yes_no(f->seal), yes_no(JOURNAL_FILE_COMPRESS(f)),
+ format_bytes(bytes, sizeof(bytes), f->compress_threshold_bytes));
+
+ if (mmap_cache)
+ f->mmap = mmap_cache_ref(mmap_cache);
+ else {
+ f->mmap = mmap_cache_new();
+ if (!f->mmap) {
+ r = -ENOMEM;
+ goto fail;
+ }
+ }
+
+ if (fname) {
+ f->path = strdup(fname);
+ if (!f->path) {
+ r = -ENOMEM;
+ goto fail;
+ }
+ } else {
+ assert(fd >= 0);
+
+ /* If we don't know the path, fill in something explanatory and vaguely useful */
+ if (asprintf(&f->path, "/proc/self/%i", fd) < 0) {
+ r = -ENOMEM;
+ goto fail;
+ }
+ }
+
+ f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
+ if (!f->chain_cache) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ if (f->fd < 0) {
+                /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or
+                 * FIFO or similar, we fail quickly rather than blocking for a long time. For regular files
+                 * O_NONBLOCK has no effect, hence it doesn't hurt in that case. */
+
+ f->fd = open(f->path, f->flags|O_CLOEXEC|O_NONBLOCK, f->mode);
+ if (f->fd < 0) {
+ r = -errno;
+ goto fail;
+ }
+
+                /* Fds we opened here ourselves should also be closed by us. */
+ f->close_fd = true;
+
+ r = fd_nonblock(f->fd, false);
+ if (r < 0)
+ goto fail;
+ }
+
+ f->cache_fd = mmap_cache_add_fd(f->mmap, f->fd);
+ if (!f->cache_fd) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ r = journal_file_fstat(f);
+ if (r < 0)
+ goto fail;
+
+ if (f->last_stat.st_size == 0 && f->writable) {
+
+ (void) journal_file_warn_btrfs(f);
+
+ /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
+ * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
+ * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
+ * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
+ * solely on mtime/atime/ctime of the file. */
+ (void) fd_setcrtime(f->fd, 0);
+
+#if HAVE_GCRYPT
+ /* Try to load the FSPRG state, and if we can't, then
+ * just don't do sealing */
+ if (f->seal) {
+ r = journal_file_fss_load(f);
+ if (r < 0)
+ f->seal = false;
+ }
+#endif
+
+ r = journal_file_init_header(f, template);
+ if (r < 0)
+ goto fail;
+
+ r = journal_file_fstat(f);
+ if (r < 0)
+ goto fail;
+
+ newly_created = true;
+ }
+
+ if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
+ r = -ENODATA;
+ goto fail;
+ }
+
+ r = mmap_cache_get(f->mmap, f->cache_fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h, NULL);
+ if (r < 0)
+ goto fail;
+
+ f->header = h;
+
+ if (!newly_created) {
+ set_clear_with_destructor(deferred_closes, journal_file_close);
+
+ r = journal_file_verify_header(f);
+ if (r < 0)
+ goto fail;
+ }
+
+#if HAVE_GCRYPT
+ if (!newly_created && f->writable) {
+ r = journal_file_fss_load(f);
+ if (r < 0)
+ goto fail;
+ }
+#endif
+
+ if (f->writable) {
+ if (metrics) {
+ journal_default_metrics(metrics, f->fd);
+ f->metrics = *metrics;
+ } else if (template)
+ f->metrics = template->metrics;
+
+ r = journal_file_refresh_header(f);
+ if (r < 0)
+ goto fail;
+ }
+
+#if HAVE_GCRYPT
+ r = journal_file_hmac_setup(f);
+ if (r < 0)
+ goto fail;
+#endif
+
+ if (newly_created) {
+ r = journal_file_setup_field_hash_table(f);
+ if (r < 0)
+ goto fail;
+
+ r = journal_file_setup_data_hash_table(f);
+ if (r < 0)
+ goto fail;
+
+#if HAVE_GCRYPT
+ r = journal_file_append_first_tag(f);
+ if (r < 0)
+ goto fail;
+#endif
+ }
+
+ if (mmap_cache_got_sigbus(f->mmap, f->cache_fd)) {
+ r = -EIO;
+ goto fail;
+ }
+
+ if (template && template->post_change_timer) {
+ r = journal_file_enable_post_change_timer(
+ f,
+ sd_event_source_get_event(template->post_change_timer),
+ template->post_change_timer_period);
+
+ if (r < 0)
+ goto fail;
+ }
+
+        /* The file is now opened successfully, thus we take possession of any passed-in fd. */
+ f->close_fd = true;
+
+ *ret = f;
+ return 0;
+
+fail:
+ if (f->cache_fd && mmap_cache_got_sigbus(f->mmap, f->cache_fd))
+ r = -EIO;
+
+ (void) journal_file_close(f);
+
+ return r;
+}
+
+int journal_file_archive(JournalFile *f) {
+ _cleanup_free_ char *p = NULL;
+
+ assert(f);
+
+ if (!f->writable)
+ return -EINVAL;
+
+        /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
+         * rotation, since we don't know the actual path and hence couldn't rename the file. */
+ if (path_startswith(f->path, "/proc/self/fd"))
+ return -EINVAL;
+
+ if (!endswith(f->path, ".journal"))
+ return -EINVAL;
+
+ if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
+ (int) strlen(f->path) - 8, f->path,
+ SD_ID128_FORMAT_VAL(f->header->seqnum_id),
+ le64toh(f->header->head_entry_seqnum),
+ le64toh(f->header->head_entry_realtime)) < 0)
+ return -ENOMEM;
+
+ /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
+ * ignore that case. */
+ if (rename(f->path, p) < 0 && errno != ENOENT)
+ return -errno;
+
+ /* Sync the rename to disk */
+ (void) fsync_directory_of_file(f->fd);
+
+ /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
+ * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
+ * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
+ * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
+ * occurs. */
+ f->archive = true;
+
+        /* Currently, btrfs is not very good with our write patterns and fragments heavily. Let's defrag our journal
+         * files when we archive them. */
+ f->defrag_on_close = true;
+
+ return 0;
+}
+
+JournalFile* journal_initiate_close(
+ JournalFile *f,
+ Set *deferred_closes) {
+
+ int r;
+
+ assert(f);
+
+ if (deferred_closes) {
+
+ r = set_put(deferred_closes, f);
+ if (r < 0)
+ log_debug_errno(r, "Failed to add file to deferred close set, closing immediately.");
+ else {
+ (void) journal_file_set_offline(f, false);
+ return NULL;
+ }
+ }
+
+ return journal_file_close(f);
+}
+
+int journal_file_rotate(
+ JournalFile **f,
+ bool compress,
+ uint64_t compress_threshold_bytes,
+ bool seal,
+ Set *deferred_closes) {
+
+ JournalFile *new_file = NULL;
+ int r;
+
+ assert(f);
+ assert(*f);
+
+ r = journal_file_archive(*f);
+ if (r < 0)
+ return r;
+
+ r = journal_file_open(
+ -1,
+ (*f)->path,
+ (*f)->flags,
+ (*f)->mode,
+ compress,
+ compress_threshold_bytes,
+ seal,
+ NULL, /* metrics */
+ (*f)->mmap,
+ deferred_closes,
+ *f, /* template */
+ &new_file);
+
+ journal_initiate_close(*f, deferred_closes);
+ *f = new_file;
+
+ return r;
+}
+
+int journal_file_dispose(int dir_fd, const char *fname) {
+ _cleanup_free_ char *p = NULL;
+ _cleanup_close_ int fd = -1;
+
+ assert(fname);
+
+        /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shut down. Note that
+ * this is done without looking into the file or changing any of its contents. The idea is that this is called
+ * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
+ * for writing anymore. */
+
+ if (!endswith(fname, ".journal"))
+ return -EINVAL;
+
+ if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
+ (int) strlen(fname) - 8, fname,
+ now(CLOCK_REALTIME),
+ random_u64()) < 0)
+ return -ENOMEM;
+
+ if (renameat(dir_fd, fname, dir_fd, p) < 0)
+ return -errno;
+
+ /* btrfs doesn't cope well with our write pattern and fragments heavily. Let's defrag all files we rotate */
+ fd = openat(dir_fd, p, O_RDONLY|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW);
+ if (fd < 0)
+ log_debug_errno(errno, "Failed to open file for defragmentation/FS_NOCOW_FL, ignoring: %m");
+ else {
+ (void) chattr_fd(fd, 0, FS_NOCOW_FL, NULL);
+ (void) btrfs_defrag_fd(fd);
+ }
+
+ return 0;
+}
+
+int journal_file_open_reliably(
+ const char *fname,
+ int flags,
+ mode_t mode,
+ bool compress,
+ uint64_t compress_threshold_bytes,
+ bool seal,
+ JournalMetrics *metrics,
+ MMapCache *mmap_cache,
+ Set *deferred_closes,
+ JournalFile *template,
+ JournalFile **ret) {
+
+ int r;
+
+ r = journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
+ deferred_closes, template, ret);
+ if (!IN_SET(r,
+ -EBADMSG, /* Corrupted */
+ -ENODATA, /* Truncated */
+ -EHOSTDOWN, /* Other machine */
+ -EPROTONOSUPPORT, /* Incompatible feature */
+ -EBUSY, /* Unclean shutdown */
+ -ESHUTDOWN, /* Already archived */
+ -EIO, /* IO error, including SIGBUS on mmap */
+ -EIDRM, /* File has been deleted */
+ -ETXTBSY)) /* File is from the future */
+ return r;
+
+ if ((flags & O_ACCMODE) == O_RDONLY)
+ return r;
+
+ if (!(flags & O_CREAT))
+ return r;
+
+ if (!endswith(fname, ".journal"))
+ return r;
+
+ /* The file is corrupted. Rotate it away and try it again (but only once) */
+ log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
+
+ r = journal_file_dispose(AT_FDCWD, fname);
+ if (r < 0)
+ return r;
+
+ return journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
+ deferred_closes, template, ret);
+}
+
+int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p) {
+ uint64_t i, n;
+ uint64_t q, xor_hash = 0;
+ int r;
+ EntryItem *items;
+ dual_timestamp ts;
+ const sd_id128_t *boot_id;
+
+ assert(from);
+ assert(to);
+ assert(o);
+ assert(p);
+
+ if (!to->writable)
+ return -EPERM;
+
+ ts.monotonic = le64toh(o->entry.monotonic);
+ ts.realtime = le64toh(o->entry.realtime);
+ boot_id = &o->entry.boot_id;
+
+ n = journal_file_entry_n_items(o);
+ /* alloca() can't take 0, hence let's allocate at least one */
+ items = newa(EntryItem, MAX(1u, n));
+
+ for (i = 0; i < n; i++) {
+ uint64_t l, h;
+ le64_t le_hash;
+ size_t t;
+ void *data;
+ Object *u;
+
+ q = le64toh(o->entry.items[i].object_offset);
+ le_hash = o->entry.items[i].hash;
+
+ r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);
+ if (r < 0)
+ return r;
+
+ if (le_hash != o->data.hash)
+ return -EBADMSG;
+
+ l = le64toh(o->object.size) - offsetof(Object, data.payload);
+ t = (size_t) l;
+
+ /* We hit the limit on 32bit machines */
+ if ((uint64_t) t != l)
+ return -E2BIG;
+
+ if (o->object.flags & OBJECT_COMPRESSION_MASK) {
+#if HAVE_XZ || HAVE_LZ4
+ size_t rsize = 0;
+
+ r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
+ o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0);
+ if (r < 0)
+ return r;
+
+ data = from->compress_buffer;
+ l = rsize;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+ } else
+ data = o->data.payload;
+
+ r = journal_file_append_data(to, data, l, &u, &h);
+ if (r < 0)
+ return r;
+
+ xor_hash ^= le64toh(u->data.hash);
+ items[i].object_offset = htole64(h);
+ items[i].hash = u->data.hash;
+
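+                /* o was re-used above to map the data object, hence re-map the source
+                 * entry before reading its next item. */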
+ r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+ }
+
+ r = journal_file_append_entry_internal(to, &ts, boot_id, xor_hash, items, n,
+ NULL, NULL, NULL);
+
+ if (mmap_cache_got_sigbus(to->mmap, to->cache_fd))
+ return -EIO;
+
+ return r;
+}
+
+void journal_reset_metrics(JournalMetrics *m) {
+ assert(m);
+
+ /* Set everything to "pick automatic values". */
+
+ *m = (JournalMetrics) {
+ .min_use = (uint64_t) -1,
+ .max_use = (uint64_t) -1,
+ .min_size = (uint64_t) -1,
+ .max_size = (uint64_t) -1,
+ .keep_free = (uint64_t) -1,
+ .n_max_files = (uint64_t) -1,
+ };
+}
+
+void journal_default_metrics(JournalMetrics *m, int fd) {
+ char a[FORMAT_BYTES_MAX], b[FORMAT_BYTES_MAX], c[FORMAT_BYTES_MAX], d[FORMAT_BYTES_MAX], e[FORMAT_BYTES_MAX];
+ struct statvfs ss;
+ uint64_t fs_size;
+
+ assert(m);
+ assert(fd >= 0);
+
+ if (fstatvfs(fd, &ss) >= 0)
+ fs_size = ss.f_frsize * ss.f_blocks;
+ else {
+ log_debug_errno(errno, "Failed to determine disk size: %m");
+ fs_size = 0;
+ }
+
+ if (m->max_use == (uint64_t) -1) {
+
+ if (fs_size > 0) {
+ m->max_use = PAGE_ALIGN(fs_size / 10); /* 10% of file system size */
+
+ if (m->max_use > DEFAULT_MAX_USE_UPPER)
+ m->max_use = DEFAULT_MAX_USE_UPPER;
+
+ if (m->max_use < DEFAULT_MAX_USE_LOWER)
+ m->max_use = DEFAULT_MAX_USE_LOWER;
+ } else
+ m->max_use = DEFAULT_MAX_USE_LOWER;
+ } else {
+ m->max_use = PAGE_ALIGN(m->max_use);
+
+ if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
+ m->max_use = JOURNAL_FILE_SIZE_MIN*2;
+ }
+
+ if (m->min_use == (uint64_t) -1)
+ m->min_use = DEFAULT_MIN_USE;
+
+ if (m->min_use > m->max_use)
+ m->min_use = m->max_use;
+
+ if (m->max_size == (uint64_t) -1) {
+ m->max_size = PAGE_ALIGN(m->max_use / 8); /* 8 chunks */
+
+ if (m->max_size > DEFAULT_MAX_SIZE_UPPER)
+ m->max_size = DEFAULT_MAX_SIZE_UPPER;
+ } else
+ m->max_size = PAGE_ALIGN(m->max_size);
+
+ if (m->max_size != 0) {
+ if (m->max_size < JOURNAL_FILE_SIZE_MIN)
+ m->max_size = JOURNAL_FILE_SIZE_MIN;
+
+ if (m->max_use != 0 && m->max_size*2 > m->max_use)
+ m->max_use = m->max_size*2;
+ }
+
+ if (m->min_size == (uint64_t) -1)
+ m->min_size = JOURNAL_FILE_SIZE_MIN;
+ else {
+ m->min_size = PAGE_ALIGN(m->min_size);
+
+ if (m->min_size < JOURNAL_FILE_SIZE_MIN)
+ m->min_size = JOURNAL_FILE_SIZE_MIN;
+
+ if (m->max_size != 0 && m->min_size > m->max_size)
+ m->max_size = m->min_size;
+ }
+
+ if (m->keep_free == (uint64_t) -1) {
+
+ if (fs_size > 0) {
+ m->keep_free = PAGE_ALIGN(fs_size * 3 / 20); /* 15% of file system size */
+
+ if (m->keep_free > DEFAULT_KEEP_FREE_UPPER)
+ m->keep_free = DEFAULT_KEEP_FREE_UPPER;
+
+ } else
+ m->keep_free = DEFAULT_KEEP_FREE;
+ }
+
+ if (m->n_max_files == (uint64_t) -1)
+ m->n_max_files = DEFAULT_N_MAX_FILES;
+
+ log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
+ format_bytes(a, sizeof(a), m->min_use),
+ format_bytes(b, sizeof(b), m->max_use),
+ format_bytes(c, sizeof(c), m->max_size),
+ format_bytes(d, sizeof(d), m->min_size),
+ format_bytes(e, sizeof(e), m->keep_free),
+ m->n_max_files);
+}
+
+int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) {
+ assert(f);
+ assert(f->header);
+ assert(from || to);
+
+ if (from) {
+ if (f->header->head_entry_realtime == 0)
+ return -ENOENT;
+
+ *from = le64toh(f->header->head_entry_realtime);
+ }
+
+ if (to) {
+ if (f->header->tail_entry_realtime == 0)
+ return -ENOENT;
+
+ *to = le64toh(f->header->tail_entry_realtime);
+ }
+
+ return 1;
+}
+
+int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {
+ Object *o;
+ uint64_t p;
+ int r;
+
+ assert(f);
+ assert(from || to);
+
+ r = find_data_object_by_boot_id(f, boot_id, &o, &p);
+ if (r <= 0)
+ return r;
+
+ if (le64toh(o->data.n_entries) <= 0)
+ return 0;
+
+ if (from) {
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
+ if (r < 0)
+ return r;
+
+ *from = le64toh(o->entry.monotonic);
+ }
+
+ if (to) {
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ r = generic_array_get_plus_one(f,
+ le64toh(o->data.entry_offset),
+ le64toh(o->data.entry_array_offset),
+ le64toh(o->data.n_entries)-1,
+ &o, NULL);
+ if (r <= 0)
+ return r;
+
+ *to = le64toh(o->entry.monotonic);
+ }
+
+ return 1;
+}
+
+bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) {
+ assert(f);
+ assert(f->header);
+
+        /* If we gained new header fields, we gained new features,
+         * hence suggest a rotation. */
+ if (le64toh(f->header->header_size) < sizeof(Header)) {
+ log_debug("%s uses an outdated header, suggesting rotation.", f->path);
+ return true;
+ }
+
+ /* Let's check if the hash tables grew over a certain fill
+ * level (75%, borrowing this value from Java's hash table
+ * implementation), and if so suggest a rotation. To calculate
+ * the fill level we need the n_data field, which only exists
+ * in newer versions. */
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
+ if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+ log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
+ f->path,
+ 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
+ le64toh(f->header->n_data),
+ le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
+ (unsigned long long) f->last_stat.st_size,
+ f->last_stat.st_size / le64toh(f->header->n_data));
+ return true;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
+ if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
+ log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
+ f->path,
+ 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
+ le64toh(f->header->n_fields),
+ le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
+ return true;
+ }
+
+ /* Are the data objects properly indexed by field objects? */
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
+ JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
+ le64toh(f->header->n_data) > 0 &&
+ le64toh(f->header->n_fields) == 0)
+ return true;
+
+ if (max_file_usec > 0) {
+ usec_t t, h;
+
+ h = le64toh(f->header->head_entry_realtime);
+ t = now(CLOCK_REALTIME);
+
+ if (h > 0 && t > h + max_file_usec)
+ return true;
+ }
+
+ return false;
+}
diff --git a/src/journal/journal-file.h b/src/journal/journal-file.h
new file mode 100644
index 0000000..29e324d
--- /dev/null
+++ b/src/journal/journal-file.h
@@ -0,0 +1,261 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <inttypes.h>
+
+#if HAVE_GCRYPT
+# include <gcrypt.h>
+#endif
+
+#include "sd-event.h"
+#include "sd-id128.h"
+
+#include "hashmap.h"
+#include "journal-def.h"
+#include "macro.h"
+#include "mmap-cache.h"
+#include "sparse-endian.h"
+
+typedef struct JournalMetrics {
+ /* For all these: -1 means "pick automatically", and 0 means "no limit enforced" */
+ uint64_t max_size; /* how large journal files grow at max */
+ uint64_t min_size; /* how large journal files grow at least */
+ uint64_t max_use; /* how much disk space to use in total at max, keep_free permitting */
+ uint64_t min_use; /* how much disk space to use in total at least, even if keep_free says not to */
+ uint64_t keep_free; /* how much to keep free on disk */
+ uint64_t n_max_files; /* how many files to keep around at max */
+} JournalMetrics;
+
+typedef enum direction {
+ DIRECTION_UP,
+ DIRECTION_DOWN
+} direction_t;
+
+typedef enum LocationType {
+ /* The first and last entries, resp. */
+ LOCATION_HEAD,
+ LOCATION_TAIL,
+
+ /* We already read the entry we currently point to, and the
+ * next one to read should probably not be this one again. */
+ LOCATION_DISCRETE,
+
+ /* We should seek to the precise location specified, and
+ * return it, as we haven't read it yet. */
+ LOCATION_SEEK
+} LocationType;
+
+typedef enum OfflineState {
+ OFFLINE_JOINED,
+ OFFLINE_SYNCING,
+ OFFLINE_OFFLINING,
+ OFFLINE_CANCEL,
+ OFFLINE_AGAIN_FROM_SYNCING,
+ OFFLINE_AGAIN_FROM_OFFLINING,
+ OFFLINE_DONE
+} OfflineState;
+
+typedef struct JournalFile {
+ int fd;
+ MMapFileDescriptor *cache_fd;
+
+ mode_t mode;
+
+ int flags;
+ int prot;
+ bool writable:1;
+ bool compress_xz:1;
+ bool compress_lz4:1;
+ bool seal:1;
+ bool defrag_on_close:1;
+ bool close_fd:1;
+ bool archive:1;
+
+ direction_t last_direction;
+ LocationType location_type;
+ uint64_t last_n_entries;
+
+ char *path;
+ struct stat last_stat;
+ usec_t last_stat_usec;
+
+ Header *header;
+ HashItem *data_hash_table;
+ HashItem *field_hash_table;
+
+ uint64_t current_offset;
+ uint64_t current_seqnum;
+ uint64_t current_realtime;
+ uint64_t current_monotonic;
+ sd_id128_t current_boot_id;
+ uint64_t current_xor_hash;
+
+ JournalMetrics metrics;
+ MMapCache *mmap;
+
+ sd_event_source *post_change_timer;
+ usec_t post_change_timer_period;
+
+ OrderedHashmap *chain_cache;
+
+ pthread_t offline_thread;
+ volatile OfflineState offline_state;
+
+ unsigned last_seen_generation;
+
+ uint64_t compress_threshold_bytes;
+#if HAVE_XZ || HAVE_LZ4
+ void *compress_buffer;
+ size_t compress_buffer_size;
+#endif
+
+#if HAVE_GCRYPT
+ gcry_md_hd_t hmac;
+ bool hmac_running;
+
+ FSSHeader *fss_file;
+ size_t fss_file_size;
+
+ uint64_t fss_start_usec;
+ uint64_t fss_interval_usec;
+
+ void *fsprg_state;
+ size_t fsprg_state_size;
+
+ void *fsprg_seed;
+ size_t fsprg_seed_size;
+#endif
+} JournalFile;
+
+int journal_file_open(
+ int fd,
+ const char *fname,
+ int flags,
+ mode_t mode,
+ bool compress,
+ uint64_t compress_threshold_bytes,
+ bool seal,
+ JournalMetrics *metrics,
+ MMapCache *mmap_cache,
+ Set *deferred_closes,
+ JournalFile *template,
+ JournalFile **ret);
+
+int journal_file_set_offline(JournalFile *f, bool wait);
+bool journal_file_is_offlining(JournalFile *f);
+JournalFile* journal_file_close(JournalFile *j);
+
+int journal_file_open_reliably(
+ const char *fname,
+ int flags,
+ mode_t mode,
+ bool compress,
+ uint64_t compress_threshold_bytes,
+ bool seal,
+ JournalMetrics *metrics,
+ MMapCache *mmap_cache,
+ Set *deferred_closes,
+ JournalFile *template,
+ JournalFile **ret);
+
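+/* Objects in journal files are aligned to 8 bytes: ALIGN64() rounds a value up to the
+ * next multiple of 8, VALID64() checks whether a value is so aligned. */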
+#define ALIGN64(x) (((x) + 7ULL) & ~7ULL)
+#define VALID64(x) (((x) & 7ULL) == 0ULL)
+
+/* Use six characters to cover the offsets common in smallish journal
+ * files without adding too many zeros. */
+#define OFSfmt "%06"PRIx64
+
+static inline bool VALID_REALTIME(uint64_t u) {
+        /* This considers timestamps up to the year 3112 valid. That should be plenty of room... */
+ return u > 0 && u < (1ULL << 55);
+}
+
+static inline bool VALID_MONOTONIC(uint64_t u) {
+        /* This considers timestamps up to 1142 years of runtime valid. */
+ return u < (1ULL << 55);
+}
+
+static inline bool VALID_EPOCH(uint64_t u) {
+ /* This allows changing the key for 1142 years, every usec. */
+ return u < (1ULL << 55);
+}
+
+#define JOURNAL_HEADER_CONTAINS(h, field) \
+ (le64toh((h)->header_size) >= offsetof(Header, field) + sizeof((h)->field))
+
+#define JOURNAL_HEADER_SEALED(h) \
+ (!!(le32toh((h)->compatible_flags) & HEADER_COMPATIBLE_SEALED))
+
+#define JOURNAL_HEADER_COMPRESSED_XZ(h) \
+ (!!(le32toh((h)->incompatible_flags) & HEADER_INCOMPATIBLE_COMPRESSED_XZ))
+
+#define JOURNAL_HEADER_COMPRESSED_LZ4(h) \
+ (!!(le32toh((h)->incompatible_flags) & HEADER_INCOMPATIBLE_COMPRESSED_LZ4))
+
+int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret);
+
+uint64_t journal_file_entry_n_items(Object *o) _pure_;
+uint64_t journal_file_entry_array_n_items(Object *o) _pure_;
+uint64_t journal_file_hash_table_n_items(Object *o) _pure_;
+
+int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret, uint64_t *offset);
+int journal_file_append_entry(
+ JournalFile *f,
+ const dual_timestamp *ts,
+ const sd_id128_t *boot_id,
+ const struct iovec iovec[], unsigned n_iovec,
+ uint64_t *seqno,
+ Object **ret,
+ uint64_t *offset);
+
+int journal_file_find_data_object(JournalFile *f, const void *data, uint64_t size, Object **ret, uint64_t *offset);
+int journal_file_find_data_object_with_hash(JournalFile *f, const void *data, uint64_t size, uint64_t hash, Object **ret, uint64_t *offset);
+
+int journal_file_find_field_object(JournalFile *f, const void *field, uint64_t size, Object **ret, uint64_t *offset);
+int journal_file_find_field_object_with_hash(JournalFile *f, const void *field, uint64_t size, uint64_t hash, Object **ret, uint64_t *offset);
+
+void journal_file_reset_location(JournalFile *f);
+void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset);
+int journal_file_compare_locations(JournalFile *af, JournalFile *bf);
+int journal_file_next_entry(JournalFile *f, uint64_t p, direction_t direction, Object **ret, uint64_t *offset);
+
+int journal_file_next_entry_for_data(JournalFile *f, Object *o, uint64_t p, uint64_t data_offset, direction_t direction, Object **ret, uint64_t *offset);
+
+int journal_file_move_to_entry_by_seqnum(JournalFile *f, uint64_t seqnum, direction_t direction, Object **ret, uint64_t *offset);
+int journal_file_move_to_entry_by_realtime(JournalFile *f, uint64_t realtime, direction_t direction, Object **ret, uint64_t *offset);
+int journal_file_move_to_entry_by_monotonic(JournalFile *f, sd_id128_t boot_id, uint64_t monotonic, direction_t direction, Object **ret, uint64_t *offset);
+
+int journal_file_move_to_entry_by_offset_for_data(JournalFile *f, uint64_t data_offset, uint64_t p, direction_t direction, Object **ret, uint64_t *offset);
+int journal_file_move_to_entry_by_seqnum_for_data(JournalFile *f, uint64_t data_offset, uint64_t seqnum, direction_t direction, Object **ret, uint64_t *offset);
+int journal_file_move_to_entry_by_realtime_for_data(JournalFile *f, uint64_t data_offset, uint64_t realtime, direction_t direction, Object **ret, uint64_t *offset);
+int journal_file_move_to_entry_by_monotonic_for_data(JournalFile *f, uint64_t data_offset, sd_id128_t boot_id, uint64_t monotonic, direction_t direction, Object **ret, uint64_t *offset);
+
+int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p);
+
+void journal_file_dump(JournalFile *f);
+void journal_file_print_header(JournalFile *f);
+
+int journal_file_archive(JournalFile *f);
+JournalFile* journal_initiate_close(JournalFile *f, Set *deferred_closes);
+int journal_file_rotate(JournalFile **f, bool compress, uint64_t compress_threshold_bytes, bool seal, Set *deferred_closes);
+
+int journal_file_dispose(int dir_fd, const char *fname);
+
+void journal_file_post_change(JournalFile *f);
+int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t);
+
+void journal_reset_metrics(JournalMetrics *m);
+void journal_default_metrics(JournalMetrics *m, int fd);
+
+int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to);
+int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot, usec_t *from, usec_t *to);
+
+bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec);
+
+int journal_file_map_data_hash_table(JournalFile *f);
+int journal_file_map_field_hash_table(JournalFile *f);
+
+static inline bool JOURNAL_FILE_COMPRESS(JournalFile *f) {
+ assert(f);
+ return f->compress_xz || f->compress_lz4;
+}
diff --git a/src/journal/journal-internal.h b/src/journal/journal-internal.h
new file mode 100644
index 0000000..80a69da
--- /dev/null
+++ b/src/journal/journal-internal.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <inttypes.h>
+#include <stdbool.h>
+#include <sys/types.h>
+
+#include "sd-id128.h"
+#include "sd-journal.h"
+
+#include "hashmap.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "list.h"
+#include "set.h"
+
+typedef struct Match Match;
+typedef struct Location Location;
+typedef struct Directory Directory;
+
+typedef enum MatchType {
+ MATCH_DISCRETE,
+ MATCH_OR_TERM,
+ MATCH_AND_TERM
+} MatchType;
+
+struct Match {
+ MatchType type;
+ Match *parent;
+ LIST_FIELDS(Match, matches);
+
+ /* For concrete matches */
+ char *data;
+ size_t size;
+ le64_t le_hash;
+
+ /* For terms */
+ LIST_HEAD(Match, matches);
+};
+
+struct Location {
+ LocationType type;
+
+ bool seqnum_set;
+ bool realtime_set;
+ bool monotonic_set;
+ bool xor_hash_set;
+
+ uint64_t seqnum;
+ sd_id128_t seqnum_id;
+
+ uint64_t realtime;
+
+ uint64_t monotonic;
+ sd_id128_t boot_id;
+
+ uint64_t xor_hash;
+};
+
+struct Directory {
+ char *path;
+ int wd;
+ bool is_root;
+ unsigned last_seen_generation;
+};
+
+struct sd_journal {
+ int toplevel_fd;
+
+ char *path;
+ char *prefix;
+
+ OrderedHashmap *files;
+ IteratedCache *files_cache;
+ MMapCache *mmap;
+
+ Location current_location;
+
+ JournalFile *current_file;
+ uint64_t current_field;
+
+ Match *level0, *level1, *level2;
+
+ pid_t original_pid;
+
+ int inotify_fd;
+ unsigned current_invalidate_counter, last_invalidate_counter;
+ usec_t last_process_usec;
+ unsigned generation;
+
+ /* Iterating through unique fields and their data values */
+ char *unique_field;
+ JournalFile *unique_file;
+ uint64_t unique_offset;
+
+ /* Iterating through known fields */
+ JournalFile *fields_file;
+ uint64_t fields_offset;
+ uint64_t fields_hash_table_index;
+ char *fields_buffer;
+ size_t fields_buffer_allocated;
+
+ int flags;
+
+ bool on_network:1;
+ bool no_new_files:1;
+ bool no_inotify:1;
+ bool unique_file_lost:1; /* File we were iterating over got
+ removed, and there were no more
+ files, so sd_j_enumerate_unique
+ will return a value equal to 0. */
+ bool fields_file_lost:1;
+ bool has_runtime_files:1;
+ bool has_persistent_files:1;
+
+ size_t data_threshold;
+
+ Hashmap *directories_by_path;
+ Hashmap *directories_by_wd;
+
+ Hashmap *errors;
+};
+
+char *journal_make_match_string(sd_journal *j);
+void journal_print_header(sd_journal *j);
+
+#define JOURNAL_FOREACH_DATA_RETVAL(j, data, l, retval) \
+ for (sd_journal_restart_data(j); ((retval) = sd_journal_enumerate_data((j), &(data), &(l))) > 0; )
diff --git a/src/journal/journal-qrcode.c b/src/journal/journal-qrcode.c
new file mode 100644
index 0000000..35d6ec4
--- /dev/null
+++ b/src/journal/journal-qrcode.c
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <qrencode.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdio_ext.h>
+#include <stdlib.h>
+
+#include "journal-qrcode.h"
+#include "macro.h"
+
+#define WHITE_ON_BLACK "\033[40;37;1m"
+#define NORMAL "\033[0m"
+
+static void print_border(FILE *output, unsigned width) {
+ unsigned x, y;
+
+ /* Four rows of border */
+ for (y = 0; y < 4; y += 2) {
+ fputs(WHITE_ON_BLACK, output);
+
+ for (x = 0; x < 4 + width + 4; x++)
+ fputs("\342\226\210", output);
+
+ fputs(NORMAL "\n", output);
+ }
+}
+
+int print_qr_code(
+ FILE *output,
+ const void *seed,
+ size_t seed_size,
+ uint64_t start,
+ uint64_t interval,
+ const char *hn,
+ sd_id128_t machine) {
+
+ FILE *f;
+ char *url = NULL;
+ size_t url_size = 0, i;
+ QRcode* qr;
+ unsigned x, y;
+
+ assert(seed);
+ assert(seed_size > 0);
+
+ f = open_memstream(&url, &url_size);
+ if (!f)
+ return -ENOMEM;
+
+ (void) __fsetlocking(f, FSETLOCKING_BYCALLER);
+
+ fputs("fss://", f);
+
+ for (i = 0; i < seed_size; i++) {
+ if (i > 0 && i % 3 == 0)
+ fputc('-', f);
+ fprintf(f, "%02x", ((uint8_t*) seed)[i]);
+ }
+
+ fprintf(f, "/%"PRIx64"-%"PRIx64"?machine=" SD_ID128_FORMAT_STR,
+ start,
+ interval,
+ SD_ID128_FORMAT_VAL(machine));
+
+ if (hn)
+ fprintf(f, ";hostname=%s", hn);
+
+ if (ferror(f)) {
+ fclose(f);
+ free(url);
+ return -ENOMEM;
+ }
+
+ fclose(f);
+
+ qr = QRcode_encodeString(url, 0, QR_ECLEVEL_L, QR_MODE_8, 1);
+ free(url);
+
+ if (!qr)
+ return -ENOMEM;
+
+ print_border(output, qr->width);
+
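+        /* Render two QR rows per output line, using space and the UTF-8 encoded
+         * full/half block glyphs so that each character cell covers one module from
+         * each of the two rows. */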
+ for (y = 0; y < (unsigned) qr->width; y += 2) {
+ const uint8_t *row1, *row2;
+
+ row1 = qr->data + qr->width * y;
+ row2 = row1 + qr->width;
+
+ fputs(WHITE_ON_BLACK, output);
+ for (x = 0; x < 4; x++)
+ fputs("\342\226\210", output);
+
+ for (x = 0; x < (unsigned) qr->width; x ++) {
+ bool a, b;
+
+ a = row1[x] & 1;
+ b = (y+1) < (unsigned) qr->width ? (row2[x] & 1) : false;
+
+ if (a && b)
+ fputc(' ', output);
+ else if (a)
+ fputs("\342\226\204", output);
+ else if (b)
+ fputs("\342\226\200", output);
+ else
+ fputs("\342\226\210", output);
+ }
+
+ for (x = 0; x < 4; x++)
+ fputs("\342\226\210", output);
+ fputs(NORMAL "\n", output);
+ }
+
+ print_border(output, qr->width);
+
+ QRcode_free(qr);
+ return 0;
+}
diff --git a/src/journal/journal-qrcode.h b/src/journal/journal-qrcode.h
new file mode 100644
index 0000000..0774608
--- /dev/null
+++ b/src/journal/journal-qrcode.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <inttypes.h>
+#include <stdio.h>
+
+#include "sd-id128.h"
+
+int print_qr_code(FILE *f, const void *seed, size_t seed_size, uint64_t start, uint64_t interval, const char *hn, sd_id128_t machine);
diff --git a/src/journal/journal-send.c b/src/journal/journal-send.c
new file mode 100644
index 0000000..8618454
--- /dev/null
+++ b/src/journal/journal-send.c
@@ -0,0 +1,543 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <printf.h>
+#include <stddef.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <unistd.h>
+
+#define SD_JOURNAL_SUPPRESS_LOCATION
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "fd-util.h"
+#include "io-util.h"
+#include "memfd-util.h"
+#include "socket-util.h"
+#include "stdio-util.h"
+#include "string-util.h"
+#include "tmpfile-util.h"
+#include "util.h"
+
+#define SNDBUF_SIZE (8*1024*1024)
+
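+/* Builds the "CODE_FUNC=<func>" field on the caller's stack. This has to be a macro,
+ * since newa() allocates with alloca() and therefore must expand in the caller's frame. */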
+#define ALLOCA_CODE_FUNC(f, func) \
+ do { \
+ size_t _fl; \
+ const char *_func = (func); \
+ char **_f = &(f); \
+ _fl = strlen(_func) + 1; \
+ *_f = newa(char, _fl + 10); \
+ memcpy(*_f, "CODE_FUNC=", 10); \
+ memcpy(*_f + 10, _func, _fl); \
+ } while (false)
+
+/* We open a single fd, and we'll share it with the current process,
+ * all its threads, and all its subprocesses. This means we need to
+ * initialize it atomically, and need to operate on it atomically,
+ * never assuming we are the only user. */
+
+static int journal_fd(void) {
+ int fd;
+ static int fd_plus_one = 0;
+
+retry:
+ if (fd_plus_one > 0)
+ return fd_plus_one - 1;
+
+ fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC, 0);
+ if (fd < 0)
+ return -errno;
+
+ fd_inc_sndbuf(fd, SNDBUF_SIZE);
+
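+        /* Try to install our fd atomically; if some other thread raced us and installed
+         * one already, close ours and use theirs on the next iteration. */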
+ if (!__sync_bool_compare_and_swap(&fd_plus_one, 0, fd+1)) {
+ safe_close(fd);
+ goto retry;
+ }
+
+ return fd;
+}
+
+_public_ int sd_journal_print(int priority, const char *format, ...) {
+ int r;
+ va_list ap;
+
+ va_start(ap, format);
+ r = sd_journal_printv(priority, format, ap);
+ va_end(ap);
+
+ return r;
+}
+
+_public_ int sd_journal_printv(int priority, const char *format, va_list ap) {
+
+ /* FIXME: Instead of limiting things to LINE_MAX we could do a
+ C99 variable-length array on the stack here in a loop. */
+
+ char buffer[8 + LINE_MAX], p[STRLEN("PRIORITY=") + DECIMAL_STR_MAX(int) + 1];
+ struct iovec iov[2];
+
+ assert_return(priority >= 0, -EINVAL);
+ assert_return(priority <= 7, -EINVAL);
+ assert_return(format, -EINVAL);
+
+ xsprintf(p, "PRIORITY=%i", priority & LOG_PRIMASK);
+
+ memcpy(buffer, "MESSAGE=", 8);
+ vsnprintf(buffer+8, sizeof(buffer) - 8, format, ap);
+
+ /* Strip trailing whitespace, keep prefix whitespace. */
+ (void) strstrip(buffer);
+
+ /* Suppress empty lines */
+ if (isempty(buffer+8))
+ return 0;
+
+ iov[0] = IOVEC_MAKE_STRING(buffer);
+ iov[1] = IOVEC_MAKE_STRING(p);
+
+ return sd_journal_sendv(iov, 2);
+}
+
+_printf_(1, 0) static int fill_iovec_sprintf(const char *format, va_list ap, int extra, struct iovec **_iov) {
+ PROTECT_ERRNO;
+ int r, n = 0, i = 0, j;
+ struct iovec *iov = NULL;
+
+ assert(_iov);
+
+ if (extra > 0) {
+ n = MAX(extra * 2, extra + 4);
+ iov = malloc0(n * sizeof(struct iovec));
+ if (!iov) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ i = extra;
+ }
+
+ while (format) {
+ struct iovec *c;
+ char *buffer;
+ va_list aq;
+
+ if (i >= n) {
+ n = MAX(i*2, 4);
+ c = realloc(iov, n * sizeof(struct iovec));
+ if (!c) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ iov = c;
+ }
+
+ va_copy(aq, ap);
+ if (vasprintf(&buffer, format, aq) < 0) {
+ va_end(aq);
+ r = -ENOMEM;
+ goto fail;
+ }
+ va_end(aq);
+
+ VA_FORMAT_ADVANCE(format, ap);
+
+ (void) strstrip(buffer); /* strip trailing whitespace, keep prefixing whitespace */
+
+ iov[i++] = IOVEC_MAKE_STRING(buffer);
+
+ format = va_arg(ap, char *);
+ }
+
+ *_iov = iov;
+
+ return i;
+
+fail:
+ for (j = 0; j < i; j++)
+ free(iov[j].iov_base);
+
+ free(iov);
+
+ return r;
+}
+
+_public_ int sd_journal_send(const char *format, ...) {
+ int r, i, j;
+ va_list ap;
+ struct iovec *iov = NULL;
+
+ va_start(ap, format);
+ i = fill_iovec_sprintf(format, ap, 0, &iov);
+ va_end(ap);
+
+ if (_unlikely_(i < 0)) {
+ r = i;
+ goto finish;
+ }
+
+ r = sd_journal_sendv(iov, i);
+
+finish:
+ for (j = 0; j < i; j++)
+ free(iov[j].iov_base);
+
+ free(iov);
+
+ return r;
+}
+
+_public_ int sd_journal_sendv(const struct iovec *iov, int n) {
+ PROTECT_ERRNO;
+ int fd, r;
+ _cleanup_close_ int buffer_fd = -1;
+ struct iovec *w;
+ uint64_t *l;
+ int i, j = 0;
+ static const union sockaddr_union sa = {
+ .un.sun_family = AF_UNIX,
+ .un.sun_path = "/run/systemd/journal/socket",
+ };
+ struct msghdr mh = {
+ .msg_name = (struct sockaddr*) &sa.sa,
+ .msg_namelen = SOCKADDR_UN_LEN(sa.un),
+ };
+ ssize_t k;
+ bool have_syslog_identifier = false;
+ bool seal = true;
+
+ assert_return(iov, -EINVAL);
+ assert_return(n > 0, -EINVAL);
+
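+        /* Worst case each input field expands into five output iovecs (field name, "\n",
+         * 64-bit length, payload, trailing "\n"), plus up to three more for an implicit
+         * SYSLOG_IDENTIFIER= field. */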
+ w = newa(struct iovec, n * 5 + 3);
+ l = newa(uint64_t, n);
+
+ for (i = 0; i < n; i++) {
+ char *c, *nl;
+
+ if (_unlikely_(!iov[i].iov_base || iov[i].iov_len <= 1))
+ return -EINVAL;
+
+ c = memchr(iov[i].iov_base, '=', iov[i].iov_len);
+ if (_unlikely_(!c || c == iov[i].iov_base))
+ return -EINVAL;
+
+ have_syslog_identifier = have_syslog_identifier ||
+ (c == (char *) iov[i].iov_base + 17 &&
+ startswith(iov[i].iov_base, "SYSLOG_IDENTIFIER"));
+
+ nl = memchr(iov[i].iov_base, '\n', iov[i].iov_len);
+ if (nl) {
+ if (_unlikely_(nl < c))
+ return -EINVAL;
+
+ /* Already includes a newline? Bummer, then
+ * let's write the variable name, then a
+ * newline, then the size (64bit LE), followed
+ * by the data and a final newline */
+
+ w[j++] = IOVEC_MAKE(iov[i].iov_base, c - (char*) iov[i].iov_base);
+ w[j++] = IOVEC_MAKE_STRING("\n");
+
+ l[i] = htole64(iov[i].iov_len - (c - (char*) iov[i].iov_base) - 1);
+ w[j++] = IOVEC_MAKE(&l[i], sizeof(uint64_t));
+
+ w[j++] = IOVEC_MAKE(c + 1, iov[i].iov_len - (c - (char*) iov[i].iov_base) - 1);
+ } else
+ /* Nothing special? Then just add the line and
+ * append a newline */
+ w[j++] = iov[i];
+
+ w[j++] = IOVEC_MAKE_STRING("\n");
+ }
+
+ if (!have_syslog_identifier &&
+ string_is_safe(program_invocation_short_name)) {
+
+ /* Implicitly add program_invocation_short_name, if it
+ * is not set explicitly. We only do this for
+ * program_invocation_short_name, and nothing else
+ * since everything else is much nicer to retrieve
+ * from the outside. */
+
+ w[j++] = IOVEC_MAKE_STRING("SYSLOG_IDENTIFIER=");
+ w[j++] = IOVEC_MAKE_STRING(program_invocation_short_name);
+ w[j++] = IOVEC_MAKE_STRING("\n");
+ }
+
+ fd = journal_fd();
+ if (_unlikely_(fd < 0))
+ return fd;
+
+ mh.msg_iov = w;
+ mh.msg_iovlen = j;
+
+ k = sendmsg(fd, &mh, MSG_NOSIGNAL);
+ if (k >= 0)
+ return 0;
+
+ /* Fail silently if the journal is not available */
+ if (errno == ENOENT)
+ return 0;
+
+ if (!IN_SET(errno, EMSGSIZE, ENOBUFS))
+ return -errno;
+
+ /* Message doesn't fit... Let's dump the data in a memfd or
+ * temporary file and just pass a file descriptor of it to the
+ * other side.
+ *
+ * For the temporary files we use /dev/shm instead of /tmp
+ * here, since we want this to be a tmpfs, and one that is
+ * available from early boot on and where unprivileged users
+ * can create files. */
+ buffer_fd = memfd_new(NULL);
+ if (buffer_fd < 0) {
+ if (buffer_fd == -ENOSYS) {
+ buffer_fd = open_tmpfile_unlinkable("/dev/shm", O_RDWR | O_CLOEXEC);
+ if (buffer_fd < 0)
+ return buffer_fd;
+
+ seal = false;
+ } else
+ return buffer_fd;
+ }
+
+ n = writev(buffer_fd, w, j);
+ if (n < 0)
+ return -errno;
+
+ if (seal) {
+ r = memfd_set_sealed(buffer_fd);
+ if (r < 0)
+ return r;
+ }
+
+ r = send_one_fd_sa(fd, buffer_fd, mh.msg_name, mh.msg_namelen, 0);
+ if (r == -ENOENT)
+ /* Fail silently if the journal is not available */
+ return 0;
+ return r;
+}
+
+static int fill_iovec_perror_and_send(const char *message, int skip, struct iovec iov[]) {
+ PROTECT_ERRNO;
+ size_t n, k;
+
+ k = isempty(message) ? 0 : strlen(message) + 2;
+ n = 8 + k + 256 + 1;
+
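+        /* Assemble "MESSAGE=<message>: <error string>" in one buffer: 8 bytes for the
+         * "MESSAGE=" prefix, k bytes for the message plus ": ", and the rest for the
+         * strerror_r() output, doubling the buffer as long as ERANGE is reported. */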
+ for (;;) {
+ char buffer[n];
+ char* j;
+
+ errno = 0;
+ j = strerror_r(_saved_errno_, buffer + 8 + k, n - 8 - k);
+ if (errno == 0) {
+ char error[STRLEN("ERRNO=") + DECIMAL_STR_MAX(int) + 1];
+
+ if (j != buffer + 8 + k)
+ memmove(buffer + 8 + k, j, strlen(j)+1);
+
+ memcpy(buffer, "MESSAGE=", 8);
+
+ if (k > 0) {
+ memcpy(buffer + 8, message, k - 2);
+ memcpy(buffer + 8 + k - 2, ": ", 2);
+ }
+
+ xsprintf(error, "ERRNO=%i", _saved_errno_);
+
+ assert_cc(3 == LOG_ERR);
+ iov[skip+0] = IOVEC_MAKE_STRING("PRIORITY=3");
+ iov[skip+1] = IOVEC_MAKE_STRING(buffer);
+ iov[skip+2] = IOVEC_MAKE_STRING(error);
+
+ return sd_journal_sendv(iov, skip + 3);
+ }
+
+ if (errno != ERANGE)
+ return -errno;
+
+ n *= 2;
+ }
+}
+
+_public_ int sd_journal_perror(const char *message) {
+ struct iovec iovec[3];
+
+ return fill_iovec_perror_and_send(message, 0, iovec);
+}
+
+_public_ int sd_journal_stream_fd(const char *identifier, int priority, int level_prefix) {
+ static const union sockaddr_union sa = {
+ .un.sun_family = AF_UNIX,
+ .un.sun_path = "/run/systemd/journal/stdout",
+ };
+ _cleanup_close_ int fd = -1;
+ char *header;
+ size_t l;
+ int r;
+
+ assert_return(priority >= 0, -EINVAL);
+ assert_return(priority <= 7, -EINVAL);
+
+ fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0);
+ if (fd < 0)
+ return -errno;
+
+ r = connect(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
+ if (r < 0)
+ return -errno;
+
+ if (shutdown(fd, SHUT_RD) < 0)
+ return -errno;
+
+ (void) fd_inc_sndbuf(fd, SNDBUF_SIZE);
+
+ identifier = strempty(identifier);
+
+ l = strlen(identifier);
+ header = newa(char, l + 1 + 1 + 2 + 2 + 2 + 2 + 2);
+
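+        /* The stdout stream protocol header consists of seven newline-terminated lines:
+         * identifier, unit id (left empty here), priority, the level-prefix flag, and
+         * three further boolean flags which we leave cleared. */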
+ memcpy(header, identifier, l);
+ header[l++] = '\n';
+ header[l++] = '\n'; /* unit id */
+ header[l++] = '0' + priority;
+ header[l++] = '\n';
+ header[l++] = '0' + !!level_prefix;
+ header[l++] = '\n';
+ header[l++] = '0';
+ header[l++] = '\n';
+ header[l++] = '0';
+ header[l++] = '\n';
+ header[l++] = '0';
+ header[l++] = '\n';
+
+ r = loop_write(fd, header, l, false);
+ if (r < 0)
+ return r;
+
+ return TAKE_FD(fd);
+}
+
+_public_ int sd_journal_print_with_location(int priority, const char *file, const char *line, const char *func, const char *format, ...) {
+ int r;
+ va_list ap;
+
+ va_start(ap, format);
+ r = sd_journal_printv_with_location(priority, file, line, func, format, ap);
+ va_end(ap);
+
+ return r;
+}
+
+_public_ int sd_journal_printv_with_location(int priority, const char *file, const char *line, const char *func, const char *format, va_list ap) {
+ char buffer[8 + LINE_MAX], p[STRLEN("PRIORITY=") + DECIMAL_STR_MAX(int) + 1];
+ struct iovec iov[5];
+ char *f;
+
+ assert_return(priority >= 0, -EINVAL);
+ assert_return(priority <= 7, -EINVAL);
+ assert_return(format, -EINVAL);
+
+ xsprintf(p, "PRIORITY=%i", priority & LOG_PRIMASK);
+
+ memcpy(buffer, "MESSAGE=", 8);
+ vsnprintf(buffer+8, sizeof(buffer) - 8, format, ap);
+
+ /* Strip trailing whitespace, keep prefixing whitespace */
+ (void) strstrip(buffer);
+
+ /* Suppress empty lines */
+ if (isempty(buffer+8))
+ return 0;
+
+        /* func is initialized from __func__, which is not a macro but a
+         * static const char[], hence it cannot easily be prefixed with
+         * CODE_FUNC=; let's do it manually here. */
+ ALLOCA_CODE_FUNC(f, func);
+
+ iov[0] = IOVEC_MAKE_STRING(buffer);
+ iov[1] = IOVEC_MAKE_STRING(p);
+ iov[2] = IOVEC_MAKE_STRING(file);
+ iov[3] = IOVEC_MAKE_STRING(line);
+ iov[4] = IOVEC_MAKE_STRING(f);
+
+ return sd_journal_sendv(iov, ELEMENTSOF(iov));
+}
+
+_public_ int sd_journal_send_with_location(const char *file, const char *line, const char *func, const char *format, ...) {
+ _cleanup_free_ struct iovec *iov = NULL;
+ int r, i, j;
+ va_list ap;
+ char *f;
+
+ va_start(ap, format);
+ i = fill_iovec_sprintf(format, ap, 3, &iov);
+ va_end(ap);
+
+ if (_unlikely_(i < 0)) {
+ r = i;
+ goto finish;
+ }
+
+ ALLOCA_CODE_FUNC(f, func);
+
+ iov[0] = IOVEC_MAKE_STRING(file);
+ iov[1] = IOVEC_MAKE_STRING(line);
+ iov[2] = IOVEC_MAKE_STRING(f);
+
+ r = sd_journal_sendv(iov, i);
+
+finish:
+ for (j = 3; j < i; j++)
+ free(iov[j].iov_base);
+
+ return r;
+}
+
+_public_ int sd_journal_sendv_with_location(
+ const char *file, const char *line,
+ const char *func,
+ const struct iovec *iov, int n) {
+
+ struct iovec *niov;
+ char *f;
+
+ assert_return(iov, -EINVAL);
+ assert_return(n > 0, -EINVAL);
+
+ niov = newa(struct iovec, n + 3);
+ memcpy(niov, iov, sizeof(struct iovec) * n);
+
+ ALLOCA_CODE_FUNC(f, func);
+
+ niov[n++] = IOVEC_MAKE_STRING(file);
+ niov[n++] = IOVEC_MAKE_STRING(line);
+ niov[n++] = IOVEC_MAKE_STRING(f);
+
+ return sd_journal_sendv(niov, n);
+}
+
+_public_ int sd_journal_perror_with_location(
+ const char *file, const char *line,
+ const char *func,
+ const char *message) {
+
+ struct iovec iov[6];
+ char *f;
+
+ ALLOCA_CODE_FUNC(f, func);
+
+ iov[0] = IOVEC_MAKE_STRING(file);
+ iov[1] = IOVEC_MAKE_STRING(line);
+ iov[2] = IOVEC_MAKE_STRING(f);
+
+ return fill_iovec_perror_and_send(message, 3, iov);
+}
diff --git a/src/journal/journal-vacuum.c b/src/journal/journal-vacuum.c
new file mode 100644
index 0000000..2778ce4
--- /dev/null
+++ b/src/journal/journal-vacuum.c
@@ -0,0 +1,321 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#include "sd-id128.h"
+
+#include "alloc-util.h"
+#include "dirent-util.h"
+#include "fd-util.h"
+#include "fs-util.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "journal-vacuum.h"
+#include "parse-util.h"
+#include "string-util.h"
+#include "time-util.h"
+#include "util.h"
+#include "xattr-util.h"
+
+struct vacuum_info {
+ uint64_t usage;
+ char *filename;
+
+ uint64_t realtime;
+
+ sd_id128_t seqnum_id;
+ uint64_t seqnum;
+ bool have_seqnum;
+};
+
+static int vacuum_compare(const struct vacuum_info *a, const struct vacuum_info *b) {
+ int r;
+
+ if (a->have_seqnum && b->have_seqnum &&
+ sd_id128_equal(a->seqnum_id, b->seqnum_id))
+ return CMP(a->seqnum, b->seqnum);
+
+ r = CMP(a->realtime, b->realtime);
+ if (r != 0)
+ return r;
+
+ if (a->have_seqnum && b->have_seqnum)
+ return memcmp(&a->seqnum_id, &b->seqnum_id, 16);
+
+ return strcmp(a->filename, b->filename);
+}
+
+static void patch_realtime(
+ int fd,
+ const char *fn,
+ const struct stat *st,
+ unsigned long long *realtime) {
+
+ usec_t x, crtime = 0;
+
+ /* The timestamp was determined by the file name, but let's
+ * see if the file might actually be older than the file name
+ * suggested... */
+
+ assert(fd >= 0);
+ assert(fn);
+ assert(st);
+ assert(realtime);
+
+ x = timespec_load(&st->st_ctim);
+ if (x > 0 && x != USEC_INFINITY && x < *realtime)
+ *realtime = x;
+
+ x = timespec_load(&st->st_atim);
+ if (x > 0 && x != USEC_INFINITY && x < *realtime)
+ *realtime = x;
+
+ x = timespec_load(&st->st_mtim);
+ if (x > 0 && x != USEC_INFINITY && x < *realtime)
+ *realtime = x;
+
+ /* Let's read the original creation time, if possible. Ideally
+ * we'd just query the creation time the FS might provide, but
+ * unfortunately there's currently no sane API to query
+ * it. Hence let's implement this manually... */
+
+ if (fd_getcrtime_at(fd, fn, &crtime, 0) >= 0) {
+ if (crtime < *realtime)
+ *realtime = crtime;
+ }
+}
+
+static int journal_file_empty(int dir_fd, const char *name) {
+ _cleanup_close_ int fd;
+ struct stat st;
+ le64_t n_entries;
+ ssize_t n;
+
+ fd = openat(dir_fd, name, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK|O_NOATIME);
+ if (fd < 0) {
+ /* Maybe failed due to O_NOATIME and lack of privileges? */
+ fd = openat(dir_fd, name, O_RDONLY|O_CLOEXEC|O_NOFOLLOW|O_NONBLOCK);
+ if (fd < 0)
+ return -errno;
+ }
+
+ if (fstat(fd, &st) < 0)
+ return -errno;
+
+ /* If an offline file doesn't even have a header we consider it empty */
+ if (st.st_size < (off_t) sizeof(Header))
+ return 1;
+
+        /* If the number of entries is zero, we consider the file empty, too */
+ n = pread(fd, &n_entries, sizeof(n_entries), offsetof(Header, n_entries));
+ if (n < 0)
+ return -errno;
+ if (n != sizeof(n_entries))
+ return -EIO;
+
+ return le64toh(n_entries) <= 0;
+}
+
+int journal_directory_vacuum(
+ const char *directory,
+ uint64_t max_use,
+ uint64_t n_max_files,
+ usec_t max_retention_usec,
+ usec_t *oldest_usec,
+ bool verbose) {
+
+ uint64_t sum = 0, freed = 0, n_active_files = 0;
+ size_t n_list = 0, n_allocated = 0, i;
+ _cleanup_closedir_ DIR *d = NULL;
+ struct vacuum_info *list = NULL;
+ usec_t retention_limit = 0;
+ char sbytes[FORMAT_BYTES_MAX];
+ struct dirent *de;
+ int r;
+
+ assert(directory);
+
+ if (max_use <= 0 && max_retention_usec <= 0 && n_max_files <= 0)
+ return 0;
+
+ if (max_retention_usec > 0)
+ retention_limit = usec_sub_unsigned(now(CLOCK_REALTIME), max_retention_usec);
+
+ d = opendir(directory);
+ if (!d)
+ return -errno;
+
+ FOREACH_DIRENT_ALL(de, d, r = -errno; goto finish) {
+
+ unsigned long long seqnum = 0, realtime;
+ _cleanup_free_ char *p = NULL;
+ sd_id128_t seqnum_id;
+ bool have_seqnum;
+ uint64_t size;
+ struct stat st;
+ size_t q;
+
+ if (fstatat(dirfd(d), de->d_name, &st, AT_SYMLINK_NOFOLLOW) < 0) {
+ log_debug_errno(errno, "Failed to stat file %s while vacuuming, ignoring: %m", de->d_name);
+ continue;
+ }
+
+ if (!S_ISREG(st.st_mode))
+ continue;
+
+ q = strlen(de->d_name);
+
+ if (endswith(de->d_name, ".journal")) {
+
+ /* Vacuum archived files. Active files are
+ * left around */
+
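+ /* Archived journal files are named
+ * <prefix>@<seqnum id: 32 hex chars>-<seqnum: 16 hex chars>-<realtime: 16 hex chars>.journal,
+ * so anything shorter than that cannot be an archived file and is
+ * counted as active instead. */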
+ if (q < 1 + 32 + 1 + 16 + 1 + 16 + 8) {
+ n_active_files++;
+ continue;
+ }
+
+ if (de->d_name[q-8-16-1] != '-' ||
+ de->d_name[q-8-16-1-16-1] != '-' ||
+ de->d_name[q-8-16-1-16-1-32-1] != '@') {
+ n_active_files++;
+ continue;
+ }
+
+ p = strdup(de->d_name);
+ if (!p) {
+ r = -ENOMEM;
+ goto finish;
+ }
+
+ de->d_name[q-8-16-1-16-1] = 0;
+ if (sd_id128_from_string(de->d_name + q-8-16-1-16-1-32, &seqnum_id) < 0) {
+ n_active_files++;
+ continue;
+ }
+
+ if (sscanf(de->d_name + q-8-16-1-16, "%16llx-%16llx.journal", &seqnum, &realtime) != 2) {
+ n_active_files++;
+ continue;
+ }
+
+ have_seqnum = true;
+
+ } else if (endswith(de->d_name, ".journal~")) {
+ unsigned long long tmp;
+
+ /* Vacuum corrupted files */
+
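+ /* Corrupted (disowned) journal files are named
+ * <prefix>@<realtime: 16 hex chars>-<random: 16 hex chars>.journal~,
+ * hence the minimum length checked below. */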
+ if (q < 1 + 16 + 1 + 16 + 8 + 1) {
+ n_active_files++;
+ continue;
+ }
+
+ if (de->d_name[q-1-8-16-1] != '-' ||
+ de->d_name[q-1-8-16-1-16-1] != '@') {
+ n_active_files++;
+ continue;
+ }
+
+ p = strdup(de->d_name);
+ if (!p) {
+ r = -ENOMEM;
+ goto finish;
+ }
+
+ if (sscanf(de->d_name + q-1-8-16-1-16, "%16llx-%16llx.journal~", &realtime, &tmp) != 2) {
+ n_active_files++;
+ continue;
+ }
+
+ have_seqnum = false;
+ } else {
+ /* We do not vacuum unknown files! */
+ log_debug("Not vacuuming unknown file %s.", de->d_name);
+ continue;
+ }
+
+ size = 512UL * (uint64_t) st.st_blocks;
+
+ r = journal_file_empty(dirfd(d), p);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to check if %s is empty, ignoring: %m", p);
+ continue;
+ }
+ if (r > 0) {
+ /* Always vacuum empty non-online files. */
+
+ r = unlinkat_deallocate(dirfd(d), p, 0);
+ if (r >= 0) {
+
+ log_full(verbose ? LOG_INFO : LOG_DEBUG,
+ "Deleted empty archived journal %s/%s (%s).", directory, p, format_bytes(sbytes, sizeof(sbytes), size));
+
+ freed += size;
+ } else if (r != -ENOENT)
+ log_warning_errno(r, "Failed to delete empty archived journal %s/%s: %m", directory, p);
+
+ continue;
+ }
+
+ patch_realtime(dirfd(d), p, &st, &realtime);
+
+ if (!GREEDY_REALLOC(list, n_allocated, n_list + 1)) {
+ r = -ENOMEM;
+ goto finish;
+ }
+
+ list[n_list++] = (struct vacuum_info) {
+ .filename = TAKE_PTR(p),
+ .usage = size,
+ .seqnum = seqnum,
+ .realtime = realtime,
+ .seqnum_id = seqnum_id,
+ .have_seqnum = have_seqnum,
+ };
+
+ sum += size;
+ }
+
+ typesafe_qsort(list, n_list, vacuum_compare);
+
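+ /* Delete the oldest files first, and stop as soon as the retention
+ * time, total disk usage and file count limits are all satisfied. */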
+ for (i = 0; i < n_list; i++) {
+ uint64_t left;
+
+ left = n_active_files + n_list - i;
+
+ if ((max_retention_usec <= 0 || list[i].realtime >= retention_limit) &&
+ (max_use <= 0 || sum <= max_use) &&
+ (n_max_files <= 0 || left <= n_max_files))
+ break;
+
+ r = unlinkat_deallocate(dirfd(d), list[i].filename, 0);
+ if (r >= 0) {
+ log_full(verbose ? LOG_INFO : LOG_DEBUG, "Deleted archived journal %s/%s (%s).", directory, list[i].filename, format_bytes(sbytes, sizeof(sbytes), list[i].usage));
+ freed += list[i].usage;
+
+ if (list[i].usage < sum)
+ sum -= list[i].usage;
+ else
+ sum = 0;
+
+ } else if (r != -ENOENT)
+ log_warning_errno(r, "Failed to delete archived journal %s/%s: %m", directory, list[i].filename);
+ }
+
+ if (oldest_usec && i < n_list && (*oldest_usec == 0 || list[i].realtime < *oldest_usec))
+ *oldest_usec = list[i].realtime;
+
+ r = 0;
+
+finish:
+ for (i = 0; i < n_list; i++)
+ free(list[i].filename);
+ free(list);
+
+ log_full(verbose ? LOG_INFO : LOG_DEBUG, "Vacuuming done, freed %s of archived journals from %s.", format_bytes(sbytes, sizeof(sbytes), freed), directory);
+
+ return r;
+}
diff --git a/src/journal/journal-vacuum.h b/src/journal/journal-vacuum.h
new file mode 100644
index 0000000..0b336ac
--- /dev/null
+++ b/src/journal/journal-vacuum.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <inttypes.h>
+#include <stdbool.h>
+
+#include "time-util.h"
+
+int journal_directory_vacuum(const char *directory, uint64_t max_use, uint64_t n_max_files, usec_t max_retention_usec, usec_t *oldest_usec, bool verbose);
diff --git a/src/journal/journal-verify.c b/src/journal/journal-verify.c
new file mode 100644
index 0000000..5eff80a
--- /dev/null
+++ b/src/journal/journal-verify.c
@@ -0,0 +1,1320 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "alloc-util.h"
+#include "compress.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "fs-util.h"
+#include "journal-authenticate.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "journal-verify.h"
+#include "lookup3.h"
+#include "macro.h"
+#include "terminal-util.h"
+#include "tmpfile-util.h"
+#include "util.h"
+
+static void draw_progress(uint64_t p, usec_t *last_usec) {
+ unsigned n, i, j, k;
+ usec_t z, x;
+
+ if (!on_tty())
+ return;
+
+ z = now(CLOCK_MONOTONIC);
+ x = *last_usec;
+
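+ /* Redraw at most every 40 ms to avoid flooding the terminal */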
+ if (x != 0 && x + 40 * USEC_PER_MSEC > z)
+ return;
+
+ *last_usec = z;
+
+ n = (3 * columns()) / 4;
+ j = (n * (unsigned) p) / 65535ULL;
+ k = n - j;
+
+ fputs("\r", stdout);
+ if (colors_enabled())
+ fputs("\x1B[?25l" ANSI_HIGHLIGHT_GREEN, stdout);
+
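+ /* U+2588 FULL BLOCK for the completed part, U+2591 LIGHT SHADE for the remainder */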
+ for (i = 0; i < j; i++)
+ fputs("\xe2\x96\x88", stdout);
+
+ fputs(ANSI_NORMAL, stdout);
+
+ for (i = 0; i < k; i++)
+ fputs("\xe2\x96\x91", stdout);
+
+ printf(" %3"PRIu64"%%", 100U * p / 65535U);
+
+ fputs("\r", stdout);
+ if (colors_enabled())
+ fputs("\x1B[?25h", stdout);
+
+ fflush(stdout);
+}
+
+static uint64_t scale_progress(uint64_t scale, uint64_t p, uint64_t m) {
+ /* Calculates scale * p / m, but handles m == 0 safely, and saturates.
+ * Currently all callers use m >= 1, but we keep the check to be defensive.
+ */
+
+ if (p >= m || m == 0) /* lgtm [cpp/constant-comparison] */
+ return scale;
+
+ return scale * p / m;
+}
+
+static void flush_progress(void) {
+ unsigned n, i;
+
+ if (!on_tty())
+ return;
+
+ n = (3 * columns()) / 4;
+
+ putchar('\r');
+
+ for (i = 0; i < n + 5; i++)
+ putchar(' ');
+
+ putchar('\r');
+ fflush(stdout);
+}
+
+#define debug(_offset, _fmt, ...) do { \
+ flush_progress(); \
+ log_debug(OFSfmt": " _fmt, _offset, ##__VA_ARGS__); \
+ } while (0)
+
+#define warning(_offset, _fmt, ...) do { \
+ flush_progress(); \
+ log_warning(OFSfmt": " _fmt, _offset, ##__VA_ARGS__); \
+ } while (0)
+
+#define error(_offset, _fmt, ...) do { \
+ flush_progress(); \
+ log_error(OFSfmt": " _fmt, (uint64_t)_offset, ##__VA_ARGS__); \
+ } while (0)
+
+#define error_errno(_offset, error, _fmt, ...) do { \
+ flush_progress(); \
+ log_error_errno(error, OFSfmt": " _fmt, (uint64_t)_offset, ##__VA_ARGS__); \
+ } while (0)
+
+static int journal_file_object_verify(JournalFile *f, uint64_t offset, Object *o) {
+ uint64_t i;
+
+ assert(f);
+ assert(offset);
+ assert(o);
+
+ /* This does various superficial tests about the length and
+ * possible field values. It does not follow any references to
+ * other objects. */
+
+ if ((o->object.flags & OBJECT_COMPRESSED_XZ) &&
+ o->object.type != OBJECT_DATA) {
+ error(offset, "Found compressed object that isn't of type DATA, which is not allowed.");
+ return -EBADMSG;
+ }
+
+ switch (o->object.type) {
+
+ case OBJECT_DATA: {
+ uint64_t h1, h2;
+ int compression, r;
+
+ if (le64toh(o->data.entry_offset) == 0)
+ warning(offset, "Unused data (entry_offset==0)");
+
+ if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0)) {
+ error(offset, "Bad n_entries: %"PRIu64, le64toh(o->data.n_entries));
+ return -EBADMSG;
+ }
+
+ if (le64toh(o->object.size) - offsetof(DataObject, payload) <= 0) {
+ error(offset, "Bad object size (<= %zu): %"PRIu64,
+ offsetof(DataObject, payload),
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ h1 = le64toh(o->data.hash);
+
+ compression = o->object.flags & OBJECT_COMPRESSION_MASK;
+ if (compression) {
+ _cleanup_free_ void *b = NULL;
+ size_t alloc = 0, b_size;
+
+ r = decompress_blob(compression,
+ o->data.payload,
+ le64toh(o->object.size) - offsetof(Object, data.payload),
+ &b, &alloc, &b_size, 0);
+ if (r < 0) {
+ error_errno(offset, r, "%s decompression failed: %m",
+ object_compressed_to_string(compression));
+ return r;
+ }
+
+ h2 = hash64(b, b_size);
+ } else
+ h2 = hash64(o->data.payload, le64toh(o->object.size) - offsetof(Object, data.payload));
+
+ if (h1 != h2) {
+ error(offset, "Invalid hash (%08"PRIx64" vs. %08"PRIx64")", h1, h2);
+ return -EBADMSG;
+ }
+
+ if (!VALID64(le64toh(o->data.next_hash_offset)) ||
+ !VALID64(le64toh(o->data.next_field_offset)) ||
+ !VALID64(le64toh(o->data.entry_offset)) ||
+ !VALID64(le64toh(o->data.entry_array_offset))) {
+ error(offset, "Invalid offset (next_hash_offset="OFSfmt", next_field_offset="OFSfmt", entry_offset="OFSfmt", entry_array_offset="OFSfmt,
+ le64toh(o->data.next_hash_offset),
+ le64toh(o->data.next_field_offset),
+ le64toh(o->data.entry_offset),
+ le64toh(o->data.entry_array_offset));
+ return -EBADMSG;
+ }
+
+ break;
+ }
+
+ case OBJECT_FIELD:
+ if (le64toh(o->object.size) - offsetof(FieldObject, payload) <= 0) {
+ error(offset,
+ "Bad field size (<= %zu): %"PRIu64,
+ offsetof(FieldObject, payload),
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ if (!VALID64(le64toh(o->field.next_hash_offset)) ||
+ !VALID64(le64toh(o->field.head_data_offset))) {
+ error(offset,
+ "Invalid offset (next_hash_offset="OFSfmt", head_data_offset="OFSfmt,
+ le64toh(o->field.next_hash_offset),
+ le64toh(o->field.head_data_offset));
+ return -EBADMSG;
+ }
+ break;
+
+ case OBJECT_ENTRY:
+ if ((le64toh(o->object.size) - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0) {
+ error(offset,
+ "Bad entry size (<= %zu): %"PRIu64,
+ offsetof(EntryObject, items),
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ if ((le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0) {
+ error(offset,
+ "Invalid number of items in entry: %"PRIu64,
+ (le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem));
+ return -EBADMSG;
+ }
+
+ if (le64toh(o->entry.seqnum) <= 0) {
+ error(offset,
+ "Invalid entry seqnum: %"PRIx64,
+ le64toh(o->entry.seqnum));
+ return -EBADMSG;
+ }
+
+ if (!VALID_REALTIME(le64toh(o->entry.realtime))) {
+ error(offset,
+ "Invalid entry realtime timestamp: %"PRIu64,
+ le64toh(o->entry.realtime));
+ return -EBADMSG;
+ }
+
+ if (!VALID_MONOTONIC(le64toh(o->entry.monotonic))) {
+ error(offset,
+ "Invalid entry monotonic timestamp: %"PRIu64,
+ le64toh(o->entry.monotonic));
+ return -EBADMSG;
+ }
+
+ for (i = 0; i < journal_file_entry_n_items(o); i++) {
+ if (le64toh(o->entry.items[i].object_offset) == 0 ||
+ !VALID64(le64toh(o->entry.items[i].object_offset))) {
+ error(offset,
+ "Invalid entry item (%"PRIu64"/%"PRIu64") offset: "OFSfmt,
+ i, journal_file_entry_n_items(o),
+ le64toh(o->entry.items[i].object_offset));
+ return -EBADMSG;
+ }
+ }
+
+ break;
+
+ case OBJECT_DATA_HASH_TABLE:
+ case OBJECT_FIELD_HASH_TABLE:
+ if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 ||
+ (le64toh(o->object.size) - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0) {
+ error(offset,
+ "Invalid %s hash table size: %"PRIu64,
+ o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ for (i = 0; i < journal_file_hash_table_n_items(o); i++) {
+ if (o->hash_table.items[i].head_hash_offset != 0 &&
+ !VALID64(le64toh(o->hash_table.items[i].head_hash_offset))) {
+ error(offset,
+ "Invalid %s hash table item (%"PRIu64"/%"PRIu64") head_hash_offset: "OFSfmt,
+ o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
+ i, journal_file_hash_table_n_items(o),
+ le64toh(o->hash_table.items[i].head_hash_offset));
+ return -EBADMSG;
+ }
+ if (o->hash_table.items[i].tail_hash_offset != 0 &&
+ !VALID64(le64toh(o->hash_table.items[i].tail_hash_offset))) {
+ error(offset,
+ "Invalid %s hash table item (%"PRIu64"/%"PRIu64") tail_hash_offset: "OFSfmt,
+ o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
+ i, journal_file_hash_table_n_items(o),
+ le64toh(o->hash_table.items[i].tail_hash_offset));
+ return -EBADMSG;
+ }
+
+ if ((o->hash_table.items[i].head_hash_offset != 0) !=
+ (o->hash_table.items[i].tail_hash_offset != 0)) {
+ error(offset,
+ "Invalid %s hash table item (%"PRIu64"/%"PRIu64"): head_hash_offset="OFSfmt" tail_hash_offset="OFSfmt,
+ o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
+ i, journal_file_hash_table_n_items(o),
+ le64toh(o->hash_table.items[i].head_hash_offset),
+ le64toh(o->hash_table.items[i].tail_hash_offset));
+ return -EBADMSG;
+ }
+ }
+
+ break;
+
+ case OBJECT_ENTRY_ARRAY:
+ if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 ||
+ (le64toh(o->object.size) - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0) {
+ error(offset,
+ "Invalid object entry array size: %"PRIu64,
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ if (!VALID64(le64toh(o->entry_array.next_entry_array_offset))) {
+ error(offset,
+ "Invalid object entry array next_entry_array_offset: "OFSfmt,
+ le64toh(o->entry_array.next_entry_array_offset));
+ return -EBADMSG;
+ }
+
+ for (i = 0; i < journal_file_entry_array_n_items(o); i++)
+ if (le64toh(o->entry_array.items[i]) != 0 &&
+ !VALID64(le64toh(o->entry_array.items[i]))) {
+ error(offset,
+ "Invalid object entry array item (%"PRIu64"/%"PRIu64"): "OFSfmt,
+ i, journal_file_entry_array_n_items(o),
+ le64toh(o->entry_array.items[i]));
+ return -EBADMSG;
+ }
+
+ break;
+
+ case OBJECT_TAG:
+ if (le64toh(o->object.size) != sizeof(TagObject)) {
+ error(offset,
+ "Invalid object tag size: %"PRIu64,
+ le64toh(o->object.size));
+ return -EBADMSG;
+ }
+
+ if (!VALID_EPOCH(le64toh(o->tag.epoch))) {
+ error(offset,
+ "Invalid object tag epoch: %"PRIu64,
+ le64toh(o->tag.epoch));
+ return -EBADMSG;
+ }
+
+ break;
+ }
+
+ return 0;
+}
+
+static int write_uint64(int fd, uint64_t p) {
+ ssize_t k;
+
+ k = write(fd, &p, sizeof(p));
+ if (k < 0)
+ return -errno;
+ if (k != sizeof(p))
+ return -EIO;
+
+ return 0;
+}
+
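+/* The first verification pass appends the offset of every data, entry and
+ * entry array object to a temporary file, in ascending order. This helper
+ * binary-searches such a file (through the mmap cache) to check whether a
+ * given offset was actually seen there. */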
+static int contains_uint64(MMapCache *m, MMapFileDescriptor *f, uint64_t n, uint64_t p) {
+ uint64_t a, b;
+ int r;
+
+ assert(m);
+ assert(f);
+
+ /* Bisection ... */
+
+ a = 0; b = n;
+ while (a < b) {
+ uint64_t c, *z;
+
+ c = (a + b) / 2;
+
+ r = mmap_cache_get(m, f, PROT_READ|PROT_WRITE, 0, false, c * sizeof(uint64_t), sizeof(uint64_t), NULL, (void **) &z, NULL);
+ if (r < 0)
+ return r;
+
+ if (*z == p)
+ return 1;
+
+ if (a + 1 >= b)
+ return 0;
+
+ if (p < *z)
+ b = c;
+ else
+ a = c;
+ }
+
+ return 0;
+}
+
+static int entry_points_to_data(
+ JournalFile *f,
+ MMapFileDescriptor *cache_entry_fd,
+ uint64_t n_entries,
+ uint64_t entry_p,
+ uint64_t data_p) {
+
+ int r;
+ uint64_t i, n, a;
+ Object *o;
+ bool found = false;
+
+ assert(f);
+ assert(cache_entry_fd);
+
+ if (!contains_uint64(f->mmap, cache_entry_fd, n_entries, entry_p)) {
+ error(data_p, "Data object references invalid entry at "OFSfmt, entry_p);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, entry_p, &o);
+ if (r < 0)
+ return r;
+
+ n = journal_file_entry_n_items(o);
+ for (i = 0; i < n; i++)
+ if (le64toh(o->entry.items[i].object_offset) == data_p) {
+ found = true;
+ break;
+ }
+
+ if (!found) {
+ error(entry_p, "Data object at "OFSfmt" not referenced by linked entry", data_p);
+ return -EBADMSG;
+ }
+
+ /* Check if this entry is also in main entry array. Since the
+ * main entry array has already been verified we can rely on
+ * its consistency. */
+
+ i = 0;
+ n = le64toh(f->header->n_entries);
+ a = le64toh(f->header->entry_array_offset);
+
+ while (i < n) {
+ uint64_t m, u;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+
+ m = journal_file_entry_array_n_items(o);
+ u = MIN(n - i, m);
+
+ if (entry_p <= le64toh(o->entry_array.items[u-1])) {
+ uint64_t x, y, z;
+
+ x = 0;
+ y = u;
+
+ while (x < y) {
+ z = (x + y) / 2;
+
+ if (le64toh(o->entry_array.items[z]) == entry_p)
+ return 0;
+
+ if (x + 1 >= y)
+ break;
+
+ if (entry_p < le64toh(o->entry_array.items[z]))
+ y = z;
+ else
+ x = z;
+ }
+
+ error(entry_p, "Entry object doesn't exist in main entry array");
+ return -EBADMSG;
+ }
+
+ i += u;
+ a = le64toh(o->entry_array.next_entry_array_offset);
+ }
+
+ return 0;
+}
+
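+/* Walks the entry array chain of a data object and checks that every listed
+ * entry exists, that the offsets are strictly increasing, and that each
+ * entry references this data object back. */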
+static int verify_data(
+ JournalFile *f,
+ Object *o, uint64_t p,
+ MMapFileDescriptor *cache_entry_fd, uint64_t n_entries,
+ MMapFileDescriptor *cache_entry_array_fd, uint64_t n_entry_arrays) {
+
+ uint64_t i, n, a, last, q;
+ int r;
+
+ assert(f);
+ assert(o);
+ assert(cache_entry_fd);
+ assert(cache_entry_array_fd);
+
+ n = le64toh(o->data.n_entries);
+ a = le64toh(o->data.entry_array_offset);
+
+ /* An entry array is only allocated once a data object has at least two
+ * entries, hence a non-zero entry_array_offset implies n_entries >= 2 */
+ if (a && n < 2) {
+ error(p, "Entry array present (entry_array_offset="OFSfmt", but n_entries=%"PRIu64")", a, n);
+ return -EBADMSG;
+ }
+
+ if (n == 0)
+ return 0;
+
+ /* We already checked that earlier */
+ assert(o->data.entry_offset);
+
+ last = q = le64toh(o->data.entry_offset);
+ r = entry_points_to_data(f, cache_entry_fd, n_entries, q, p);
+ if (r < 0)
+ return r;
+
+ i = 1;
+ while (i < n) {
+ uint64_t next, m, j;
+
+ if (a == 0) {
+ error(p, "Array chain too short");
+ return -EBADMSG;
+ }
+
+ if (!contains_uint64(f->mmap, cache_entry_array_fd, n_entry_arrays, a)) {
+ error(p, "Invalid array offset "OFSfmt, a);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+
+ next = le64toh(o->entry_array.next_entry_array_offset);
+ if (next != 0 && next <= a) {
+ error(p, "Array chain has cycle (jumps back from "OFSfmt" to "OFSfmt")", a, next);
+ return -EBADMSG;
+ }
+
+ m = journal_file_entry_array_n_items(o);
+ for (j = 0; i < n && j < m; i++, j++) {
+
+ q = le64toh(o->entry_array.items[j]);
+ if (q <= last) {
+ error(p, "Data object's entry array not sorted");
+ return -EBADMSG;
+ }
+ last = q;
+
+ r = entry_points_to_data(f, cache_entry_fd, n_entries, q, p);
+ if (r < 0)
+ return r;
+
+ /* Pointer might have moved, reposition */
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+ }
+
+ a = next;
+ }
+
+ return 0;
+}
+
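+/* Walks every chain of the data hash table, verifying that each chained
+ * data object hashes into the right bucket, that the chain terminates at
+ * the recorded tail, and that the object's entry references are consistent
+ * (via verify_data()). */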
+static int verify_hash_table(
+ JournalFile *f,
+ MMapFileDescriptor *cache_data_fd, uint64_t n_data,
+ MMapFileDescriptor *cache_entry_fd, uint64_t n_entries,
+ MMapFileDescriptor *cache_entry_array_fd, uint64_t n_entry_arrays,
+ usec_t *last_usec,
+ bool show_progress) {
+
+ uint64_t i, n;
+ int r;
+
+ assert(f);
+ assert(cache_data_fd);
+ assert(cache_entry_fd);
+ assert(cache_entry_array_fd);
+ assert(last_usec);
+
+ n = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+ if (n <= 0)
+ return 0;
+
+ r = journal_file_map_data_hash_table(f);
+ if (r < 0)
+ return log_error_errno(r, "Failed to map data hash table: %m");
+
+ for (i = 0; i < n; i++) {
+ uint64_t last = 0, p;
+
+ if (show_progress)
+ draw_progress(0xC000 + scale_progress(0x3FFF, i, n), last_usec);
+
+ p = le64toh(f->data_hash_table[i].head_hash_offset);
+ while (p != 0) {
+ Object *o;
+ uint64_t next;
+
+ if (!contains_uint64(f->mmap, cache_data_fd, n_data, p)) {
+ error(p, "Invalid data object at hash entry %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ next = le64toh(o->data.next_hash_offset);
+ if (next != 0 && next <= p) {
+ error(p, "Hash chain has a cycle in hash entry %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ if (le64toh(o->data.hash) % n != i) {
+ error(p, "Hash value mismatch in hash entry %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ r = verify_data(f, o, p, cache_entry_fd, n_entries, cache_entry_array_fd, n_entry_arrays);
+ if (r < 0)
+ return r;
+
+ last = p;
+ p = next;
+ }
+
+ if (last != le64toh(f->data_hash_table[i].tail_hash_offset)) {
+ error(p, "Tail hash pointer mismatch in hash table");
+ return -EBADMSG;
+ }
+ }
+
+ return 0;
+}
+
+static int data_object_in_hash_table(JournalFile *f, uint64_t hash, uint64_t p) {
+ uint64_t n, h, q;
+ int r;
+ assert(f);
+
+ n = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
+ if (n <= 0)
+ return 0;
+
+ r = journal_file_map_data_hash_table(f);
+ if (r < 0)
+ return log_error_errno(r, "Failed to map data hash table: %m");
+
+ h = hash % n;
+
+ q = le64toh(f->data_hash_table[h].head_hash_offset);
+ while (q != 0) {
+ Object *o;
+
+ if (p == q)
+ return 1;
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, q, &o);
+ if (r < 0)
+ return r;
+
+ q = le64toh(o->data.next_hash_offset);
+ }
+
+ return 0;
+}
+
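+/* Checks that every item of an entry references a valid data object with a
+ * matching hash, and that this data object is reachable through the data
+ * hash table. */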
+static int verify_entry(
+ JournalFile *f,
+ Object *o, uint64_t p,
+ MMapFileDescriptor *cache_data_fd, uint64_t n_data) {
+
+ uint64_t i, n;
+ int r;
+
+ assert(f);
+ assert(o);
+ assert(cache_data_fd);
+
+ n = journal_file_entry_n_items(o);
+ for (i = 0; i < n; i++) {
+ uint64_t q, h;
+ Object *u;
+
+ q = le64toh(o->entry.items[i].object_offset);
+ h = le64toh(o->entry.items[i].hash);
+
+ if (!contains_uint64(f->mmap, cache_data_fd, n_data, q)) {
+ error(p, "Invalid data object of entry");
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_DATA, q, &u);
+ if (r < 0)
+ return r;
+
+ if (le64toh(u->data.hash) != h) {
+ error(p, "Hash mismatch for data object of entry");
+ return -EBADMSG;
+ }
+
+ r = data_object_in_hash_table(f, h, q);
+ if (r < 0)
+ return r;
+ if (r == 0) {
+ error(p, "Data object missing from hash table");
+ return -EBADMSG;
+ }
+ }
+
+ return 0;
+}
+
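+/* Walks the main entry array chain, verifying each referenced entry object
+ * (via verify_entry()) and checking that entry offsets are strictly
+ * increasing and that the chain does not loop back on itself. */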
+static int verify_entry_array(
+ JournalFile *f,
+ MMapFileDescriptor *cache_data_fd, uint64_t n_data,
+ MMapFileDescriptor *cache_entry_fd, uint64_t n_entries,
+ MMapFileDescriptor *cache_entry_array_fd, uint64_t n_entry_arrays,
+ usec_t *last_usec,
+ bool show_progress) {
+
+ uint64_t i = 0, a, n, last = 0;
+ int r;
+
+ assert(f);
+ assert(cache_data_fd);
+ assert(cache_entry_fd);
+ assert(cache_entry_array_fd);
+ assert(last_usec);
+
+ n = le64toh(f->header->n_entries);
+ a = le64toh(f->header->entry_array_offset);
+ while (i < n) {
+ uint64_t next, m, j;
+ Object *o;
+
+ if (show_progress)
+ draw_progress(0x8000 + scale_progress(0x3FFF, i, n), last_usec);
+
+ if (a == 0) {
+ error(a, "Array chain too short at %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ if (!contains_uint64(f->mmap, cache_entry_array_fd, n_entry_arrays, a)) {
+ error(a, "Invalid array %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+
+ next = le64toh(o->entry_array.next_entry_array_offset);
+ if (next != 0 && next <= a) {
+ error(a, "Array chain has cycle at %"PRIu64" of %"PRIu64" (jumps back to "OFSfmt")", i, n, next);
+ return -EBADMSG;
+ }
+
+ m = journal_file_entry_array_n_items(o);
+ for (j = 0; i < n && j < m; i++, j++) {
+ uint64_t p;
+
+ p = le64toh(o->entry_array.items[j]);
+ if (p <= last) {
+ error(a, "Entry array not sorted at %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+ last = p;
+
+ if (!contains_uint64(f->mmap, cache_entry_fd, n_entries, p)) {
+ error(a, "Invalid array entry at %"PRIu64" of %"PRIu64, i, n);
+ return -EBADMSG;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
+ if (r < 0)
+ return r;
+
+ r = verify_entry(f, o, p, cache_data_fd, n_data);
+ if (r < 0)
+ return r;
+
+ /* Pointer might have moved, reposition */
+ r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
+ if (r < 0)
+ return r;
+ }
+
+ a = next;
+ }
+
+ return 0;
+}
+
+int journal_file_verify(
+ JournalFile *f,
+ const char *key,
+ usec_t *first_contained, usec_t *last_validated, usec_t *last_contained,
+ bool show_progress) {
+ int r;
+ Object *o;
+ uint64_t p = 0, last_epoch = 0, last_tag_realtime = 0, last_sealed_realtime = 0;
+
+ uint64_t entry_seqnum = 0, entry_monotonic = 0, entry_realtime = 0;
+ sd_id128_t entry_boot_id;
+ bool entry_seqnum_set = false, entry_monotonic_set = false, entry_realtime_set = false, found_main_entry_array = false;
+ uint64_t n_weird = 0, n_objects = 0, n_entries = 0, n_data = 0, n_fields = 0, n_data_hash_tables = 0, n_field_hash_tables = 0, n_entry_arrays = 0, n_tags = 0;
+ usec_t last_usec = 0;
+ int data_fd = -1, entry_fd = -1, entry_array_fd = -1;
+ MMapFileDescriptor *cache_data_fd = NULL, *cache_entry_fd = NULL, *cache_entry_array_fd = NULL;
+ unsigned i;
+ bool found_last = false;
+ const char *tmp_dir = NULL;
+
+#if HAVE_GCRYPT
+ uint64_t last_tag = 0;
+#endif
+ assert(f);
+
+ if (key) {
+#if HAVE_GCRYPT
+ r = journal_file_parse_verification_key(f, key);
+ if (r < 0) {
+ log_error("Failed to parse seed.");
+ return r;
+ }
+#else
+ return -EOPNOTSUPP;
+#endif
+ } else if (f->seal)
+ return -ENOKEY;
+
+ r = var_tmp_dir(&tmp_dir);
+ if (r < 0) {
+ log_error_errno(r, "Failed to determine temporary directory: %m");
+ goto fail;
+ }
+
+ data_fd = open_tmpfile_unlinkable(tmp_dir, O_RDWR | O_CLOEXEC);
+ if (data_fd < 0) {
+ r = log_error_errno(data_fd, "Failed to create data file: %m");
+ goto fail;
+ }
+
+ entry_fd = open_tmpfile_unlinkable(tmp_dir, O_RDWR | O_CLOEXEC);
+ if (entry_fd < 0) {
+ r = log_error_errno(entry_fd, "Failed to create entry file: %m");
+ goto fail;
+ }
+
+ entry_array_fd = open_tmpfile_unlinkable(tmp_dir, O_RDWR | O_CLOEXEC);
+ if (entry_array_fd < 0) {
+ r = log_error_errno(entry_array_fd,
+ "Failed to create entry array file: %m");
+ goto fail;
+ }
+
+ cache_data_fd = mmap_cache_add_fd(f->mmap, data_fd);
+ if (!cache_data_fd) {
+ r = log_oom();
+ goto fail;
+ }
+
+ cache_entry_fd = mmap_cache_add_fd(f->mmap, entry_fd);
+ if (!cache_entry_fd) {
+ r = log_oom();
+ goto fail;
+ }
+
+ cache_entry_array_fd = mmap_cache_add_fd(f->mmap, entry_array_fd);
+ if (!cache_entry_array_fd) {
+ r = log_oom();
+ goto fail;
+ }
+
+ if (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_SUPPORTED) {
+ log_error("Cannot verify file with unknown extensions.");
+ r = -EOPNOTSUPP;
+ goto fail;
+ }
+
+ for (i = 0; i < sizeof(f->header->reserved); i++)
+ if (f->header->reserved[i] != 0) {
+ error(offsetof(Header, reserved[i]), "Reserved field is non-zero");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ /* First iteration: we go through all objects, verify the
+ * superficial structure, headers, hashes. */
+
+ p = le64toh(f->header->header_size);
+ for (;;) {
+ /* Early exit if there are no objects in the file, at all */
+ if (le64toh(f->header->tail_object_offset) == 0)
+ break;
+
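+ /* Progress is reported on a 16 bit scale: this first pass covers
+ * 0x0000..0x7FFF (roughly 0-50%), verify_entry_array() continues at
+ * 0x8000 and verify_hash_table() at 0xC000. */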
+ if (show_progress)
+ draw_progress(scale_progress(0x7FFF, p, le64toh(f->header->tail_object_offset)), &last_usec);
+
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
+ if (r < 0) {
+ error(p, "Invalid object");
+ goto fail;
+ }
+
+ if (p > le64toh(f->header->tail_object_offset)) {
+ error(offsetof(Header, tail_object_offset), "Invalid tail object pointer");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ n_objects++;
+
+ r = journal_file_object_verify(f, p, o);
+ if (r < 0) {
+ error_errno(p, r, "Invalid object contents: %m");
+ goto fail;
+ }
+
+ if ((o->object.flags & OBJECT_COMPRESSED_XZ) &&
+ (o->object.flags & OBJECT_COMPRESSED_LZ4)) {
+ error(p, "Object with double compression");
+ r = -EINVAL;
+ goto fail;
+ }
+
+ if ((o->object.flags & OBJECT_COMPRESSED_XZ) && !JOURNAL_HEADER_COMPRESSED_XZ(f->header)) {
+ error(p, "XZ compressed object in file without XZ compression");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if ((o->object.flags & OBJECT_COMPRESSED_LZ4) && !JOURNAL_HEADER_COMPRESSED_LZ4(f->header)) {
+ error(p, "LZ4 compressed object in file without LZ4 compression");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ switch (o->object.type) {
+
+ case OBJECT_DATA:
+ r = write_uint64(data_fd, p);
+ if (r < 0)
+ goto fail;
+
+ n_data++;
+ break;
+
+ case OBJECT_FIELD:
+ n_fields++;
+ break;
+
+ case OBJECT_ENTRY:
+ if (JOURNAL_HEADER_SEALED(f->header) && n_tags <= 0) {
+ error(p, "First entry before first tag");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ r = write_uint64(entry_fd, p);
+ if (r < 0)
+ goto fail;
+
+ if (le64toh(o->entry.realtime) < last_tag_realtime) {
+ error(p, "Older entry after newer tag");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (!entry_seqnum_set &&
+ le64toh(o->entry.seqnum) != le64toh(f->header->head_entry_seqnum)) {
+ error(p, "Head entry sequence number incorrect");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (entry_seqnum_set &&
+ entry_seqnum >= le64toh(o->entry.seqnum)) {
+ error(p, "Entry sequence number out of synchronization");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ entry_seqnum = le64toh(o->entry.seqnum);
+ entry_seqnum_set = true;
+
+ if (entry_monotonic_set &&
+ sd_id128_equal(entry_boot_id, o->entry.boot_id) &&
+ entry_monotonic > le64toh(o->entry.monotonic)) {
+ error(p, "Entry timestamp out of synchronization");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ entry_monotonic = le64toh(o->entry.monotonic);
+ entry_boot_id = o->entry.boot_id;
+ entry_monotonic_set = true;
+
+ if (!entry_realtime_set &&
+ le64toh(o->entry.realtime) != le64toh(f->header->head_entry_realtime)) {
+ error(p, "Head entry realtime timestamp incorrect");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ entry_realtime = le64toh(o->entry.realtime);
+ entry_realtime_set = true;
+
+ n_entries++;
+ break;
+
+ case OBJECT_DATA_HASH_TABLE:
+ if (n_data_hash_tables > 1) {
+ error(p, "More than one data hash table");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (le64toh(f->header->data_hash_table_offset) != p + offsetof(HashTableObject, items) ||
+ le64toh(f->header->data_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
+ error(p, "Header fields for data hash table invalid");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ n_data_hash_tables++;
+ break;
+
+ case OBJECT_FIELD_HASH_TABLE:
+ if (n_field_hash_tables > 1) {
+ error(p, "More than one field hash table");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (le64toh(f->header->field_hash_table_offset) != p + offsetof(HashTableObject, items) ||
+ le64toh(f->header->field_hash_table_size) != le64toh(o->object.size) - offsetof(HashTableObject, items)) {
+ error(p, "Header fields for field hash table invalid");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ n_field_hash_tables++;
+ break;
+
+ case OBJECT_ENTRY_ARRAY:
+ r = write_uint64(entry_array_fd, p);
+ if (r < 0)
+ goto fail;
+
+ if (p == le64toh(f->header->entry_array_offset)) {
+ if (found_main_entry_array) {
+ error(p, "More than one main entry array");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ found_main_entry_array = true;
+ }
+
+ n_entry_arrays++;
+ break;
+
+ case OBJECT_TAG:
+ if (!JOURNAL_HEADER_SEALED(f->header)) {
+ error(p, "Tag object in file without sealing");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (le64toh(o->tag.seqnum) != n_tags + 1) {
+ error(p, "Tag sequence number out of synchronization");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (le64toh(o->tag.epoch) < last_epoch) {
+ error(p, "Epoch sequence out of synchronization");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+#if HAVE_GCRYPT
+ if (f->seal) {
+ uint64_t q, rt;
+
+ debug(p, "Checking tag %"PRIu64"...", le64toh(o->tag.seqnum));
+
+ rt = f->fss_start_usec + le64toh(o->tag.epoch) * f->fss_interval_usec;
+ if (entry_realtime_set && entry_realtime >= rt + f->fss_interval_usec) {
+ error(p, "Tag/entry realtime timestamp out of synchronization");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ /* OK, now we know the epoch. So let's now set
+ * it, and calculate the HMAC for everything
+ * since the last tag. */
+ r = journal_file_fsprg_seek(f, le64toh(o->tag.epoch));
+ if (r < 0)
+ goto fail;
+
+ r = journal_file_hmac_start(f);
+ if (r < 0)
+ goto fail;
+
+ if (last_tag == 0) {
+ r = journal_file_hmac_put_header(f);
+ if (r < 0)
+ goto fail;
+
+ q = le64toh(f->header->header_size);
+ } else
+ q = last_tag;
+
+ while (q <= p) {
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, q, &o);
+ if (r < 0)
+ goto fail;
+
+ r = journal_file_hmac_put_object(f, OBJECT_UNUSED, o, q);
+ if (r < 0)
+ goto fail;
+
+ q = q + ALIGN64(le64toh(o->object.size));
+ }
+
+ /* Position might have changed, let's reposition things */
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
+ if (r < 0)
+ goto fail;
+
+ if (memcmp(o->tag.tag, gcry_md_read(f->hmac, 0), TAG_LENGTH) != 0) {
+ error(p, "Tag failed verification");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ f->hmac_running = false;
+ last_tag_realtime = rt;
+ last_sealed_realtime = entry_realtime;
+ }
+
+ last_tag = p + ALIGN64(le64toh(o->object.size));
+#endif
+
+ last_epoch = le64toh(o->tag.epoch);
+
+ n_tags++;
+ break;
+
+ default:
+ n_weird++;
+ }
+
+ if (p == le64toh(f->header->tail_object_offset)) {
+ found_last = true;
+ break;
+ }
+
+ p = p + ALIGN64(le64toh(o->object.size));
+ }
+
+ if (!found_last && le64toh(f->header->tail_object_offset) != 0) {
+ error(le64toh(f->header->tail_object_offset), "Tail object pointer dead");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (n_objects != le64toh(f->header->n_objects)) {
+ error(offsetof(Header, n_objects), "Object number mismatch");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (n_entries != le64toh(f->header->n_entries)) {
+ error(offsetof(Header, n_entries), "Entry number mismatch");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
+ n_data != le64toh(f->header->n_data)) {
+ error(offsetof(Header, n_data), "Data number mismatch");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
+ n_fields != le64toh(f->header->n_fields)) {
+ error(offsetof(Header, n_fields), "Field number mismatch");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_tags) &&
+ n_tags != le64toh(f->header->n_tags)) {
+ error(offsetof(Header, n_tags), "Tag number mismatch");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays) &&
+ n_entry_arrays != le64toh(f->header->n_entry_arrays)) {
+ error(offsetof(Header, n_entry_arrays), "Entry array number mismatch");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (!found_main_entry_array && le64toh(f->header->entry_array_offset) != 0) {
+ error(0, "Missing entry array");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (entry_seqnum_set &&
+ entry_seqnum != le64toh(f->header->tail_entry_seqnum)) {
+ error(offsetof(Header, tail_entry_seqnum), "Invalid tail seqnum");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (entry_monotonic_set &&
+ (sd_id128_equal(entry_boot_id, f->header->boot_id) &&
+ entry_monotonic != le64toh(f->header->tail_entry_monotonic))) {
+ error(0, "Invalid tail monotonic timestamp");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ if (entry_realtime_set && entry_realtime != le64toh(f->header->tail_entry_realtime)) {
+ error(0, "Invalid tail realtime timestamp");
+ r = -EBADMSG;
+ goto fail;
+ }
+
+ /* Second iteration: we follow all objects referenced from the
+ * two entry points: the object hash table and the entry
+ * array. We also check that everything referenced (directly
+ * or indirectly) in the data hash table also exists in the
+ * entry array, and vice versa. Note that we do not care for
+ * unreferenced objects. We only care that everything that is
+ * referenced is consistent. */
+
+ r = verify_entry_array(f,
+ cache_data_fd, n_data,
+ cache_entry_fd, n_entries,
+ cache_entry_array_fd, n_entry_arrays,
+ &last_usec,
+ show_progress);
+ if (r < 0)
+ goto fail;
+
+ r = verify_hash_table(f,
+ cache_data_fd, n_data,
+ cache_entry_fd, n_entries,
+ cache_entry_array_fd, n_entry_arrays,
+ &last_usec,
+ show_progress);
+ if (r < 0)
+ goto fail;
+
+ if (show_progress)
+ flush_progress();
+
+ mmap_cache_free_fd(f->mmap, cache_data_fd);
+ mmap_cache_free_fd(f->mmap, cache_entry_fd);
+ mmap_cache_free_fd(f->mmap, cache_entry_array_fd);
+
+ safe_close(data_fd);
+ safe_close(entry_fd);
+ safe_close(entry_array_fd);
+
+ if (first_contained)
+ *first_contained = le64toh(f->header->head_entry_realtime);
+ if (last_validated)
+ *last_validated = last_sealed_realtime;
+ if (last_contained)
+ *last_contained = le64toh(f->header->tail_entry_realtime);
+
+ return 0;
+
+fail:
+ if (show_progress)
+ flush_progress();
+
+ log_error("File corruption detected at %s:"OFSfmt" (of %llu bytes, %"PRIu64"%%).",
+ f->path,
+ p,
+ (unsigned long long) f->last_stat.st_size,
+ 100 * p / f->last_stat.st_size);
+
+ if (data_fd >= 0)
+ safe_close(data_fd);
+
+ if (entry_fd >= 0)
+ safe_close(entry_fd);
+
+ if (entry_array_fd >= 0)
+ safe_close(entry_array_fd);
+
+ if (cache_data_fd)
+ mmap_cache_free_fd(f->mmap, cache_data_fd);
+
+ if (cache_entry_fd)
+ mmap_cache_free_fd(f->mmap, cache_entry_fd);
+
+ if (cache_entry_array_fd)
+ mmap_cache_free_fd(f->mmap, cache_entry_array_fd);
+
+ return r;
+}
diff --git a/src/journal/journal-verify.h b/src/journal/journal-verify.h
new file mode 100644
index 0000000..f0ea31a
--- /dev/null
+++ b/src/journal/journal-verify.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "journal-file.h"
+
+int journal_file_verify(JournalFile *f, const char *key, usec_t *first_contained, usec_t *last_validated, usec_t *last_contained, bool show_progress);
diff --git a/src/journal/journalctl.c b/src/journal/journalctl.c
new file mode 100644
index 0000000..14a02ed
--- /dev/null
+++ b/src/journal/journalctl.c
@@ -0,0 +1,2706 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <fnmatch.h>
+#include <getopt.h>
+#include <linux/fs.h>
+#include <locale.h>
+#include <poll.h>
+#include <signal.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/inotify.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+#if HAVE_PCRE2
+# define PCRE2_CODE_UNIT_WIDTH 8
+# include <pcre2.h>
+#endif
+
+#include "sd-bus.h"
+#include "sd-device.h"
+#include "sd-journal.h"
+
+#include "acl-util.h"
+#include "alloc-util.h"
+#include "bus-error.h"
+#include "bus-util.h"
+#include "catalog.h"
+#include "chattr-util.h"
+#include "def.h"
+#include "device-private.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "fs-util.h"
+#include "fsprg.h"
+#include "glob-util.h"
+#include "hostname-util.h"
+#include "id128-print.h"
+#include "io-util.h"
+#include "journal-def.h"
+#include "journal-internal.h"
+#include "journal-qrcode.h"
+#include "journal-util.h"
+#include "journal-vacuum.h"
+#include "journal-verify.h"
+#include "locale-util.h"
+#include "log.h"
+#include "logs-show.h"
+#include "mkdir.h"
+#include "pager.h"
+#include "parse-util.h"
+#include "path-util.h"
+#include "pretty-print.h"
+#include "rlimit-util.h"
+#include "set.h"
+#include "sigbus.h"
+#include "string-table.h"
+#include "strv.h"
+#include "syslog-util.h"
+#include "terminal-util.h"
+#include "tmpfile-util.h"
+#include "unit-name.h"
+#include "user-util.h"
+
+#define DEFAULT_FSS_INTERVAL_USEC (15*USEC_PER_MINUTE)
+
+#define PROCESS_INOTIFY_INTERVAL 1024 /* Every 1,024 messages processed */
+
+#if HAVE_PCRE2
+DEFINE_TRIVIAL_CLEANUP_FUNC(pcre2_match_data*, pcre2_match_data_free);
+DEFINE_TRIVIAL_CLEANUP_FUNC(pcre2_code*, pcre2_code_free);
+
+static int pattern_compile(const char *pattern, unsigned flags, pcre2_code **out) {
+ int errorcode, r;
+ PCRE2_SIZE erroroffset;
+ pcre2_code *p;
+
+ p = pcre2_compile((PCRE2_SPTR8) pattern,
+ PCRE2_ZERO_TERMINATED, flags, &errorcode, &erroroffset, NULL);
+ if (!p) {
+ unsigned char buf[LINE_MAX];
+
+ r = pcre2_get_error_message(errorcode, buf, sizeof buf);
+
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Bad pattern \"%s\": %s", pattern,
+ r < 0 ? "unknown error" : (char *)buf);
+ }
+
+ *out = p;
+ return 0;
+}
+
+#endif
+
+enum {
+ /* Special values for arg_lines */
+ ARG_LINES_DEFAULT = -2,
+ ARG_LINES_ALL = -1,
+};
+
+static OutputMode arg_output = OUTPUT_SHORT;
+static bool arg_utc = false;
+static bool arg_follow = false;
+static bool arg_full = true;
+static bool arg_all = false;
+static PagerFlags arg_pager_flags = 0;
+static int arg_lines = ARG_LINES_DEFAULT;
+static bool arg_no_tail = false;
+static bool arg_quiet = false;
+static bool arg_merge = false;
+static bool arg_boot = false;
+static sd_id128_t arg_boot_id = {};
+static int arg_boot_offset = 0;
+static bool arg_dmesg = false;
+static bool arg_no_hostname = false;
+static const char *arg_cursor = NULL;
+static const char *arg_after_cursor = NULL;
+static bool arg_show_cursor = false;
+static const char *arg_directory = NULL;
+static char **arg_file = NULL;
+static bool arg_file_stdin = false;
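+/* Bit mask of syslog priority levels (0..7) to show; 0xFF enables all of them */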
+static int arg_priorities = 0xFF;
+static char *arg_verify_key = NULL;
+#if HAVE_GCRYPT
+static usec_t arg_interval = DEFAULT_FSS_INTERVAL_USEC;
+static bool arg_force = false;
+#endif
+static usec_t arg_since, arg_until;
+static bool arg_since_set = false, arg_until_set = false;
+static char **arg_syslog_identifier = NULL;
+static char **arg_system_units = NULL;
+static char **arg_user_units = NULL;
+static const char *arg_field = NULL;
+static bool arg_catalog = false;
+static bool arg_reverse = false;
+static int arg_journal_type = 0;
+static char *arg_root = NULL;
+static const char *arg_machine = NULL;
+static uint64_t arg_vacuum_size = 0;
+static uint64_t arg_vacuum_n_files = 0;
+static usec_t arg_vacuum_time = 0;
+static char **arg_output_fields = NULL;
+
+#if HAVE_PCRE2
+static const char *arg_pattern = NULL;
+static pcre2_code *arg_compiled_pattern = NULL;
+static int arg_case_sensitive = -1; /* -1 means be smart */
+#endif
+
+static enum {
+ ACTION_SHOW,
+ ACTION_NEW_ID128,
+ ACTION_PRINT_HEADER,
+ ACTION_SETUP_KEYS,
+ ACTION_VERIFY,
+ ACTION_DISK_USAGE,
+ ACTION_LIST_CATALOG,
+ ACTION_DUMP_CATALOG,
+ ACTION_UPDATE_CATALOG,
+ ACTION_LIST_BOOTS,
+ ACTION_FLUSH,
+ ACTION_SYNC,
+ ACTION_ROTATE,
+ ACTION_VACUUM,
+ ACTION_ROTATE_AND_VACUUM,
+ ACTION_LIST_FIELDS,
+ ACTION_LIST_FIELD_NAMES,
+} arg_action = ACTION_SHOW;
+
+typedef struct BootId {
+ sd_id128_t id;
+ uint64_t first;
+ uint64_t last;
+ LIST_FIELDS(struct BootId, boot_list);
+} BootId;
+
+static int add_matches_for_device(sd_journal *j, const char *devpath) {
+ _cleanup_(sd_device_unrefp) sd_device *device = NULL;
+ sd_device *d = NULL;
+ struct stat st;
+ int r;
+
+ assert(j);
+ assert(devpath);
+
+ if (!path_startswith(devpath, "/dev/")) {
+ log_error("Devpath does not start with /dev/");
+ return -EINVAL;
+ }
+
+ if (stat(devpath, &st) < 0)
+ return log_error_errno(errno, "Couldn't stat file: %m");
+
+ r = device_new_from_stat_rdev(&device, &st);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get device from devnum %u:%u: %m", major(st.st_rdev), minor(st.st_rdev));
+
+ for (d = device; d; ) {
+ _cleanup_free_ char *match = NULL;
+ const char *subsys, *sysname, *devnode;
+ sd_device *parent;
+
+ r = sd_device_get_subsystem(d, &subsys);
+ if (r < 0)
+ goto get_parent;
+
+ r = sd_device_get_sysname(d, &sysname);
+ if (r < 0)
+ goto get_parent;
+
+ match = strjoin("_KERNEL_DEVICE=+", subsys, ":", sysname);
+ if (!match)
+ return log_oom();
+
+ r = sd_journal_add_match(j, match, 0);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add match: %m");
+
+ if (sd_device_get_devname(d, &devnode) >= 0) {
+ _cleanup_free_ char *match1 = NULL;
+
+ r = stat(devnode, &st);
+ if (r < 0)
+ return log_error_errno(r, "Failed to stat() device node \"%s\": %m", devnode);
+
+ r = asprintf(&match1, "_KERNEL_DEVICE=%c%u:%u", S_ISBLK(st.st_mode) ? 'b' : 'c', major(st.st_rdev), minor(st.st_rdev));
+ if (r < 0)
+ return log_oom();
+
+ r = sd_journal_add_match(j, match1, 0);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add match: %m");
+ }
+
+get_parent:
+ if (sd_device_get_parent(d, &parent) < 0)
+ break;
+
+ d = parent;
+ }
+
+ r = add_match_this_boot(j, arg_machine);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add match for the current boot: %m");
+
+ return 0;
+}
+
+static char *format_timestamp_maybe_utc(char *buf, size_t l, usec_t t) {
+
+ if (arg_utc)
+ return format_timestamp_utc(buf, l, t);
+
+ return format_timestamp(buf, l, t);
+}
+
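+/* Parses a boot descriptor of the form [ID][±offset]: an optional 128 bit
+ * boot ID given as 32 hex characters, optionally followed by a signed
+ * integer offset relative to that boot (or to the current boot if no ID is
+ * specified). */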
+static int parse_boot_descriptor(const char *x, sd_id128_t *boot_id, int *offset) {
+ sd_id128_t id = SD_ID128_NULL;
+ int off = 0, r;
+
+ if (strlen(x) >= 32) {
+ char *t;
+
+ t = strndupa(x, 32);
+ r = sd_id128_from_string(t, &id);
+ if (r >= 0)
+ x += 32;
+
+ if (!IN_SET(*x, 0, '-', '+'))
+ return -EINVAL;
+
+ if (*x != 0) {
+ r = safe_atoi(x, &off);
+ if (r < 0)
+ return r;
+ }
+ } else {
+ r = safe_atoi(x, &off);
+ if (r < 0)
+ return r;
+ }
+
+ if (boot_id)
+ *boot_id = id;
+
+ if (offset)
+ *offset = off;
+
+ return 0;
+}
+
+static int help(void) {
+ _cleanup_free_ char *link = NULL;
+ int r;
+
+ (void) pager_open(arg_pager_flags);
+
+ r = terminal_urlify_man("journalctl", "1", &link);
+ if (r < 0)
+ return log_oom();
+
+ printf("%s [OPTIONS...] [MATCHES...]\n\n"
+ "Query the journal.\n\n"
+ "Options:\n"
+ " --system Show the system journal\n"
+ " --user Show the user journal for the current user\n"
+ " -M --machine=CONTAINER Operate on local container\n"
+ " -S --since=DATE Show entries not older than the specified date\n"
+ " -U --until=DATE Show entries not newer than the specified date\n"
+ " -c --cursor=CURSOR Show entries starting at the specified cursor\n"
+ " --after-cursor=CURSOR Show entries after the specified cursor\n"
+ " --show-cursor Print the cursor after all the entries\n"
+ " -b --boot[=ID] Show current boot or the specified boot\n"
+ " --list-boots Show terse information about recorded boots\n"
+ " -k --dmesg Show kernel message log from the current boot\n"
+ " -u --unit=UNIT Show logs from the specified unit\n"
+ " --user-unit=UNIT Show logs from the specified user unit\n"
+ " -t --identifier=STRING Show entries with the specified syslog identifier\n"
+ " -p --priority=RANGE Show entries with the specified priority\n"
+ " -g --grep=PATTERN Show entries with MESSAGE matching PATTERN\n"
+ " --case-sensitive[=BOOL] Force case sensitive or insensitive matching\n"
+ " -e --pager-end Immediately jump to the end in the pager\n"
+ " -f --follow Follow the journal\n"
+ " -n --lines[=INTEGER] Number of journal entries to show\n"
+ " --no-tail Show all lines, even in follow mode\n"
+ " -r --reverse Show the newest entries first\n"
+ " -o --output=STRING Change journal output mode (short, short-precise,\n"
+ " short-iso, short-iso-precise, short-full,\n"
+ " short-monotonic, short-unix, verbose, export,\n"
+ " json, json-pretty, json-sse, json-seq, cat,\n"
+ " with-unit)\n"
+ " --output-fields=LIST Select fields to print in verbose/export/json modes\n"
+ " --utc Express time in Coordinated Universal Time (UTC)\n"
+ " -x --catalog Add message explanations where available\n"
+ " --no-full Ellipsize fields\n"
+ " -a --all Show all fields, including long and unprintable\n"
+ " -q --quiet Do not show info messages and privilege warning\n"
+ " --no-pager Do not pipe output into a pager\n"
+ " --no-hostname Suppress output of hostname field\n"
+ " -m --merge Show entries from all available journals\n"
+ " -D --directory=PATH Show journal files from directory\n"
+ " --file=PATH Show journal file\n"
+ " --root=ROOT Operate on files below a root directory\n"
+ " --interval=TIME Time interval for changing the FSS sealing key\n"
+ " --verify-key=KEY Specify FSS verification key\n"
+ " --force Override an existing FSS key pair with --setup-keys\n"
+ "\nCommands:\n"
+ " -h --help Show this help text\n"
+ " --version Show package version\n"
+ " -N --fields List all field names currently used\n"
+ " -F --field=FIELD List all values that a specified field takes\n"
+ " --disk-usage Show total disk usage of all journal files\n"
+ " --vacuum-size=BYTES Reduce disk usage below specified size\n"
+ " --vacuum-files=INT Leave only the specified number of journal files\n"
+ " --vacuum-time=TIME Remove journal files older than specified time\n"
+ " --verify Verify journal file consistency\n"
+ " --sync Synchronize unwritten journal messages to disk\n"
+ " --flush Flush all journal data from /run into /var\n"
+ " --rotate Request immediate rotation of the journal files\n"
+ " --header Show journal header information\n"
+ " --list-catalog Show all message IDs in the catalog\n"
+ " --dump-catalog Show entries in the message catalog\n"
+ " --update-catalog Update the message catalog database\n"
+ " --setup-keys Generate a new FSS key pair\n"
+ "\nSee the %s for details.\n"
+ , program_invocation_short_name
+ , link
+ );
+
+ return 0;
+}
+
+static int parse_argv(int argc, char *argv[]) {
+
+ enum {
+ ARG_VERSION = 0x100,
+ ARG_NO_PAGER,
+ ARG_NO_FULL,
+ ARG_NO_TAIL,
+ ARG_NEW_ID128,
+ ARG_THIS_BOOT,
+ ARG_LIST_BOOTS,
+ ARG_USER,
+ ARG_SYSTEM,
+ ARG_ROOT,
+ ARG_HEADER,
+ ARG_SETUP_KEYS,
+ ARG_FILE,
+ ARG_INTERVAL,
+ ARG_VERIFY,
+ ARG_VERIFY_KEY,
+ ARG_DISK_USAGE,
+ ARG_AFTER_CURSOR,
+ ARG_SHOW_CURSOR,
+ ARG_USER_UNIT,
+ ARG_LIST_CATALOG,
+ ARG_DUMP_CATALOG,
+ ARG_UPDATE_CATALOG,
+ ARG_FORCE,
+ ARG_CASE_SENSITIVE,
+ ARG_UTC,
+ ARG_SYNC,
+ ARG_FLUSH,
+ ARG_ROTATE,
+ ARG_VACUUM_SIZE,
+ ARG_VACUUM_FILES,
+ ARG_VACUUM_TIME,
+ ARG_NO_HOSTNAME,
+ ARG_OUTPUT_FIELDS,
+ };
+
+ static const struct option options[] = {
+ { "help", no_argument, NULL, 'h' },
+ { "version" , no_argument, NULL, ARG_VERSION },
+ { "no-pager", no_argument, NULL, ARG_NO_PAGER },
+ { "pager-end", no_argument, NULL, 'e' },
+ { "follow", no_argument, NULL, 'f' },
+ { "force", no_argument, NULL, ARG_FORCE },
+ { "output", required_argument, NULL, 'o' },
+ { "all", no_argument, NULL, 'a' },
+ { "full", no_argument, NULL, 'l' },
+ { "no-full", no_argument, NULL, ARG_NO_FULL },
+ { "lines", optional_argument, NULL, 'n' },
+ { "no-tail", no_argument, NULL, ARG_NO_TAIL },
+ { "new-id128", no_argument, NULL, ARG_NEW_ID128 }, /* deprecated */
+ { "quiet", no_argument, NULL, 'q' },
+ { "merge", no_argument, NULL, 'm' },
+ { "this-boot", no_argument, NULL, ARG_THIS_BOOT }, /* deprecated */
+ { "boot", optional_argument, NULL, 'b' },
+ { "list-boots", no_argument, NULL, ARG_LIST_BOOTS },
+ { "dmesg", no_argument, NULL, 'k' },
+ { "system", no_argument, NULL, ARG_SYSTEM },
+ { "user", no_argument, NULL, ARG_USER },
+ { "directory", required_argument, NULL, 'D' },
+ { "file", required_argument, NULL, ARG_FILE },
+ { "root", required_argument, NULL, ARG_ROOT },
+ { "header", no_argument, NULL, ARG_HEADER },
+ { "identifier", required_argument, NULL, 't' },
+ { "priority", required_argument, NULL, 'p' },
+ { "grep", required_argument, NULL, 'g' },
+ { "case-sensitive", optional_argument, NULL, ARG_CASE_SENSITIVE },
+ { "setup-keys", no_argument, NULL, ARG_SETUP_KEYS },
+ { "interval", required_argument, NULL, ARG_INTERVAL },
+ { "verify", no_argument, NULL, ARG_VERIFY },
+ { "verify-key", required_argument, NULL, ARG_VERIFY_KEY },
+ { "disk-usage", no_argument, NULL, ARG_DISK_USAGE },
+ { "cursor", required_argument, NULL, 'c' },
+ { "after-cursor", required_argument, NULL, ARG_AFTER_CURSOR },
+ { "show-cursor", no_argument, NULL, ARG_SHOW_CURSOR },
+ { "since", required_argument, NULL, 'S' },
+ { "until", required_argument, NULL, 'U' },
+ { "unit", required_argument, NULL, 'u' },
+ { "user-unit", required_argument, NULL, ARG_USER_UNIT },
+ { "field", required_argument, NULL, 'F' },
+ { "fields", no_argument, NULL, 'N' },
+ { "catalog", no_argument, NULL, 'x' },
+ { "list-catalog", no_argument, NULL, ARG_LIST_CATALOG },
+ { "dump-catalog", no_argument, NULL, ARG_DUMP_CATALOG },
+ { "update-catalog", no_argument, NULL, ARG_UPDATE_CATALOG },
+ { "reverse", no_argument, NULL, 'r' },
+ { "machine", required_argument, NULL, 'M' },
+ { "utc", no_argument, NULL, ARG_UTC },
+ { "flush", no_argument, NULL, ARG_FLUSH },
+ { "sync", no_argument, NULL, ARG_SYNC },
+ { "rotate", no_argument, NULL, ARG_ROTATE },
+ { "vacuum-size", required_argument, NULL, ARG_VACUUM_SIZE },
+ { "vacuum-files", required_argument, NULL, ARG_VACUUM_FILES },
+ { "vacuum-time", required_argument, NULL, ARG_VACUUM_TIME },
+ { "no-hostname", no_argument, NULL, ARG_NO_HOSTNAME },
+ { "output-fields", required_argument, NULL, ARG_OUTPUT_FIELDS },
+ {}
+ };
+
+ int c, r;
+
+ assert(argc >= 0);
+ assert(argv);
+
+ while ((c = getopt_long(argc, argv, "hefo:aln::qmb::kD:p:g:c:S:U:t:u:NF:xrM:", options, NULL)) >= 0)
+
+ switch (c) {
+
+ case 'h':
+ return help();
+
+ case ARG_VERSION:
+ return version();
+
+ case ARG_NO_PAGER:
+ arg_pager_flags |= PAGER_DISABLE;
+ break;
+
+ case 'e':
+ arg_pager_flags |= PAGER_JUMP_TO_END;
+
+ if (arg_lines == ARG_LINES_DEFAULT)
+ arg_lines = 1000;
+
+ break;
+
+ case 'f':
+ arg_follow = true;
+ break;
+
+ case 'o':
+ if (streq(optarg, "help")) {
+ DUMP_STRING_TABLE(output_mode, OutputMode, _OUTPUT_MODE_MAX);
+ return 0;
+ }
+
+ arg_output = output_mode_from_string(optarg);
+ if (arg_output < 0) {
+ log_error("Unknown output format '%s'.", optarg);
+ return -EINVAL;
+ }
+
+ if (IN_SET(arg_output, OUTPUT_EXPORT, OUTPUT_JSON, OUTPUT_JSON_PRETTY, OUTPUT_JSON_SSE, OUTPUT_JSON_SEQ, OUTPUT_CAT))
+ arg_quiet = true;
+
+ break;
+
+ case 'l':
+ arg_full = true;
+ break;
+
+ case ARG_NO_FULL:
+ arg_full = false;
+ break;
+
+ case 'a':
+ arg_all = true;
+ break;
+
+ case 'n':
+ if (optarg) {
+ if (streq(optarg, "all"))
+ arg_lines = ARG_LINES_ALL;
+ else {
+ r = safe_atoi(optarg, &arg_lines);
+ if (r < 0 || arg_lines < 0) {
+ log_error("Failed to parse lines '%s'", optarg);
+ return -EINVAL;
+ }
+ }
+ } else {
+ arg_lines = 10;
+
+ /* Hmm, no argument? Maybe the next
+ * word on the command line is
+ * supposed to be the argument? Let's
+ * see if there is one, and whether
+ * it is parsable. */
+ if (optind < argc) {
+ int n;
+ if (streq(argv[optind], "all")) {
+ arg_lines = ARG_LINES_ALL;
+ optind++;
+ } else if (safe_atoi(argv[optind], &n) >= 0 && n >= 0) {
+ arg_lines = n;
+ optind++;
+ }
+ }
+ }
+
+ break;
+
+ case ARG_NO_TAIL:
+ arg_no_tail = true;
+ break;
+
+ case ARG_NEW_ID128:
+ arg_action = ACTION_NEW_ID128;
+ break;
+
+ case 'q':
+ arg_quiet = true;
+ break;
+
+ case 'm':
+ arg_merge = true;
+ break;
+
+ case ARG_THIS_BOOT:
+ arg_boot = true;
+ break;
+
+ case 'b':
+ arg_boot = true;
+
+ if (optarg) {
+ r = parse_boot_descriptor(optarg, &arg_boot_id, &arg_boot_offset);
+ if (r < 0) {
+ log_error("Failed to parse boot descriptor '%s'", optarg);
+ return -EINVAL;
+ }
+ } else {
+
+ /* Hmm, no argument? Maybe the next
+ * word on the command line is
+ * supposed to be the argument? Let's
+ * see if there is one, and whether it
+ * is parsable as a boot descriptor... */
+
+ if (optind < argc &&
+ parse_boot_descriptor(argv[optind], &arg_boot_id, &arg_boot_offset) >= 0)
+ optind++;
+ }
+
+ break;
+
+ case ARG_LIST_BOOTS:
+ arg_action = ACTION_LIST_BOOTS;
+ break;
+
+ case 'k':
+ arg_boot = arg_dmesg = true;
+ break;
+
+ case ARG_SYSTEM:
+ arg_journal_type |= SD_JOURNAL_SYSTEM;
+ break;
+
+ case ARG_USER:
+ arg_journal_type |= SD_JOURNAL_CURRENT_USER;
+ break;
+
+ case 'M':
+ arg_machine = optarg;
+ break;
+
+ case 'D':
+ arg_directory = optarg;
+ break;
+
+ case ARG_FILE:
+ if (streq(optarg, "-"))
+ /* An undocumented feature: we can read journal files from STDIN. We keep
+ * this undocumented, since we only support it for mmap-able, seekable
+ * files, and not for example pipes, which are probably the primary use
+ * case for reading things from STDIN anyway. */
+ arg_file_stdin = true;
+ else {
+ r = glob_extend(&arg_file, optarg);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add paths: %m");
+ }
+ break;
+
+ case ARG_ROOT:
+ r = parse_path_argument_and_warn(optarg, true, &arg_root);
+ if (r < 0)
+ return r;
+ break;
+
+ case 'c':
+ arg_cursor = optarg;
+ break;
+
+ case ARG_AFTER_CURSOR:
+ arg_after_cursor = optarg;
+ break;
+
+ case ARG_SHOW_CURSOR:
+ arg_show_cursor = true;
+ break;
+
+ case ARG_HEADER:
+ arg_action = ACTION_PRINT_HEADER;
+ break;
+
+ case ARG_VERIFY:
+ arg_action = ACTION_VERIFY;
+ break;
+
+ case ARG_DISK_USAGE:
+ arg_action = ACTION_DISK_USAGE;
+ break;
+
+ case ARG_VACUUM_SIZE:
+ r = parse_size(optarg, 1024, &arg_vacuum_size);
+ if (r < 0) {
+ log_error("Failed to parse vacuum size: %s", optarg);
+ return r;
+ }
+
+ arg_action = arg_action == ACTION_ROTATE ? ACTION_ROTATE_AND_VACUUM : ACTION_VACUUM;
+ break;
+
+ case ARG_VACUUM_FILES:
+ r = safe_atou64(optarg, &arg_vacuum_n_files);
+ if (r < 0) {
+ log_error("Failed to parse vacuum files: %s", optarg);
+ return r;
+ }
+
+ arg_action = arg_action == ACTION_ROTATE ? ACTION_ROTATE_AND_VACUUM : ACTION_VACUUM;
+ break;
+
+ case ARG_VACUUM_TIME:
+ r = parse_sec(optarg, &arg_vacuum_time);
+ if (r < 0) {
+ log_error("Failed to parse vacuum time: %s", optarg);
+ return r;
+ }
+
+ arg_action = arg_action == ACTION_ROTATE ? ACTION_ROTATE_AND_VACUUM : ACTION_VACUUM;
+ break;
+
+#if HAVE_GCRYPT
+ case ARG_FORCE:
+ arg_force = true;
+ break;
+
+ case ARG_SETUP_KEYS:
+ arg_action = ACTION_SETUP_KEYS;
+ break;
+
+ case ARG_VERIFY_KEY:
+ arg_action = ACTION_VERIFY;
+ r = free_and_strdup(&arg_verify_key, optarg);
+ if (r < 0)
+ return r;
+ /* Use memset not string_erase so this doesn't look confusing
+ * in ps or htop output. */
+ memset(optarg, 'x', strlen(optarg));
+
+ arg_merge = false;
+ break;
+
+ case ARG_INTERVAL:
+ r = parse_sec(optarg, &arg_interval);
+ if (r < 0 || arg_interval <= 0) {
+ log_error("Failed to parse sealing key change interval: %s", optarg);
+ return -EINVAL;
+ }
+ break;
+#else
+ case ARG_SETUP_KEYS:
+ case ARG_VERIFY_KEY:
+ case ARG_INTERVAL:
+ case ARG_FORCE:
+ log_error("Compiled without forward-secure sealing support.");
+ return -EOPNOTSUPP;
+#endif
+
+ case 'p': {
+ const char *dots;
+
+ dots = strstr(optarg, "..");
+ if (dots) {
+ char *a;
+ int from, to, i;
+
+ /* a range */
+ a = strndup(optarg, dots - optarg);
+ if (!a)
+ return log_oom();
+
+ from = log_level_from_string(a);
+ to = log_level_from_string(dots + 2);
+ free(a);
+
+ if (from < 0 || to < 0) {
+ log_error("Failed to parse log level range %s", optarg);
+ return -EINVAL;
+ }
+
+ arg_priorities = 0;
+
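+                                /* Set a bit for every level between the two endpoints, inclusive,
+                                 * accepting the range in either order. */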
+ if (from < to) {
+ for (i = from; i <= to; i++)
+ arg_priorities |= 1 << i;
+ } else {
+ for (i = to; i <= from; i++)
+ arg_priorities |= 1 << i;
+ }
+
+ } else {
+ int p, i;
+
+ p = log_level_from_string(optarg);
+ if (p < 0) {
+ log_error("Unknown log level %s", optarg);
+ return -EINVAL;
+ }
+
+ arg_priorities = 0;
+
+ for (i = 0; i <= p; i++)
+ arg_priorities |= 1 << i;
+ }
+
+ break;
+ }
+
+#if HAVE_PCRE2
+ case 'g':
+ arg_pattern = optarg;
+ break;
+
+ case ARG_CASE_SENSITIVE:
+ if (optarg) {
+ r = parse_boolean(optarg);
+ if (r < 0)
+ return log_error_errno(r, "Bad --case-sensitive= argument \"%s\": %m", optarg);
+ arg_case_sensitive = r;
+ } else
+ arg_case_sensitive = true;
+
+ break;
+#else
+ case 'g':
+ case ARG_CASE_SENSITIVE:
+ return log_error("Compiled without pattern matching support");
+#endif
+
+ case 'S':
+ r = parse_timestamp(optarg, &arg_since);
+ if (r < 0) {
+ log_error("Failed to parse timestamp: %s", optarg);
+ return -EINVAL;
+ }
+ arg_since_set = true;
+ break;
+
+ case 'U':
+ r = parse_timestamp(optarg, &arg_until);
+ if (r < 0) {
+ log_error("Failed to parse timestamp: %s", optarg);
+ return -EINVAL;
+ }
+ arg_until_set = true;
+ break;
+
+ case 't':
+ r = strv_extend(&arg_syslog_identifier, optarg);
+ if (r < 0)
+ return log_oom();
+ break;
+
+ case 'u':
+ r = strv_extend(&arg_system_units, optarg);
+ if (r < 0)
+ return log_oom();
+ break;
+
+ case ARG_USER_UNIT:
+ r = strv_extend(&arg_user_units, optarg);
+ if (r < 0)
+ return log_oom();
+ break;
+
+ case 'F':
+ arg_action = ACTION_LIST_FIELDS;
+ arg_field = optarg;
+ break;
+
+ case 'N':
+ arg_action = ACTION_LIST_FIELD_NAMES;
+ break;
+
+ case ARG_NO_HOSTNAME:
+ arg_no_hostname = true;
+ break;
+
+ case 'x':
+ arg_catalog = true;
+ break;
+
+ case ARG_LIST_CATALOG:
+ arg_action = ACTION_LIST_CATALOG;
+ break;
+
+ case ARG_DUMP_CATALOG:
+ arg_action = ACTION_DUMP_CATALOG;
+ break;
+
+ case ARG_UPDATE_CATALOG:
+ arg_action = ACTION_UPDATE_CATALOG;
+ break;
+
+ case 'r':
+ arg_reverse = true;
+ break;
+
+ case ARG_UTC:
+ arg_utc = true;
+ break;
+
+ case ARG_FLUSH:
+ arg_action = ACTION_FLUSH;
+ break;
+
+ case ARG_ROTATE:
+ arg_action = arg_action == ACTION_VACUUM ? ACTION_ROTATE_AND_VACUUM : ACTION_ROTATE;
+ break;
+
+ case ARG_SYNC:
+ arg_action = ACTION_SYNC;
+ break;
+
+ case ARG_OUTPUT_FIELDS: {
+ _cleanup_strv_free_ char **v = NULL;
+
+ v = strv_split(optarg, ",");
+ if (!v)
+ return log_oom();
+
+ if (!arg_output_fields)
+ arg_output_fields = TAKE_PTR(v);
+ else {
+ r = strv_extend_strv(&arg_output_fields, v, true);
+ if (r < 0)
+ return log_oom();
+ }
+ break;
+ }
+
+ case '?':
+ return -EINVAL;
+
+ default:
+ assert_not_reached("Unhandled option");
+ }
+
+ if (arg_follow && !arg_no_tail && !arg_since && arg_lines == ARG_LINES_DEFAULT)
+ arg_lines = 10;
+
+ if (!!arg_directory + !!arg_file + !!arg_machine + !!arg_root > 1) {
+ log_error("Please specify at most one of -D/--directory=, --file=, -M/--machine=, --root.");
+ return -EINVAL;
+ }
+
+ if (arg_since_set && arg_until_set && arg_since > arg_until) {
+ log_error("--since= must be before --until=.");
+ return -EINVAL;
+ }
+
+ if (!!arg_cursor + !!arg_after_cursor + !!arg_since_set > 1) {
+ log_error("Please specify only one of --since=, --cursor=, and --after-cursor.");
+ return -EINVAL;
+ }
+
+ if (arg_follow && arg_reverse) {
+                log_error("Please specify either --reverse or --follow, not both.");
+ return -EINVAL;
+ }
+
+ if (!IN_SET(arg_action, ACTION_SHOW, ACTION_DUMP_CATALOG, ACTION_LIST_CATALOG) && optind < argc) {
+ log_error("Extraneous arguments starting with '%s'", argv[optind]);
+ return -EINVAL;
+ }
+
+ if ((arg_boot || arg_action == ACTION_LIST_BOOTS) && arg_merge) {
+ log_error("Using --boot or --list-boots with --merge is not supported.");
+ return -EINVAL;
+ }
+
+ if (!strv_isempty(arg_system_units) && arg_journal_type == SD_JOURNAL_CURRENT_USER) {
+                /* Specifying --user and --unit= at the same time makes no sense (the former restricts output to the
+                 * user journal, while the latter only matches system units, thus resulting in empty output). Let's be
+                 * nice to users, and automatically turn --unit= into --user-unit= if combined with --user. */
+ r = strv_extend_strv(&arg_user_units, arg_system_units, true);
+ if (r < 0)
+ return r;
+
+ arg_system_units = strv_free(arg_system_units);
+ }
+
+#if HAVE_PCRE2
+ if (arg_pattern) {
+ unsigned flags;
+
+ if (arg_case_sensitive >= 0)
+ flags = !arg_case_sensitive * PCRE2_CASELESS;
+ else {
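+                        /* No explicit request: decide based on the pattern itself, and match case-sensitively
+                         * only if the pattern contains an upper-case character. */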
+ _cleanup_(pcre2_match_data_freep) pcre2_match_data *md = NULL;
+ bool has_case;
+ _cleanup_(pcre2_code_freep) pcre2_code *cs = NULL;
+
+ md = pcre2_match_data_create(1, NULL);
+ if (!md)
+ return log_oom();
+
+ r = pattern_compile("[[:upper:]]", 0, &cs);
+ if (r < 0)
+ return r;
+
+ r = pcre2_match(cs, (PCRE2_SPTR8) arg_pattern, PCRE2_ZERO_TERMINATED, 0, 0, md, NULL);
+ has_case = r >= 0;
+
+ flags = !has_case * PCRE2_CASELESS;
+ }
+
+ log_debug("Doing case %s matching based on %s",
+ flags & PCRE2_CASELESS ? "insensitive" : "sensitive",
+ arg_case_sensitive >= 0 ? "request" : "pattern casing");
+
+ r = pattern_compile(arg_pattern, flags, &arg_compiled_pattern);
+ if (r < 0)
+ return r;
+ }
+#endif
+
+ return 1;
+}
+
+static int add_matches(sd_journal *j, char **args) {
+ char **i;
+ bool have_term = false;
+
+ assert(j);
+
+ STRV_FOREACH(i, args) {
+ int r;
+
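+                /* A lone "+" separates terms: it is turned into a disjunction (logical OR) between the
+                 * matches added before and after it. */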
+ if (streq(*i, "+")) {
+ if (!have_term)
+ break;
+ r = sd_journal_add_disjunction(j);
+ have_term = false;
+
+ } else if (path_is_absolute(*i)) {
+ _cleanup_free_ char *p = NULL, *t = NULL, *t2 = NULL, *interpreter = NULL;
+ struct stat st;
+
+ r = chase_symlinks(*i, NULL, CHASE_TRAIL_SLASH, &p);
+ if (r < 0)
+ return log_error_errno(r, "Couldn't canonicalize path: %m");
+
+ if (lstat(p, &st) < 0)
+ return log_error_errno(errno, "Couldn't stat file: %m");
+
+ if (S_ISREG(st.st_mode) && (0111 & st.st_mode)) {
+ if (executable_is_script(p, &interpreter) > 0) {
+ _cleanup_free_ char *comm;
+
+ comm = strndup(basename(p), 15);
+ if (!comm)
+ return log_oom();
+
+ t = strappend("_COMM=", comm);
+ if (!t)
+ return log_oom();
+
+                                        /* Append _EXE only if the interpreter is not a link.
+                                         * Otherwise, it would frequently point to an outdated binary. */
+ if (lstat(interpreter, &st) == 0 && !S_ISLNK(st.st_mode)) {
+ t2 = strappend("_EXE=", interpreter);
+ if (!t2)
+ return log_oom();
+ }
+ } else {
+ t = strappend("_EXE=", p);
+ if (!t)
+ return log_oom();
+ }
+
+ r = sd_journal_add_match(j, t, 0);
+
+                                if (r >= 0 && t2)
+ r = sd_journal_add_match(j, t2, 0);
+
+ } else if (S_ISCHR(st.st_mode) || S_ISBLK(st.st_mode)) {
+ r = add_matches_for_device(j, p);
+ if (r < 0)
+ return r;
+ } else
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "File is neither a device node, nor regular file, nor executable: %s",
+ *i);
+
+ have_term = true;
+ } else {
+ r = sd_journal_add_match(j, *i, 0);
+ have_term = true;
+ }
+
+ if (r < 0)
+ return log_error_errno(r, "Failed to add match '%s': %m", *i);
+ }
+
+ if (!strv_isempty(args) && !have_term)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "\"+\" can only be used between terms");
+
+ return 0;
+}
+
+static void boot_id_free_all(BootId *l) {
+
+ while (l) {
+ BootId *i = l;
+ LIST_REMOVE(boot_list, l, i);
+ free(i);
+ }
+}
+
+static int discover_next_boot(sd_journal *j,
+ sd_id128_t previous_boot_id,
+ bool advance_older,
+ BootId **ret) {
+
+ _cleanup_free_ BootId *next_boot = NULL;
+ char match[9+32+1] = "_BOOT_ID=";
+ sd_id128_t boot_id;
+ int r;
+
+ assert(j);
+ assert(ret);
+
+        /* We expect the journal to be at the last position of a boot
+ * (in relation to the direction we are going), so that the next
+ * invocation of sd_journal_next/previous will be from a different
+ * boot. We then collect any information we desire and then jump
+ * to the last location of the new boot by using a _BOOT_ID match
+ * coming from the other journal direction. */
+
+ /* Make sure we aren't restricted by any _BOOT_ID matches, so that
+ * we can actually advance to a *different* boot. */
+ sd_journal_flush_matches(j);
+
+ do {
+ if (advance_older)
+ r = sd_journal_previous(j);
+ else
+ r = sd_journal_next(j);
+ if (r < 0)
+ return r;
+ else if (r == 0)
+ return 0; /* End of journal, yay. */
+
+ r = sd_journal_get_monotonic_usec(j, NULL, &boot_id);
+ if (r < 0)
+ return r;
+
+                /* We iterate through this in a loop, until the boot ID differs from the previous one. Note that
+                 * normally, this will only require a single iteration, as we already seeked to the last entry of
+                 * the previous boot. However, it might happen that the per-journal-field entry arrays are less
+                 * complete than the main entry array, and hence might reference an entry as the last one of the
+                 * boot ID that actually isn't. Let's hence use the per-field array as the initial seek position to
+                 * speed things up, but not trust that it is complete, and manually advance as necessary. */
+
+ } while (sd_id128_equal(boot_id, previous_boot_id));
+
+ next_boot = new0(BootId, 1);
+ if (!next_boot)
+ return -ENOMEM;
+
+ next_boot->id = boot_id;
+
+ r = sd_journal_get_realtime_usec(j, &next_boot->first);
+ if (r < 0)
+ return r;
+
+ /* Now seek to the last occurrence of this boot ID. */
+ sd_id128_to_string(next_boot->id, match + 9);
+ r = sd_journal_add_match(j, match, sizeof(match) - 1);
+ if (r < 0)
+ return r;
+
+ if (advance_older)
+ r = sd_journal_seek_head(j);
+ else
+ r = sd_journal_seek_tail(j);
+ if (r < 0)
+ return r;
+
+ if (advance_older)
+ r = sd_journal_next(j);
+ else
+ r = sd_journal_previous(j);
+ if (r < 0)
+ return r;
+ else if (r == 0)
+ return log_debug_errno(SYNTHETIC_ERRNO(ENODATA),
+ "Whoopsie! We found a boot ID but can't read its last entry."); /* This shouldn't happen. We just came from this very boot ID. */
+
+ r = sd_journal_get_realtime_usec(j, &next_boot->last);
+ if (r < 0)
+ return r;
+
+ *ret = TAKE_PTR(next_boot);
+
+ return 0;
+}
+
+static int get_boots(
+ sd_journal *j,
+ BootId **boots,
+ sd_id128_t *boot_id,
+ int offset) {
+
+ bool skip_once;
+ int r, count = 0;
+ BootId *head = NULL, *tail = NULL, *id;
+ const bool advance_older = boot_id && offset <= 0;
+ sd_id128_t previous_boot_id;
+
+ assert(j);
+
+ /* Adjust for the asymmetry that offset 0 is
+ * the last (and current) boot, while 1 is considered the
+ * (chronological) first boot in the journal. */
+ skip_once = boot_id && sd_id128_is_null(*boot_id) && offset <= 0;
+
+ /* Advance to the earliest/latest occurrence of our reference
+ * boot ID (taking our lookup direction into account), so that
+ * discover_next_boot() can do its job.
+ * If no reference is given, the journal head/tail will do,
+ * they're "virtual" boots after all. */
+ if (boot_id && !sd_id128_is_null(*boot_id)) {
+ char match[9+32+1] = "_BOOT_ID=";
+
+ sd_journal_flush_matches(j);
+
+ sd_id128_to_string(*boot_id, match + 9);
+ r = sd_journal_add_match(j, match, sizeof(match) - 1);
+ if (r < 0)
+ return r;
+
+ if (advance_older)
+ r = sd_journal_seek_head(j); /* seek to oldest */
+ else
+ r = sd_journal_seek_tail(j); /* seek to newest */
+ if (r < 0)
+ return r;
+
+ if (advance_older)
+ r = sd_journal_next(j); /* read the oldest entry */
+ else
+ r = sd_journal_previous(j); /* read the most recently added entry */
+ if (r < 0)
+ return r;
+ else if (r == 0)
+ goto finish;
+ else if (offset == 0) {
+ count = 1;
+ goto finish;
+ }
+
+                /* At this point the read pointer is positioned at the oldest/newest occurrence of the reference boot
+                 * ID. After flushing the matches, one more invocation of _previous()/_next() will hence place us at
+                 * the following entry, which must then have an older/newer boot ID. */
+ } else {
+
+ if (advance_older)
+ r = sd_journal_seek_tail(j); /* seek to newest */
+ else
+ r = sd_journal_seek_head(j); /* seek to oldest */
+ if (r < 0)
+ return r;
+
+ /* No sd_journal_next()/_previous() here.
+ *
+ * At this point the read pointer is positioned after the newest/before the oldest entry in the whole
+ * journal. The next invocation of _previous()/_next() will hence position us at the newest/oldest
+ * entry we have. */
+ }
+
+ previous_boot_id = SD_ID128_NULL;
+ for (;;) {
+ _cleanup_free_ BootId *current = NULL;
+
+ r = discover_next_boot(j, previous_boot_id, advance_older, &current);
+ if (r < 0) {
+ boot_id_free_all(head);
+ return r;
+ }
+
+ if (!current)
+ break;
+
+ previous_boot_id = current->id;
+
+ if (boot_id) {
+ if (!skip_once)
+ offset += advance_older ? 1 : -1;
+ skip_once = false;
+
+ if (offset == 0) {
+ count = 1;
+ *boot_id = current->id;
+ break;
+ }
+ } else {
+ LIST_FOREACH(boot_list, id, head) {
+ if (sd_id128_equal(id->id, current->id)) {
+                                        /* This boot ID is already stored, so something is wrong with the journal
+                                         * files. Exit, as otherwise this would cause an endless loop. */
+ goto finish;
+ }
+ }
+ LIST_INSERT_AFTER(boot_list, head, tail, current);
+ tail = TAKE_PTR(current);
+ count++;
+ }
+ }
+
+finish:
+ if (boots)
+ *boots = head;
+
+ sd_journal_flush_matches(j);
+
+ return count;
+}
+
+static int list_boots(sd_journal *j) {
+ int w, i, count;
+ BootId *id, *all_ids;
+
+ assert(j);
+
+ count = get_boots(j, &all_ids, NULL, 0);
+ if (count < 0)
+ return log_error_errno(count, "Failed to determine boots: %m");
+ if (count == 0)
+ return count;
+
+ (void) pager_open(arg_pager_flags);
+
+        /* The offsets shown range from -(count-1) to 0, so reserve the width of count-1 plus one char for the sign */
+ w = DECIMAL_STR_WIDTH(count - 1) + 1;
+
+ i = 0;
+ LIST_FOREACH(boot_list, id, all_ids) {
+ char a[FORMAT_TIMESTAMP_MAX], b[FORMAT_TIMESTAMP_MAX];
+
+ printf("% *i " SD_ID128_FORMAT_STR " %s—%s\n",
+ w, i - count + 1,
+ SD_ID128_FORMAT_VAL(id->id),
+ format_timestamp_maybe_utc(a, sizeof(a), id->first),
+ format_timestamp_maybe_utc(b, sizeof(b), id->last));
+ i++;
+ }
+
+ boot_id_free_all(all_ids);
+
+ return 0;
+}
+
+static int add_boot(sd_journal *j) {
+ char match[9+32+1] = "_BOOT_ID=";
+ sd_id128_t boot_id;
+ int r;
+
+ assert(j);
+
+ if (!arg_boot)
+ return 0;
+
+        /* Take a shortcut and use the current boot_id, which we can do very quickly.
+         * We can do this only when the logs are coming from the current machine,
+         * so take the slow path if a log location is specified. */
+ if (arg_boot_offset == 0 && sd_id128_is_null(arg_boot_id) &&
+ !arg_directory && !arg_file && !arg_root)
+
+ return add_match_this_boot(j, arg_machine);
+
+ boot_id = arg_boot_id;
+ r = get_boots(j, NULL, &boot_id, arg_boot_offset);
+ assert(r <= 1);
+ if (r <= 0) {
+ const char *reason = (r == 0) ? "No such boot ID in journal" : strerror(-r);
+
+ if (sd_id128_is_null(arg_boot_id))
+ log_error("Data from the specified boot (%+i) is not available: %s",
+ arg_boot_offset, reason);
+ else
+ log_error("Data from the specified boot ("SD_ID128_FORMAT_STR") is not available: %s",
+ SD_ID128_FORMAT_VAL(arg_boot_id), reason);
+
+ return r == 0 ? -ENODATA : r;
+ }
+
+ sd_id128_to_string(boot_id, match + 9);
+
+ r = sd_journal_add_match(j, match, sizeof(match) - 1);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add match: %m");
+
+ r = sd_journal_add_conjunction(j);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add conjunction: %m");
+
+ return 0;
+}
+
+static int add_dmesg(sd_journal *j) {
+ int r;
+ assert(j);
+
+ if (!arg_dmesg)
+ return 0;
+
+ r = sd_journal_add_match(j, "_TRANSPORT=kernel",
+ STRLEN("_TRANSPORT=kernel"));
+ if (r < 0)
+ return log_error_errno(r, "Failed to add match: %m");
+
+ r = sd_journal_add_conjunction(j);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add conjunction: %m");
+
+ return 0;
+}
+
+static int get_possible_units(
+ sd_journal *j,
+ const char *fields,
+ char **patterns,
+ Set **units) {
+
+ _cleanup_set_free_free_ Set *found;
+ const char *field;
+ int r;
+
+ found = set_new(&string_hash_ops);
+ if (!found)
+ return -ENOMEM;
+
+ NULSTR_FOREACH(field, fields) {
+ const void *data;
+ size_t size;
+
+ r = sd_journal_query_unique(j, field);
+ if (r < 0)
+ return r;
+
+ SD_JOURNAL_FOREACH_UNIQUE(j, data, size) {
+ char **pattern, *eq;
+ size_t prefix;
+ _cleanup_free_ char *u = NULL;
+
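+                        /* Strip the "FIELD=" prefix from the returned data, so that we can match the bare
+                         * unit name against the glob patterns. */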
+ eq = memchr(data, '=', size);
+ if (eq)
+ prefix = eq - (char*) data + 1;
+ else
+ prefix = 0;
+
+ u = strndup((char*) data + prefix, size - prefix);
+ if (!u)
+ return -ENOMEM;
+
+ STRV_FOREACH(pattern, patterns)
+ if (fnmatch(*pattern, u, FNM_NOESCAPE) == 0) {
+ log_debug("Matched %s with pattern %s=%s", u, field, *pattern);
+
+ r = set_consume(found, u);
+ u = NULL;
+ if (r < 0 && r != -EEXIST)
+ return r;
+
+ break;
+ }
+ }
+ }
+
+ *units = TAKE_PTR(found);
+
+ return 0;
+}
+
+/* This list is supposed to cover the superset of unit names
+ * possibly matched by rules added with add_matches_for_unit... */
+#define SYSTEM_UNITS \
+ "_SYSTEMD_UNIT\0" \
+ "COREDUMP_UNIT\0" \
+ "UNIT\0" \
+ "OBJECT_SYSTEMD_UNIT\0" \
+ "_SYSTEMD_SLICE\0"
+
+/* ... and add_matches_for_user_unit */
+#define USER_UNITS \
+ "_SYSTEMD_USER_UNIT\0" \
+ "USER_UNIT\0" \
+ "COREDUMP_USER_UNIT\0" \
+ "OBJECT_SYSTEMD_USER_UNIT\0"
+
+static int add_units(sd_journal *j) {
+ _cleanup_strv_free_ char **patterns = NULL;
+ int r, count = 0;
+ char **i;
+
+ assert(j);
+
+ STRV_FOREACH(i, arg_system_units) {
+ _cleanup_free_ char *u = NULL;
+
+ r = unit_name_mangle(*i, UNIT_NAME_MANGLE_GLOB | (arg_quiet ? 0 : UNIT_NAME_MANGLE_WARN), &u);
+ if (r < 0)
+ return r;
+
+ if (string_is_glob(u)) {
+ r = strv_push(&patterns, u);
+ if (r < 0)
+ return r;
+ u = NULL;
+ } else {
+ r = add_matches_for_unit(j, u);
+ if (r < 0)
+ return r;
+ r = sd_journal_add_disjunction(j);
+ if (r < 0)
+ return r;
+ count++;
+ }
+ }
+
+ if (!strv_isempty(patterns)) {
+ _cleanup_set_free_free_ Set *units = NULL;
+ Iterator it;
+ char *u;
+
+ r = get_possible_units(j, SYSTEM_UNITS, patterns, &units);
+ if (r < 0)
+ return r;
+
+ SET_FOREACH(u, units, it) {
+ r = add_matches_for_unit(j, u);
+ if (r < 0)
+ return r;
+ r = sd_journal_add_disjunction(j);
+ if (r < 0)
+ return r;
+ count++;
+ }
+ }
+
+ patterns = strv_free(patterns);
+
+ STRV_FOREACH(i, arg_user_units) {
+ _cleanup_free_ char *u = NULL;
+
+ r = unit_name_mangle(*i, UNIT_NAME_MANGLE_GLOB | (arg_quiet ? 0 : UNIT_NAME_MANGLE_WARN), &u);
+ if (r < 0)
+ return r;
+
+ if (string_is_glob(u)) {
+ r = strv_push(&patterns, u);
+ if (r < 0)
+ return r;
+ u = NULL;
+ } else {
+ r = add_matches_for_user_unit(j, u, getuid());
+ if (r < 0)
+ return r;
+ r = sd_journal_add_disjunction(j);
+ if (r < 0)
+ return r;
+ count++;
+ }
+ }
+
+ if (!strv_isempty(patterns)) {
+ _cleanup_set_free_free_ Set *units = NULL;
+ Iterator it;
+ char *u;
+
+ r = get_possible_units(j, USER_UNITS, patterns, &units);
+ if (r < 0)
+ return r;
+
+ SET_FOREACH(u, units, it) {
+ r = add_matches_for_user_unit(j, u, getuid());
+ if (r < 0)
+ return r;
+ r = sd_journal_add_disjunction(j);
+ if (r < 0)
+ return r;
+ count++;
+ }
+ }
+
+        /* Complain if the user requested matches but nothing whatsoever was
+         * found, since otherwise everything would be matched. */
+ if (!(strv_isempty(arg_system_units) && strv_isempty(arg_user_units)) && count == 0)
+ return -ENODATA;
+
+ r = sd_journal_add_conjunction(j);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int add_priorities(sd_journal *j) {
+ char match[] = "PRIORITY=0";
+ int i, r;
+ assert(j);
+
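+        /* 0xFF means all eight priority bits are set, i.e. no priority filtering was requested. */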
+ if (arg_priorities == 0xFF)
+ return 0;
+
+ for (i = LOG_EMERG; i <= LOG_DEBUG; i++)
+ if (arg_priorities & (1 << i)) {
+ match[sizeof(match)-2] = '0' + i;
+
+ r = sd_journal_add_match(j, match, strlen(match));
+ if (r < 0)
+ return log_error_errno(r, "Failed to add match: %m");
+ }
+
+ r = sd_journal_add_conjunction(j);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add conjunction: %m");
+
+ return 0;
+}
+
+static int add_syslog_identifier(sd_journal *j) {
+ int r;
+ char **i;
+
+ assert(j);
+
+ STRV_FOREACH(i, arg_syslog_identifier) {
+ char *u;
+
+ u = strjoina("SYSLOG_IDENTIFIER=", *i);
+ r = sd_journal_add_match(j, u, 0);
+ if (r < 0)
+ return r;
+ r = sd_journal_add_disjunction(j);
+ if (r < 0)
+ return r;
+ }
+
+ r = sd_journal_add_conjunction(j);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int setup_keys(void) {
+#if HAVE_GCRYPT
+ size_t mpk_size, seed_size, state_size, i;
+ uint8_t *mpk, *seed, *state;
+ int fd = -1, r;
+ sd_id128_t machine, boot;
+ char *p = NULL, *k = NULL;
+ struct FSSHeader h;
+ uint64_t n;
+ struct stat st;
+
+ r = stat("/var/log/journal", &st);
+ if (r < 0 && !IN_SET(errno, ENOENT, ENOTDIR))
+ return log_error_errno(errno, "stat(\"%s\") failed: %m", "/var/log/journal");
+
+ if (r < 0 || !S_ISDIR(st.st_mode)) {
+ log_error("%s is not a directory, must be using persistent logging for FSS.",
+ "/var/log/journal");
+ return r < 0 ? -errno : -ENOTDIR;
+ }
+
+ r = sd_id128_get_machine(&machine);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get machine ID: %m");
+
+ r = sd_id128_get_boot(&boot);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get boot ID: %m");
+
+ if (asprintf(&p, "/var/log/journal/" SD_ID128_FORMAT_STR "/fss",
+ SD_ID128_FORMAT_VAL(machine)) < 0)
+ return log_oom();
+
+ if (arg_force) {
+ r = unlink(p);
+ if (r < 0 && errno != ENOENT) {
+ r = log_error_errno(errno, "unlink(\"%s\") failed: %m", p);
+ goto finish;
+ }
+ } else if (access(p, F_OK) >= 0) {
+ log_error("Sealing key file %s exists already. Use --force to recreate.", p);
+ r = -EEXIST;
+ goto finish;
+ }
+
+ if (asprintf(&k, "/var/log/journal/" SD_ID128_FORMAT_STR "/fss.tmp.XXXXXX",
+ SD_ID128_FORMAT_VAL(machine)) < 0) {
+ r = log_oom();
+ goto finish;
+ }
+
+ mpk_size = FSPRG_mskinbytes(FSPRG_RECOMMENDED_SECPAR);
+ mpk = alloca(mpk_size);
+
+ seed_size = FSPRG_RECOMMENDED_SEEDLEN;
+ seed = alloca(seed_size);
+
+ state_size = FSPRG_stateinbytes(FSPRG_RECOMMENDED_SECPAR);
+ state = alloca(state_size);
+
+ fd = open("/dev/random", O_RDONLY|O_CLOEXEC|O_NOCTTY);
+ if (fd < 0) {
+ r = log_error_errno(errno, "Failed to open /dev/random: %m");
+ goto finish;
+ }
+
+ log_info("Generating seed...");
+ r = loop_read_exact(fd, seed, seed_size, true);
+ if (r < 0) {
+ log_error_errno(r, "Failed to read random seed: %m");
+ goto finish;
+ }
+
+ log_info("Generating key pair...");
+ FSPRG_GenMK(NULL, mpk, seed, seed_size, FSPRG_RECOMMENDED_SECPAR);
+
+ log_info("Generating sealing key...");
+ FSPRG_GenState0(state, mpk, seed, seed_size);
+
+ assert(arg_interval > 0);
+
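+        /* Determine the current epoch, i.e. the number of complete sealing intervals that have passed
+         * since the UNIX epoch. */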
+ n = now(CLOCK_REALTIME);
+ n /= arg_interval;
+
+ safe_close(fd);
+ fd = mkostemp_safe(k);
+ if (fd < 0) {
+ r = log_error_errno(fd, "Failed to open %s: %m", k);
+ goto finish;
+ }
+
+ /* Enable secure remove, exclusion from dump, synchronous
+ * writing and in-place updating */
+ r = chattr_fd(fd, FS_SECRM_FL|FS_NODUMP_FL|FS_SYNC_FL|FS_NOCOW_FL, FS_SECRM_FL|FS_NODUMP_FL|FS_SYNC_FL|FS_NOCOW_FL, NULL);
+ if (r < 0)
+ log_warning_errno(r, "Failed to set file attributes: %m");
+
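+        /* Fill in the FSS header that is written in front of the FSPRG state in the key file. */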
+ zero(h);
+ memcpy(h.signature, "KSHHRHLP", 8);
+ h.machine_id = machine;
+ h.boot_id = boot;
+ h.header_size = htole64(sizeof(h));
+ h.start_usec = htole64(n * arg_interval);
+ h.interval_usec = htole64(arg_interval);
+ h.fsprg_secpar = htole16(FSPRG_RECOMMENDED_SECPAR);
+ h.fsprg_state_size = htole64(state_size);
+
+ r = loop_write(fd, &h, sizeof(h), false);
+ if (r < 0) {
+ log_error_errno(r, "Failed to write header: %m");
+ goto finish;
+ }
+
+ r = loop_write(fd, state, state_size, false);
+ if (r < 0) {
+ log_error_errno(r, "Failed to write state: %m");
+ goto finish;
+ }
+
+ if (link(k, p) < 0) {
+ r = log_error_errno(errno, "Failed to link file: %m");
+ goto finish;
+ }
+
+ if (on_tty()) {
+ fprintf(stderr,
+ "\n"
+ "The new key pair has been generated. The %ssecret sealing key%s has been written to\n"
+ "the following local file. This key file is automatically updated when the\n"
+ "sealing key is advanced. It should not be used on multiple hosts.\n"
+ "\n"
+ "\t%s\n"
+ "\n"
+ "Please write down the following %ssecret verification key%s. It should be stored\n"
+ "at a safe location and should not be saved locally on disk.\n"
+ "\n\t%s",
+ ansi_highlight(), ansi_normal(),
+ p,
+ ansi_highlight(), ansi_normal(),
+ ansi_highlight_red());
+ fflush(stderr);
+ }
+ for (i = 0; i < seed_size; i++) {
+ if (i > 0 && i % 3 == 0)
+ putchar('-');
+ printf("%02x", ((uint8_t*) seed)[i]);
+ }
+
+ printf("/%llx-%llx\n", (unsigned long long) n, (unsigned long long) arg_interval);
+
+ if (on_tty()) {
+ char tsb[FORMAT_TIMESPAN_MAX], *hn;
+
+ fprintf(stderr,
+ "%s\n"
+ "The sealing key is automatically changed every %s.\n",
+ ansi_normal(),
+ format_timespan(tsb, sizeof(tsb), arg_interval, 0));
+
+ hn = gethostname_malloc();
+
+ if (hn) {
+ hostname_cleanup(hn);
+ fprintf(stderr, "\nThe keys have been generated for host %s/" SD_ID128_FORMAT_STR ".\n", hn, SD_ID128_FORMAT_VAL(machine));
+ } else
+ fprintf(stderr, "\nThe keys have been generated for host " SD_ID128_FORMAT_STR ".\n", SD_ID128_FORMAT_VAL(machine));
+
+#if HAVE_QRENCODE
+                /* If this is not a UTF-8 system, don't print any QR codes */
+ if (is_locale_utf8()) {
+ fputs("\nTo transfer the verification key to your phone please scan the QR code below:\n\n", stderr);
+ print_qr_code(stderr, seed, seed_size, n, arg_interval, hn, machine);
+ }
+#endif
+ free(hn);
+ }
+
+ r = 0;
+
+finish:
+ safe_close(fd);
+
+ if (k) {
+ unlink(k);
+ free(k);
+ }
+
+ free(p);
+
+ return r;
+#else
+ return log_error_errno(SYNTHETIC_ERRNO(EOPNOTSUPP),
+ "Forward-secure sealing not available.");
+#endif
+}
+
+static int verify(sd_journal *j) {
+ int r = 0;
+ Iterator i;
+ JournalFile *f;
+
+ assert(j);
+
+ log_show_color(true);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files, i) {
+ int k;
+ usec_t first = 0, validated = 0, last = 0;
+
+#if HAVE_GCRYPT
+ if (!arg_verify_key && JOURNAL_HEADER_SEALED(f->header))
+ log_notice("Journal file %s has sealing enabled but verification key has not been passed using --verify-key=.", f->path);
+#endif
+
+ k = journal_file_verify(f, arg_verify_key, &first, &validated, &last, true);
+ if (k == -EINVAL) {
+                        /* If the key was invalid, give up right away. */
+ return k;
+ } else if (k < 0) {
+ log_warning_errno(k, "FAIL: %s (%m)", f->path);
+ r = k;
+ } else {
+ char a[FORMAT_TIMESTAMP_MAX], b[FORMAT_TIMESTAMP_MAX], c[FORMAT_TIMESPAN_MAX];
+ log_info("PASS: %s", f->path);
+
+ if (arg_verify_key && JOURNAL_HEADER_SEALED(f->header)) {
+ if (validated > 0) {
+ log_info("=> Validated from %s to %s, final %s entries not sealed.",
+ format_timestamp_maybe_utc(a, sizeof(a), first),
+ format_timestamp_maybe_utc(b, sizeof(b), validated),
+ format_timespan(c, sizeof(c), last > validated ? last - validated : 0, 0));
+ } else if (last > 0)
+ log_info("=> No sealing yet, %s of entries not sealed.",
+ format_timespan(c, sizeof(c), last - first, 0));
+ else
+ log_info("=> No sealing yet, no entries in file.");
+ }
+ }
+ }
+
+ return r;
+}
+
+static int flush_to_var(void) {
+ _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
+ _cleanup_(sd_bus_flush_close_unrefp) sd_bus *bus = NULL;
+ _cleanup_close_ int watch_fd = -1;
+ int r;
+
+ if (arg_machine) {
+ log_error("--flush is not supported in conjunction with --machine=.");
+ return -EOPNOTSUPP;
+ }
+
+ /* Quick exit */
+ if (access("/run/systemd/journal/flushed", F_OK) >= 0)
+ return 0;
+
+ /* OK, let's actually do the full logic, send SIGUSR1 to the
+ * daemon and set up inotify to wait for the flushed file to appear */
+ r = bus_connect_system_systemd(&bus);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get D-Bus connection: %m");
+
+ r = sd_bus_call_method(
+ bus,
+ "org.freedesktop.systemd1",
+ "/org/freedesktop/systemd1",
+ "org.freedesktop.systemd1.Manager",
+ "KillUnit",
+ &error,
+ NULL,
+ "ssi", "systemd-journald.service", "main", SIGUSR1);
+ if (r < 0)
+ return log_error_errno(r, "Failed to kill journal service: %s", bus_error_message(&error, r));
+
+ mkdir_p("/run/systemd/journal", 0755);
+
+ watch_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
+ if (watch_fd < 0)
+ return log_error_errno(errno, "Failed to create inotify watch: %m");
+
+ r = inotify_add_watch(watch_fd, "/run/systemd/journal", IN_CREATE|IN_DONT_FOLLOW|IN_ONLYDIR);
+ if (r < 0)
+ return log_error_errno(errno, "Failed to watch journal directory: %m");
+
+ for (;;) {
+ if (access("/run/systemd/journal/flushed", F_OK) >= 0)
+ break;
+
+ if (errno != ENOENT)
+ return log_error_errno(errno, "Failed to check for existence of /run/systemd/journal/flushed: %m");
+
+ r = fd_wait_for_event(watch_fd, POLLIN, USEC_INFINITY);
+ if (r < 0)
+ return log_error_errno(r, "Failed to wait for event: %m");
+
+ r = flush_fd(watch_fd);
+ if (r < 0)
+ return log_error_errno(r, "Failed to flush inotify events: %m");
+ }
+
+ return 0;
+}
+
+static int send_signal_and_wait(int sig, const char *watch_path) {
+ _cleanup_(sd_bus_flush_close_unrefp) sd_bus *bus = NULL;
+ _cleanup_close_ int watch_fd = -1;
+ usec_t start;
+ int r;
+
+ if (arg_machine) {
+ log_error("--sync and --rotate are not supported in conjunction with --machine=.");
+ return -EOPNOTSUPP;
+ }
+
+ start = now(CLOCK_MONOTONIC);
+
+ /* This call sends the specified signal to journald, and waits
+ * for acknowledgment by watching the mtime of the specified
+ * flag file. This is used to trigger syncing or rotation and
+ * then wait for the operation to complete. */
+
+ for (;;) {
+ usec_t tstamp;
+
+ /* See if a sync happened by now. */
+ r = read_timestamp_file(watch_path, &tstamp);
+ if (r < 0 && r != -ENOENT)
+ return log_error_errno(r, "Failed to read %s: %m", watch_path);
+ if (r >= 0 && tstamp >= start)
+ return 0;
+
+ /* Let's ask for a sync, but only once. */
+ if (!bus) {
+ _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
+
+ r = bus_connect_system_systemd(&bus);
+ if (r < 0)
+ return log_error_errno(r, "Failed to get D-Bus connection: %m");
+
+ r = sd_bus_call_method(
+ bus,
+ "org.freedesktop.systemd1",
+ "/org/freedesktop/systemd1",
+ "org.freedesktop.systemd1.Manager",
+ "KillUnit",
+ &error,
+ NULL,
+ "ssi", "systemd-journald.service", "main", sig);
+ if (r < 0)
+ return log_error_errno(r, "Failed to kill journal service: %s", bus_error_message(&error, r));
+
+ continue;
+ }
+
+ /* Let's install the inotify watch, if we didn't do that yet. */
+ if (watch_fd < 0) {
+
+ mkdir_p("/run/systemd/journal", 0755);
+
+ watch_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
+ if (watch_fd < 0)
+ return log_error_errno(errno, "Failed to create inotify watch: %m");
+
+ r = inotify_add_watch(watch_fd, "/run/systemd/journal", IN_MOVED_TO|IN_DONT_FOLLOW|IN_ONLYDIR);
+ if (r < 0)
+ return log_error_errno(errno, "Failed to watch journal directory: %m");
+
+ /* Recheck the flag file immediately, so that we don't miss any event since the last check. */
+ continue;
+ }
+
+ /* OK, all preparatory steps done, let's wait until
+ * inotify reports an event. */
+
+ r = fd_wait_for_event(watch_fd, POLLIN, USEC_INFINITY);
+ if (r < 0)
+ return log_error_errno(r, "Failed to wait for event: %m");
+
+ r = flush_fd(watch_fd);
+ if (r < 0)
+ return log_error_errno(r, "Failed to flush inotify events: %m");
+ }
+
+ return 0;
+}
+
+static int rotate(void) {
+ return send_signal_and_wait(SIGUSR2, "/run/systemd/journal/rotated");
+}
+
+static int sync_journal(void) {
+ return send_signal_and_wait(SIGRTMIN+1, "/run/systemd/journal/synced");
+}
+
+static int wait_for_change(sd_journal *j, int poll_fd) {
+ struct pollfd pollfds[] = {
+ { .fd = poll_fd, .events = POLLIN },
+ { .fd = STDOUT_FILENO },
+ };
+
+ struct timespec ts;
+ usec_t timeout;
+ int r;
+
+ assert(j);
+ assert(poll_fd >= 0);
+
+ /* Much like sd_journal_wait() but also keeps an eye on STDOUT, and exits as soon as we see a POLLHUP on that,
+ * i.e. when it is closed. */
+
+ r = sd_journal_get_timeout(j, &timeout);
+ if (r < 0)
+ return log_error_errno(r, "Failed to determine journal waiting time: %m");
+
+ if (ppoll(pollfds, ELEMENTSOF(pollfds),
+ timeout == USEC_INFINITY ? NULL : timespec_store(&ts, timeout), NULL) < 0) {
+ if (errno == EINTR)
+ return 0;
+
+ return log_error_errno(errno, "Couldn't wait for journal event: %m");
+ }
+
+ if (pollfds[1].revents & (POLLHUP|POLLERR)) /* STDOUT has been closed? */
+ return log_debug_errno(SYNTHETIC_ERRNO(ECANCELED),
+ "Standard output has been closed.");
+
+ r = sd_journal_process(j);
+ if (r < 0)
+ return log_error_errno(r, "Failed to process journal events: %m");
+
+ return 0;
+}
+
+int main(int argc, char *argv[]) {
+ bool previous_boot_id_valid = false, first_line = true, ellipsized = false, need_seek = false;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ sd_id128_t previous_boot_id;
+ int n_shown = 0, r, poll_fd = -1;
+
+ setlocale(LC_ALL, "");
+ log_parse_environment();
+ log_open();
+
+        /* Increase the maximum number of open files if we can, we might need this when browsing journal files,
+         * which might be split up into many files. */
+ (void) rlimit_nofile_bump(HIGH_RLIMIT_NOFILE);
+
+ r = parse_argv(argc, argv);
+ if (r <= 0)
+ goto finish;
+
+ signal(SIGWINCH, columns_lines_cache_reset);
+ sigbus_install();
+
+ switch (arg_action) {
+
+ case ACTION_NEW_ID128:
+ r = id128_print_new(true);
+ goto finish;
+
+ case ACTION_SETUP_KEYS:
+ r = setup_keys();
+ goto finish;
+
+ case ACTION_LIST_CATALOG:
+ case ACTION_DUMP_CATALOG:
+ case ACTION_UPDATE_CATALOG: {
+ _cleanup_free_ char *database;
+
+ database = path_join(arg_root, CATALOG_DATABASE);
+ if (!database) {
+ r = log_oom();
+ goto finish;
+ }
+
+ if (arg_action == ACTION_UPDATE_CATALOG) {
+ r = catalog_update(database, arg_root, catalog_file_dirs);
+ if (r < 0)
+                                log_error_errno(r, "Failed to update catalog: %m");
+ } else {
+ bool oneline = arg_action == ACTION_LIST_CATALOG;
+
+ (void) pager_open(arg_pager_flags);
+
+ if (optind < argc)
+ r = catalog_list_items(stdout, database, oneline, argv + optind);
+ else
+ r = catalog_list(stdout, database, oneline);
+ if (r < 0)
+ log_error_errno(r, "Failed to list catalog: %m");
+ }
+
+ goto finish;
+ }
+
+ case ACTION_FLUSH:
+ r = flush_to_var();
+ goto finish;
+
+ case ACTION_SYNC:
+ r = sync_journal();
+ goto finish;
+
+ case ACTION_ROTATE:
+ r = rotate();
+ goto finish;
+
+ case ACTION_SHOW:
+ case ACTION_PRINT_HEADER:
+ case ACTION_VERIFY:
+ case ACTION_DISK_USAGE:
+ case ACTION_LIST_BOOTS:
+ case ACTION_VACUUM:
+ case ACTION_ROTATE_AND_VACUUM:
+ case ACTION_LIST_FIELDS:
+ case ACTION_LIST_FIELD_NAMES:
+ /* These ones require access to the journal files, continue below. */
+ break;
+
+ default:
+ assert_not_reached("Unknown action");
+ }
+
+ if (arg_directory)
+ r = sd_journal_open_directory(&j, arg_directory, arg_journal_type);
+ else if (arg_root)
+ r = sd_journal_open_directory(&j, arg_root, arg_journal_type | SD_JOURNAL_OS_ROOT);
+ else if (arg_file_stdin) {
+ int ifd = STDIN_FILENO;
+ r = sd_journal_open_files_fd(&j, &ifd, 1, 0);
+ } else if (arg_file)
+ r = sd_journal_open_files(&j, (const char**) arg_file, 0);
+ else if (arg_machine) {
+ _cleanup_(sd_bus_error_free) sd_bus_error error = SD_BUS_ERROR_NULL;
+ _cleanup_(sd_bus_message_unrefp) sd_bus_message *reply = NULL;
+ _cleanup_(sd_bus_flush_close_unrefp) sd_bus *bus = NULL;
+ int fd;
+
+ if (geteuid() != 0) {
+ /* The file descriptor returned by OpenMachineRootDirectory() will be owned by users/groups of
+ * the container, thus we need root privileges to override them. */
+ log_error("Using the --machine= switch requires root privileges.");
+ r = -EPERM;
+ goto finish;
+ }
+
+ r = sd_bus_open_system(&bus);
+ if (r < 0) {
+ log_error_errno(r, "Failed to open system bus: %m");
+ goto finish;
+ }
+
+ r = sd_bus_call_method(
+ bus,
+ "org.freedesktop.machine1",
+ "/org/freedesktop/machine1",
+ "org.freedesktop.machine1.Manager",
+ "OpenMachineRootDirectory",
+ &error,
+ &reply,
+ "s", arg_machine);
+ if (r < 0) {
+ log_error_errno(r, "Failed to open root directory: %s", bus_error_message(&error, r));
+ goto finish;
+ }
+
+ r = sd_bus_message_read(reply, "h", &fd);
+ if (r < 0) {
+ bus_log_parse_error(r);
+ goto finish;
+ }
+
+ fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
+ if (fd < 0) {
+ r = log_error_errno(errno, "Failed to duplicate file descriptor: %m");
+ goto finish;
+ }
+
+ r = sd_journal_open_directory_fd(&j, fd, SD_JOURNAL_OS_ROOT);
+ if (r < 0)
+ safe_close(fd);
+ } else
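+                /* Unless --merge was specified, restrict ourselves to journal files of the local machine. */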
+ r = sd_journal_open(&j, !arg_merge*SD_JOURNAL_LOCAL_ONLY + arg_journal_type);
+ if (r < 0) {
+ log_error_errno(r, "Failed to open %s: %m", arg_directory ?: arg_file ? "files" : "journal");
+ goto finish;
+ }
+
+ r = journal_access_check_and_warn(j, arg_quiet,
+ !(arg_journal_type == SD_JOURNAL_CURRENT_USER || arg_user_units));
+ if (r < 0)
+ goto finish;
+
+ switch (arg_action) {
+
+ case ACTION_NEW_ID128:
+ case ACTION_SETUP_KEYS:
+ case ACTION_LIST_CATALOG:
+ case ACTION_DUMP_CATALOG:
+ case ACTION_UPDATE_CATALOG:
+ case ACTION_FLUSH:
+ case ACTION_SYNC:
+ case ACTION_ROTATE:
+ assert_not_reached("Unexpected action.");
+
+ case ACTION_PRINT_HEADER:
+ journal_print_header(j);
+ r = 0;
+ goto finish;
+
+ case ACTION_VERIFY:
+ r = verify(j);
+ goto finish;
+
+ case ACTION_DISK_USAGE: {
+ uint64_t bytes = 0;
+ char sbytes[FORMAT_BYTES_MAX];
+
+ r = sd_journal_get_usage(j, &bytes);
+ if (r < 0)
+ goto finish;
+
+ printf("Archived and active journals take up %s in the file system.\n",
+ format_bytes(sbytes, sizeof(sbytes), bytes));
+ goto finish;
+ }
+
+ case ACTION_LIST_BOOTS:
+ r = list_boots(j);
+ goto finish;
+
+ case ACTION_ROTATE_AND_VACUUM:
+
+ r = rotate();
+ if (r < 0)
+ goto finish;
+
+ _fallthrough_;
+
+ case ACTION_VACUUM: {
+ Directory *d;
+ Iterator i;
+
+ HASHMAP_FOREACH(d, j->directories_by_path, i) {
+ int q;
+
+ if (d->is_root)
+ continue;
+
+ q = journal_directory_vacuum(d->path, arg_vacuum_size, arg_vacuum_n_files, arg_vacuum_time, NULL, !arg_quiet);
+ if (q < 0) {
+ log_error_errno(q, "Failed to vacuum %s: %m", d->path);
+ r = q;
+ }
+ }
+
+ goto finish;
+ }
+
+ case ACTION_LIST_FIELD_NAMES: {
+ const char *field;
+
+ SD_JOURNAL_FOREACH_FIELD(j, field) {
+ printf("%s\n", field);
+ n_shown++;
+ }
+
+ r = 0;
+ goto finish;
+ }
+
+ case ACTION_SHOW:
+ case ACTION_LIST_FIELDS:
+ break;
+
+ default:
+ assert_not_reached("Unknown action");
+ }
+
+ if (arg_boot_offset != 0 &&
+ sd_journal_has_runtime_files(j) > 0 &&
+ sd_journal_has_persistent_files(j) == 0) {
+                log_info("Specifying a boot ID or a boot offset has no effect, as no persistent journal was found.");
+ r = 0;
+ goto finish;
+ }
+ /* add_boot() must be called first!
+ * It may need to seek the journal to find parent boot IDs. */
+ r = add_boot(j);
+ if (r < 0)
+ goto finish;
+
+ r = add_dmesg(j);
+ if (r < 0)
+ goto finish;
+
+ r = add_units(j);
+ if (r < 0) {
+ log_error_errno(r, "Failed to add filter for units: %m");
+ goto finish;
+ }
+
+ r = add_syslog_identifier(j);
+ if (r < 0) {
+ log_error_errno(r, "Failed to add filter for syslog identifiers: %m");
+ goto finish;
+ }
+
+ r = add_priorities(j);
+ if (r < 0)
+ goto finish;
+
+ r = add_matches(j, argv + optind);
+ if (r < 0)
+ goto finish;
+
+ if (DEBUG_LOGGING) {
+ _cleanup_free_ char *filter;
+
+ filter = journal_make_match_string(j);
+ if (!filter)
+ return log_oom();
+
+ log_debug("Journal filter: %s", filter);
+ }
+
+ if (arg_action == ACTION_LIST_FIELDS) {
+ const void *data;
+ size_t size;
+
+ assert(arg_field);
+
+ r = sd_journal_set_data_threshold(j, 0);
+ if (r < 0) {
+ log_error_errno(r, "Failed to unset data size threshold: %m");
+ goto finish;
+ }
+
+ r = sd_journal_query_unique(j, arg_field);
+ if (r < 0) {
+ log_error_errno(r, "Failed to query unique data objects: %m");
+ goto finish;
+ }
+
+ SD_JOURNAL_FOREACH_UNIQUE(j, data, size) {
+ const void *eq;
+
+ if (arg_lines >= 0 && n_shown >= arg_lines)
+ break;
+
+ eq = memchr(data, '=', size);
+ if (eq)
+ printf("%.*s\n", (int) (size - ((const uint8_t*) eq - (const uint8_t*) data + 1)), (const char*) eq + 1);
+ else
+ printf("%.*s\n", (int) size, (const char*) data);
+
+ n_shown++;
+ }
+
+ r = 0;
+ goto finish;
+ }
+
+ /* Opening the fd now means the first sd_journal_wait() will actually wait */
+ if (arg_follow) {
+ poll_fd = sd_journal_get_fd(j);
+ if (poll_fd == -EMFILE) {
+                        log_warning_errno(poll_fd, "Insufficient watch descriptors available. Reverting to -n.");
+ arg_follow = false;
+ } else if (poll_fd == -EMEDIUMTYPE) {
+ log_error_errno(poll_fd, "The --follow switch is not supported in conjunction with reading from STDIN.");
+ goto finish;
+ } else if (poll_fd < 0) {
+ log_error_errno(poll_fd, "Failed to get journal fd: %m");
+ goto finish;
+ }
+ }
+
+ if (arg_cursor || arg_after_cursor) {
+ r = sd_journal_seek_cursor(j, arg_cursor ?: arg_after_cursor);
+ if (r < 0) {
+ log_error_errno(r, "Failed to seek to cursor: %m");
+ goto finish;
+ }
+
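+                /* Skip one additional entry if --after-cursor= was used, so that the entry the cursor
+                 * points at is not shown itself. */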
+ if (!arg_reverse)
+ r = sd_journal_next_skip(j, 1 + !!arg_after_cursor);
+ else
+ r = sd_journal_previous_skip(j, 1 + !!arg_after_cursor);
+
+ if (arg_after_cursor && r < 2) {
+ /* We couldn't find the next entry after the cursor. */
+ if (arg_follow)
+ need_seek = true;
+ else
+ arg_lines = 0;
+ }
+
+ } else if (arg_since_set && !arg_reverse) {
+ r = sd_journal_seek_realtime_usec(j, arg_since);
+ if (r < 0) {
+ log_error_errno(r, "Failed to seek to date: %m");
+ goto finish;
+ }
+ r = sd_journal_next(j);
+
+ } else if (arg_until_set && arg_reverse) {
+ r = sd_journal_seek_realtime_usec(j, arg_until);
+ if (r < 0) {
+ log_error_errno(r, "Failed to seek to date: %m");
+ goto finish;
+ }
+ r = sd_journal_previous(j);
+
+ } else if (arg_lines >= 0) {
+ r = sd_journal_seek_tail(j);
+ if (r < 0) {
+ log_error_errno(r, "Failed to seek to tail: %m");
+ goto finish;
+ }
+
+ r = sd_journal_previous_skip(j, arg_lines);
+
+ } else if (arg_reverse) {
+ r = sd_journal_seek_tail(j);
+ if (r < 0) {
+ log_error_errno(r, "Failed to seek to tail: %m");
+ goto finish;
+ }
+
+ r = sd_journal_previous(j);
+
+ } else {
+ r = sd_journal_seek_head(j);
+ if (r < 0) {
+ log_error_errno(r, "Failed to seek to head: %m");
+ goto finish;
+ }
+
+ r = sd_journal_next(j);
+ }
+
+ if (r < 0) {
+ log_error_errno(r, "Failed to iterate through journal: %m");
+ goto finish;
+ }
+ if (r == 0)
+ need_seek = true;
+
+ if (!arg_follow)
+ (void) pager_open(arg_pager_flags);
+
+ if (!arg_quiet && (arg_lines != 0 || arg_follow)) {
+ usec_t start, end;
+ char start_buf[FORMAT_TIMESTAMP_MAX], end_buf[FORMAT_TIMESTAMP_MAX];
+
+ r = sd_journal_get_cutoff_realtime_usec(j, &start, &end);
+ if (r < 0) {
+ log_error_errno(r, "Failed to get cutoff: %m");
+ goto finish;
+ }
+
+ if (r > 0) {
+ if (arg_follow)
+ printf("-- Logs begin at %s. --\n",
+ format_timestamp_maybe_utc(start_buf, sizeof(start_buf), start));
+ else
+ printf("-- Logs begin at %s, end at %s. --\n",
+ format_timestamp_maybe_utc(start_buf, sizeof(start_buf), start),
+ format_timestamp_maybe_utc(end_buf, sizeof(end_buf), end));
+ }
+ }
+
+ for (;;) {
+ while (arg_lines < 0 || n_shown < arg_lines || (arg_follow && !first_line)) {
+ int flags;
+ size_t highlight[2] = {};
+
+ if (need_seek) {
+ if (!arg_reverse)
+ r = sd_journal_next(j);
+ else
+ r = sd_journal_previous(j);
+ if (r < 0) {
+ log_error_errno(r, "Failed to iterate through journal: %m");
+ goto finish;
+ }
+ if (r == 0)
+ break;
+ }
+
+ if (arg_until_set && !arg_reverse) {
+ usec_t usec;
+
+ r = sd_journal_get_realtime_usec(j, &usec);
+ if (r < 0) {
+ log_error_errno(r, "Failed to determine timestamp: %m");
+ goto finish;
+ }
+ if (usec > arg_until)
+ goto finish;
+ }
+
+ if (arg_since_set && arg_reverse) {
+ usec_t usec;
+
+ r = sd_journal_get_realtime_usec(j, &usec);
+ if (r < 0) {
+ log_error_errno(r, "Failed to determine timestamp: %m");
+ goto finish;
+ }
+ if (usec < arg_since)
+ goto finish;
+ }
+
+ if (!arg_merge && !arg_quiet) {
+ sd_id128_t boot_id;
+
+ r = sd_journal_get_monotonic_usec(j, NULL, &boot_id);
+ if (r >= 0) {
+ if (previous_boot_id_valid &&
+ !sd_id128_equal(boot_id, previous_boot_id))
+ printf("%s-- Reboot --%s\n",
+ ansi_highlight(), ansi_normal());
+
+ previous_boot_id = boot_id;
+ previous_boot_id_valid = true;
+ }
+ }
+
+#if HAVE_PCRE2
+ if (arg_compiled_pattern) {
+ _cleanup_(pcre2_match_data_freep) pcre2_match_data *md = NULL;
+ const void *message;
+ size_t len;
+ PCRE2_SIZE *ovec;
+
+ md = pcre2_match_data_create(1, NULL);
+ if (!md)
+ return log_oom();
+
+ r = sd_journal_get_data(j, "MESSAGE", &message, &len);
+ if (r < 0) {
+ if (r == -ENOENT) {
+ need_seek = true;
+ continue;
+ }
+
+ log_error_errno(r, "Failed to get MESSAGE field: %m");
+ goto finish;
+ }
+
+ assert_se(message = startswith(message, "MESSAGE="));
+
+ r = pcre2_match(arg_compiled_pattern,
+ message,
+ len - strlen("MESSAGE="),
+ 0, /* start at offset 0 in the subject */
+ 0, /* default options */
+ md,
+ NULL);
+ if (r == PCRE2_ERROR_NOMATCH) {
+ need_seek = true;
+ continue;
+ }
+ if (r < 0) {
+ unsigned char buf[LINE_MAX];
+ int r2;
+
+ r2 = pcre2_get_error_message(r, buf, sizeof buf);
+ log_error("Pattern matching failed: %s",
+ r2 < 0 ? "unknown error" : (char*) buf);
+ r = -EINVAL;
+ goto finish;
+ }
+
+ ovec = pcre2_get_ovector_pointer(md);
+ highlight[0] = ovec[0];
+ highlight[1] = ovec[1];
+ }
+#endif
+
+ flags =
+ arg_all * OUTPUT_SHOW_ALL |
+ arg_full * OUTPUT_FULL_WIDTH |
+ colors_enabled() * OUTPUT_COLOR |
+ arg_catalog * OUTPUT_CATALOG |
+ arg_utc * OUTPUT_UTC |
+ arg_no_hostname * OUTPUT_NO_HOSTNAME;
+
+ r = show_journal_entry(stdout, j, arg_output, 0, flags,
+ arg_output_fields, highlight, &ellipsized);
+ need_seek = true;
+ if (r == -EADDRNOTAVAIL)
+ break;
+ else if (r < 0)
+ goto finish;
+
+ n_shown++;
+
+                        /* If journalctl takes a long time to process messages, and during that time journal file
+ * rotation occurs, a journalctl client will keep those rotated files open until it calls
+ * sd_journal_process(), which typically happens as a result of calling sd_journal_wait() below
+ * in the "following" case. By periodically calling sd_journal_process() during the processing
+ * loop we shrink the window of time a client instance has open file descriptors for rotated
+ * (deleted) journal files. */
+ if ((n_shown % PROCESS_INOTIFY_INTERVAL) == 0) {
+ r = sd_journal_process(j);
+ if (r < 0) {
+ log_error_errno(r, "Failed to process inotify events: %m");
+ goto finish;
+ }
+ }
+ }
+
+ if (!arg_follow) {
+ if (n_shown == 0 && !arg_quiet)
+ printf("-- No entries --\n");
+
+ if (arg_show_cursor) {
+ _cleanup_free_ char *cursor = NULL;
+
+ r = sd_journal_get_cursor(j, &cursor);
+ if (r < 0 && r != -EADDRNOTAVAIL)
+ log_error_errno(r, "Failed to get cursor: %m");
+ else if (r >= 0)
+ printf("-- cursor: %s\n", cursor);
+ }
+
+ break;
+ }
+
+ fflush(stdout);
+
+ r = wait_for_change(j, poll_fd);
+ if (r < 0)
+ goto finish;
+
+ first_line = false;
+ }
+
+finish:
+ fflush(stdout);
+ pager_close();
+
+ strv_free(arg_file);
+
+ strv_free(arg_syslog_identifier);
+ strv_free(arg_system_units);
+ strv_free(arg_user_units);
+ strv_free(arg_output_fields);
+
+ free(arg_root);
+ free(arg_verify_key);
+
+#if HAVE_PCRE2
+ if (arg_compiled_pattern)
+ pcre2_code_free(arg_compiled_pattern);
+#endif
+
+ return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
+}
diff --git a/src/journal/journald-audit.c b/src/journal/journald-audit.c
new file mode 100644
index 0000000..accbad4
--- /dev/null
+++ b/src/journal/journald-audit.c
@@ -0,0 +1,545 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include "alloc-util.h"
+#include "audit-type.h"
+#include "fd-util.h"
+#include "hexdecoct.h"
+#include "io-util.h"
+#include "journald-audit.h"
+#include "missing.h"
+#include "string-util.h"
+
+typedef struct MapField {
+ const char *audit_field;
+ const char *journal_field;
+ int (*map)(const char *field, const char **p, struct iovec **iov, size_t *n_iov_allocated, size_t *n_iov);
+} MapField;
+
+static int map_simple_field(const char *field, const char **p, struct iovec **iov, size_t *n_iov_allocated, size_t *n_iov) {
+ _cleanup_free_ char *c = NULL;
+ size_t l = 0, allocated = 0;
+ const char *e;
+
+ assert(field);
+ assert(p);
+ assert(iov);
+ assert(n_iov);
+
+ l = strlen(field);
+ allocated = l + 1;
+ c = malloc(allocated);
+ if (!c)
+ return -ENOMEM;
+
+ memcpy(c, field, l);
+ for (e = *p; !IN_SET(*e, 0, ' '); e++) {
+ if (!GREEDY_REALLOC(c, allocated, l+2))
+ return -ENOMEM;
+
+ c[l++] = *e;
+ }
+
+ c[l] = 0;
+
+ if (!GREEDY_REALLOC(*iov, *n_iov_allocated, *n_iov + 1))
+ return -ENOMEM;
+
+ (*iov)[(*n_iov)++] = IOVEC_MAKE(c, l);
+
+ *p = e;
+ c = NULL;
+
+ return 1;
+}
+
+static int map_string_field_internal(const char *field, const char **p, struct iovec **iov, size_t *n_iov_allocated, size_t *n_iov, bool filter_printable) {
+ _cleanup_free_ char *c = NULL;
+ const char *s, *e;
+ size_t l;
+
+ assert(field);
+ assert(p);
+ assert(iov);
+ assert(n_iov);
+
+        /* The kernel emits string fields in one of two formats: as a quoted string, or hex-encoded. */
+
+ if (**p == '"') {
+ /* Normal quoted syntax */
+ s = *p + 1;
+ e = strchr(s, '"');
+ if (!e)
+ return 0;
+
+ l = strlen(field) + (e - s);
+ c = malloc(l+1);
+ if (!c)
+ return -ENOMEM;
+
+ *((char*) mempcpy(stpcpy(c, field), s, e - s)) = 0;
+
+ e += 1;
+
+ } else if (unhexchar(**p) >= 0) {
+ /* Hexadecimal escaping */
+ size_t allocated = 0;
+
+ l = strlen(field);
+ allocated = l + 2;
+ c = malloc(allocated);
+ if (!c)
+ return -ENOMEM;
+
+ memcpy(c, field, l);
+ for (e = *p; !IN_SET(*e, 0, ' '); e += 2) {
+ int a, b;
+ uint8_t x;
+
+ a = unhexchar(e[0]);
+ if (a < 0)
+ return 0;
+
+ b = unhexchar(e[1]);
+ if (b < 0)
+ return 0;
+
+ x = ((uint8_t) a << 4 | (uint8_t) b);
+
+ if (filter_printable && x < (uint8_t) ' ')
+ x = (uint8_t) ' ';
+
+ if (!GREEDY_REALLOC(c, allocated, l+2))
+ return -ENOMEM;
+
+ c[l++] = (char) x;
+ }
+
+ c[l] = 0;
+ } else
+ return 0;
+
+ if (!GREEDY_REALLOC(*iov, *n_iov_allocated, *n_iov + 1))
+ return -ENOMEM;
+
+ (*iov)[(*n_iov)++] = IOVEC_MAKE(c, l);
+
+ *p = e;
+ c = NULL;
+
+ return 1;
+}
+
+static int map_string_field(const char *field, const char **p, struct iovec **iov, size_t *n_iov_allocated, size_t *n_iov) {
+ return map_string_field_internal(field, p, iov, n_iov_allocated, n_iov, false);
+}
+
+static int map_string_field_printable(const char *field, const char **p, struct iovec **iov, size_t *n_iov_allocated, size_t *n_iov) {
+ return map_string_field_internal(field, p, iov, n_iov_allocated, n_iov, true);
+}
+
+static int map_generic_field(const char *prefix, const char **p, struct iovec **iov, size_t *n_iov_allocated, size_t *n_iov) {
+ const char *e, *f;
+ char *c, *t;
+ int r;
+
+ /* Implements fallback mappings for all fields we don't know */
+
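+        /* Accept only field names of 1..15 characters from [A-Za-z0-9_-], immediately followed by '='. */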
+ for (e = *p; e < *p + 16; e++) {
+
+ if (IN_SET(*e, 0, ' '))
+ return 0;
+
+ if (*e == '=')
+ break;
+
+ if (!((*e >= 'a' && *e <= 'z') ||
+ (*e >= 'A' && *e <= 'Z') ||
+ (*e >= '0' && *e <= '9') ||
+ IN_SET(*e, '_', '-')))
+ return 0;
+ }
+
+ if (e <= *p || e >= *p + 16)
+ return 0;
+
+ c = newa(char, strlen(prefix) + (e - *p) + 2);
+
+ t = stpcpy(c, prefix);
+ for (f = *p; f < e; f++) {
+ char x;
+
+ if (*f >= 'a' && *f <= 'z')
+ x = (*f - 'a') + 'A'; /* uppercase */
+ else if (*f == '-')
+ x = '_'; /* dashes → underscores */
+ else
+ x = *f;
+
+ *(t++) = x;
+ }
+ strcpy(t, "=");
+
+ e++;
+
+ r = map_simple_field(c, &e, iov, n_iov_allocated, n_iov);
+ if (r < 0)
+ return r;
+
+ *p = e;
+ return r;
+}
+
+/* Kernel fields are those occurring in the audit string before
+ * msg='. All of these fields are trusted, hence carry the "_" prefix.
+ * We try to translate the fields we know into our native names. The
+ * others are generically mapped to _AUDIT_FIELD_XYZ= */
+static const MapField map_fields_kernel[] = {
+
+ /* First, we map certain well-known audit fields into native
+ * well-known fields */
+ { "pid=", "_PID=", map_simple_field },
+ { "ppid=", "_PPID=", map_simple_field },
+ { "uid=", "_UID=", map_simple_field },
+ { "euid=", "_EUID=", map_simple_field },
+ { "fsuid=", "_FSUID=", map_simple_field },
+ { "gid=", "_GID=", map_simple_field },
+ { "egid=", "_EGID=", map_simple_field },
+ { "fsgid=", "_FSGID=", map_simple_field },
+ { "tty=", "_TTY=", map_simple_field },
+ { "ses=", "_AUDIT_SESSION=", map_simple_field },
+ { "auid=", "_AUDIT_LOGINUID=", map_simple_field },
+ { "subj=", "_SELINUX_CONTEXT=", map_simple_field },
+ { "comm=", "_COMM=", map_string_field },
+ { "exe=", "_EXE=", map_string_field },
+ { "proctitle=", "_CMDLINE=", map_string_field_printable },
+
+ /* Some fields don't map to native well-known fields. However,
+ * we know that they are string fields, hence let's undo
+ * string field escaping for them, though we stick to the
+ * generic field names. */
+ { "path=", "_AUDIT_FIELD_PATH=", map_string_field },
+ { "dev=", "_AUDIT_FIELD_DEV=", map_string_field },
+ { "name=", "_AUDIT_FIELD_NAME=", map_string_field },
+ {}
+};
+
+/* Userspace fields are those occurring in the audit string after
+ * msg='. All of these fields are untrusted, hence carry no "_"
+ * prefix. We map the fields we don't know to AUDIT_FIELD_XYZ= */
+static const MapField map_fields_userspace[] = {
+ { "cwd=", "AUDIT_FIELD_CWD=", map_string_field },
+ { "cmd=", "AUDIT_FIELD_CMD=", map_string_field },
+ { "acct=", "AUDIT_FIELD_ACCT=", map_string_field },
+ { "exe=", "AUDIT_FIELD_EXE=", map_string_field },
+ { "comm=", "AUDIT_FIELD_COMM=", map_string_field },
+ {}
+};
+
+static int map_all_fields(
+ const char *p,
+ const MapField map_fields[],
+ const char *prefix,
+ bool handle_msg,
+ struct iovec **iov,
+ size_t *n_iov_allocated,
+ size_t *n_iov) {
+
+ int r;
+
+ assert(p);
+ assert(iov);
+ assert(n_iov_allocated);
+ assert(n_iov);
+
+ for (;;) {
+ bool mapped = false;
+ const MapField *m;
+ const char *v;
+
+ p += strspn(p, WHITESPACE);
+
+ if (*p == 0)
+ return 0;
+
+ if (handle_msg) {
+ v = startswith(p, "msg='");
+ if (v) {
+ const char *e;
+ char *c;
+
+                                /* Userspace message. It's enclosed in
+                                   simple quotation marks, is not
+                                   escaped, and is the last field in
+                                   the line. Hence let's remove the
+                                   closing quotation mark, and apply
+                                   the userspace mapping instead of
+                                   the kernel mapping. */
+
+ e = endswith(v, "'");
+ if (!e)
+ return 0; /* don't continue splitting up if the final quotation mark is missing */
+
+ c = strndupa(v, e - v);
+ return map_all_fields(c, map_fields_userspace, "AUDIT_FIELD_", false, iov, n_iov_allocated, n_iov);
+ }
+ }
+
+ /* Try to map the kernel fields to our own names */
+ for (m = map_fields; m->audit_field; m++) {
+ v = startswith(p, m->audit_field);
+ if (!v)
+ continue;
+
+ r = m->map(m->journal_field, &v, iov, n_iov_allocated, n_iov);
+ if (r < 0)
+ return log_debug_errno(r, "Failed to parse audit array: %m");
+
+ if (r > 0) {
+ mapped = true;
+ p = v;
+ break;
+ }
+ }
+
+ if (!mapped) {
+ r = map_generic_field(prefix, &p, iov, n_iov_allocated, n_iov);
+ if (r < 0)
+ return log_debug_errno(r, "Failed to parse audit array: %m");
+
+ if (r == 0)
+ /* Couldn't process as generic field, let's just skip over it */
+ p += strcspn(p, WHITESPACE);
+ }
+ }
+}
+
+void process_audit_string(Server *s, int type, const char *data, size_t size) {
+ size_t n_iov_allocated = 0, n_iov = 0, z;
+ _cleanup_free_ struct iovec *iov = NULL;
+ uint64_t seconds, msec, id;
+ const char *p, *type_name;
+ char id_field[sizeof("_AUDIT_ID=") + DECIMAL_STR_MAX(uint64_t)],
+ type_field[sizeof("_AUDIT_TYPE=") + DECIMAL_STR_MAX(int)],
+ source_time_field[sizeof("_SOURCE_REALTIME_TIMESTAMP=") + DECIMAL_STR_MAX(usec_t)];
+ char *m, *type_field_name;
+ int k;
+
+ assert(s);
+
+ if (size <= 0)
+ return;
+
+ if (!data)
+ return;
+
+ /* Note that the input buffer is NUL terminated, but let's
+ * check whether there is a spurious NUL byte */
+ if (memchr(data, 0, size))
+ return;
+
+ p = startswith(data, "audit");
+ if (!p)
+ return;
+
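+        /* Parse the "(<seconds>.<msec>:<id>):" header that follows the "audit" prefix. */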
+ k = 0;
+ if (sscanf(p, "(%" PRIu64 ".%" PRIu64 ":%" PRIu64 "):%n",
+ &seconds,
+ &msec,
+ &id,
+ &k) != 3 || k == 0)
+ return;
+
+ p += k;
+ p += strspn(p, WHITESPACE);
+
+ if (isempty(p))
+ return;
+
+ n_iov_allocated = N_IOVEC_META_FIELDS + 8;
+ iov = new(struct iovec, n_iov_allocated);
+ if (!iov) {
+ log_oom();
+ return;
+ }
+
+ iov[n_iov++] = IOVEC_MAKE_STRING("_TRANSPORT=audit");
+
+ sprintf(source_time_field, "_SOURCE_REALTIME_TIMESTAMP=%" PRIu64,
+ (usec_t) seconds * USEC_PER_SEC + (usec_t) msec * USEC_PER_MSEC);
+ iov[n_iov++] = IOVEC_MAKE_STRING(source_time_field);
+
+ sprintf(type_field, "_AUDIT_TYPE=%i", type);
+ iov[n_iov++] = IOVEC_MAKE_STRING(type_field);
+
+ sprintf(id_field, "_AUDIT_ID=%" PRIu64, id);
+ iov[n_iov++] = IOVEC_MAKE_STRING(id_field);
+
+ assert_cc(4 == LOG_FAC(LOG_AUTH));
+ iov[n_iov++] = IOVEC_MAKE_STRING("SYSLOG_FACILITY=4");
+ iov[n_iov++] = IOVEC_MAKE_STRING("SYSLOG_IDENTIFIER=audit");
+
+ type_name = audit_type_name_alloca(type);
+
+ type_field_name = strjoina("_AUDIT_TYPE_NAME=", type_name);
+ iov[n_iov++] = IOVEC_MAKE_STRING(type_field_name);
+
+ m = strjoina("MESSAGE=", type_name, " ", p);
+ iov[n_iov++] = IOVEC_MAKE_STRING(m);
+
+ z = n_iov;
+
+ map_all_fields(p, map_fields_kernel, "_AUDIT_FIELD_", true, &iov, &n_iov_allocated, &n_iov);
+
+ if (!GREEDY_REALLOC(iov, n_iov_allocated, n_iov + N_IOVEC_META_FIELDS)) {
+ log_oom();
+ goto finish;
+ }
+
+ server_dispatch_message(s, iov, n_iov, n_iov_allocated, NULL, NULL, LOG_NOTICE, 0);
+
+finish:
+ /* free() all entries that map_all_fields() added. All others
+ * are allocated on the stack or are constant. */
+
+ for (; z < n_iov; z++)
+ free(iov[z].iov_base);
+}
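For orientation, an audit record starts with a header of the form "audit(&lt;seconds&gt;.&lt;milliseconds&gt;:&lt;serial&gt;):" followed by the key/value payload. The following standalone sketch runs the same header scan on a single made-up line (the input line and the printed fields are illustrative only):

#include <inttypes.h>
#include <stdio.h>

int main(void) {
        /* Made-up sample record, in the shape the kernel audit multicast delivers. */
        const char *line = "audit(1530187293.614:1596): pid=1643 uid=0 auid=1000 ses=2 msg='op=PAM:session_open acct=\"root\" res=success'";
        uint64_t seconds, msec, id;
        int k = 0;

        if (sscanf(line, "audit(%" SCNu64 ".%" SCNu64 ":%" SCNu64 "):%n", &seconds, &msec, &id, &k) == 3 && k > 0)
                printf("_SOURCE_REALTIME_TIMESTAMP=%" PRIu64 "\n_AUDIT_ID=%" PRIu64 "\npayload: %s\n",
                       seconds * 1000000 + msec * 1000, id, line + k);
        return 0;
}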
+
+void server_process_audit_message(
+ Server *s,
+ const void *buffer,
+ size_t buffer_size,
+ const struct ucred *ucred,
+ const union sockaddr_union *sa,
+ socklen_t salen) {
+
+ const struct nlmsghdr *nl = buffer;
+
+ assert(s);
+
+ if (buffer_size < ALIGN(sizeof(struct nlmsghdr)))
+ return;
+
+ assert(buffer);
+
+ /* Filter out fake data */
+ if (!sa ||
+ salen != sizeof(struct sockaddr_nl) ||
+ sa->nl.nl_family != AF_NETLINK ||
+ sa->nl.nl_pid != 0) {
+ log_debug("Audit netlink message from invalid sender.");
+ return;
+ }
+
+ if (!ucred || ucred->pid != 0) {
+ log_debug("Audit netlink message with invalid credentials.");
+ return;
+ }
+
+ if (!NLMSG_OK(nl, buffer_size)) {
+ log_error("Audit netlink message truncated.");
+ return;
+ }
+
+ /* Ignore special Netlink messages */
+ if (IN_SET(nl->nlmsg_type, NLMSG_NOOP, NLMSG_ERROR))
+ return;
+
+ /* Except for AUDIT_USER, all messages below AUDIT_FIRST_USER_MSG are control messages; let's ignore those */
+ if (nl->nlmsg_type < AUDIT_FIRST_USER_MSG && nl->nlmsg_type != AUDIT_USER)
+ return;
+
+ process_audit_string(s, nl->nlmsg_type, NLMSG_DATA(nl), nl->nlmsg_len - ALIGN(sizeof(struct nlmsghdr)));
+}
+
+static int enable_audit(int fd, bool b) {
+ struct {
+ union {
+ struct nlmsghdr header;
+ uint8_t header_space[NLMSG_HDRLEN];
+ };
+ struct audit_status body;
+ } _packed_ request = {
+ .header.nlmsg_len = NLMSG_LENGTH(sizeof(struct audit_status)),
+ .header.nlmsg_type = AUDIT_SET,
+ .header.nlmsg_flags = NLM_F_REQUEST,
+ .header.nlmsg_seq = 1,
+ .header.nlmsg_pid = 0,
+ .body.mask = AUDIT_STATUS_ENABLED,
+ .body.enabled = b,
+ };
+ union sockaddr_union sa = {
+ .nl.nl_family = AF_NETLINK,
+ .nl.nl_pid = 0,
+ };
+ struct iovec iovec = {
+ .iov_base = &request,
+ .iov_len = NLMSG_LENGTH(sizeof(struct audit_status)),
+ };
+ struct msghdr mh = {
+ .msg_iov = &iovec,
+ .msg_iovlen = 1,
+ .msg_name = &sa.sa,
+ .msg_namelen = sizeof(sa.nl),
+ };
+
+ ssize_t n;
+
+ n = sendmsg(fd, &mh, MSG_NOSIGNAL);
+ if (n < 0)
+ return -errno;
+ if (n != NLMSG_LENGTH(sizeof(struct audit_status)))
+ return -EIO;
+
+ /* We don't wait for the result here; we can't do anything
+ * about it anyway */
+
+ return 0;
+}
+
+int server_open_audit(Server *s) {
+ int r;
+
+ if (s->audit_fd < 0) {
+ static const union sockaddr_union sa = {
+ .nl.nl_family = AF_NETLINK,
+ .nl.nl_pid = 0,
+ .nl.nl_groups = AUDIT_NLGRP_READLOG,
+ };
+
+ s->audit_fd = socket(AF_NETLINK, SOCK_RAW|SOCK_CLOEXEC|SOCK_NONBLOCK, NETLINK_AUDIT);
+ if (s->audit_fd < 0) {
+ if (IN_SET(errno, EAFNOSUPPORT, EPROTONOSUPPORT))
+ log_debug("Audit not supported in the kernel.");
+ else
+ log_warning_errno(errno, "Failed to create audit socket, ignoring: %m");
+
+ return 0;
+ }
+
+ if (bind(s->audit_fd, &sa.sa, sizeof(sa.nl)) < 0) {
+ log_warning_errno(errno,
+ "Failed to join audit multicast group. "
+ "The kernel is probably too old or multicast reading is not supported. "
+ "Ignoring: %m");
+ s->audit_fd = safe_close(s->audit_fd);
+ return 0;
+ }
+ } else
+ (void) fd_nonblock(s->audit_fd, true);
+
+ r = setsockopt_int(s->audit_fd, SOL_SOCKET, SO_PASSCRED, true);
+ if (r < 0)
+ return log_error_errno(r, "Failed to set SO_PASSCRED on audit socket: %m");
+
+ r = sd_event_add_io(s->event, &s->audit_event_source, s->audit_fd, EPOLLIN, server_process_datagram, s);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add audit fd to event loop: %m");
+
+ /* We are listening now, try to enable audit */
+ r = enable_audit(s->audit_fd, true);
+ if (r < 0)
+ log_warning_errno(r, "Failed to issue audit enable call: %m");
+
+ return 0;
+}
diff --git a/src/journal/journald-audit.h b/src/journal/journald-audit.h
new file mode 100644
index 0000000..df41f81
--- /dev/null
+++ b/src/journal/journald-audit.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "journald-server.h"
+#include "socket-util.h"
+
+void server_process_audit_message(Server *s, const void *buffer, size_t buffer_size, const struct ucred *ucred, const union sockaddr_union *sa, socklen_t salen);
+
+void process_audit_string(Server *s, int type, const char *data, size_t size);
+
+int server_open_audit(Server *s);
diff --git a/src/journal/journald-console.c b/src/journal/journald-console.c
new file mode 100644
index 0000000..80e2958
--- /dev/null
+++ b/src/journal/journald-console.c
@@ -0,0 +1,103 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <time.h>
+
+#include "alloc-util.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "format-util.h"
+#include "io-util.h"
+#include "journald-console.h"
+#include "journald-server.h"
+#include "parse-util.h"
+#include "process-util.h"
+#include "stdio-util.h"
+#include "terminal-util.h"
+
+static bool prefix_timestamp(void) {
+
+ static int cached_printk_time = -1;
+
+ if (_unlikely_(cached_printk_time < 0)) {
+ _cleanup_free_ char *p = NULL;
+
+ cached_printk_time =
+ read_one_line_file("/sys/module/printk/parameters/time", &p) >= 0
+ && parse_boolean(p) > 0;
+ }
+
+ return cached_printk_time;
+}
+
+void server_forward_console(
+ Server *s,
+ int priority,
+ const char *identifier,
+ const char *message,
+ const struct ucred *ucred) {
+
+ struct iovec iovec[5];
+ struct timespec ts;
+ char tbuf[STRLEN("[] ") + DECIMAL_STR_MAX(ts.tv_sec) + DECIMAL_STR_MAX(ts.tv_nsec)-3 + 1];
+ char header_pid[STRLEN("[]: ") + DECIMAL_STR_MAX(pid_t)];
+ _cleanup_free_ char *ident_buf = NULL;
+ _cleanup_close_ int fd = -1;
+ const char *tty;
+ int n = 0;
+
+ assert(s);
+ assert(message);
+
+ if (LOG_PRI(priority) > s->max_level_console)
+ return;
+
+ /* First: timestamp */
+ if (prefix_timestamp()) {
+ assert_se(clock_gettime(CLOCK_MONOTONIC, &ts) == 0);
+ xsprintf(tbuf, "[%5"PRI_TIME".%06"PRI_NSEC"] ",
+ ts.tv_sec,
+ (nsec_t)ts.tv_nsec / 1000);
+
+ iovec[n++] = IOVEC_MAKE_STRING(tbuf);
+ }
+
+ /* Second: identifier and PID */
+ if (ucred) {
+ if (!identifier) {
+ get_process_comm(ucred->pid, &ident_buf);
+ identifier = ident_buf;
+ }
+
+ xsprintf(header_pid, "["PID_FMT"]: ", ucred->pid);
+
+ if (identifier)
+ iovec[n++] = IOVEC_MAKE_STRING(identifier);
+
+ iovec[n++] = IOVEC_MAKE_STRING(header_pid);
+ } else if (identifier) {
+ iovec[n++] = IOVEC_MAKE_STRING(identifier);
+ iovec[n++] = IOVEC_MAKE_STRING(": ");
+ }
+
+ /* Third: message */
+ iovec[n++] = IOVEC_MAKE_STRING(message);
+ iovec[n++] = IOVEC_MAKE_STRING("\n");
+
+ tty = s->tty_path ?: "/dev/console";
+
+ /* Before you ask: yes, on purpose we open/close the console for each log line we write individually. This is a
+ * good strategy to avoid journald getting killed by the kernel's SAK concept (it doesn't fix this entirely,
+ * but minimizes the time window the kernel might end up killing journald due to SAK). It also makes things
+ * easier for us so that we don't have to recover from hangups and suchlike triggered on the console. */
+
+ fd = open_terminal(tty, O_WRONLY|O_NOCTTY|O_CLOEXEC);
+ if (fd < 0) {
+ log_debug_errno(fd, "Failed to open %s for logging: %m", tty);
+ return;
+ }
+
+ if (writev(fd, iovec, n) < 0)
+ log_debug_errno(errno, "Failed to write to %s for logging: %m", tty);
+}
diff --git a/src/journal/journald-console.h b/src/journal/journald-console.h
new file mode 100644
index 0000000..3def00a
--- /dev/null
+++ b/src/journal/journald-console.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "journald-server.h"
+
+void server_forward_console(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred);
diff --git a/src/journal/journald-context.c b/src/journal/journald-context.c
new file mode 100644
index 0000000..7c51f2f
--- /dev/null
+++ b/src/journal/journald-context.c
@@ -0,0 +1,782 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#if HAVE_SELINUX
+#include <selinux/selinux.h>
+#endif
+
+#include "alloc-util.h"
+#include "audit-util.h"
+#include "cgroup-util.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "fs-util.h"
+#include "io-util.h"
+#include "journal-util.h"
+#include "journald-context.h"
+#include "parse-util.h"
+#include "path-util.h"
+#include "process-util.h"
+#include "procfs-util.h"
+#include "string-util.h"
+#include "syslog-util.h"
+#include "unaligned.h"
+#include "user-util.h"
+
+/* This implements a metadata cache for clients, which are identified by their PID. Requesting metadata through /proc
+ * is expensive, hence let's cache the data if we can. Note that this means the metadata might be out-of-date when we
+ * store it, but it might already be anyway, as we request the data asynchronously from /proc at a different time than
+ * when the log entry was originally created. We hence just increase the "window of inaccuracy" a bit.
+ *
+ * The cache is indexed by the PID. Entries may be "pinned" in the cache, in which case the entries are not removed
+ * until they are unpinned. Unpinned entries are kept around until cache pressure is seen. Cache entries older than 5s
+ * are never used (a sad attempt to deal with the UNIX weakness of PID reuse), cache entries older than 1s are
+ * refreshed in an incremental way (meaning: data is reread from /proc, but any old data we can't refresh is not
+ * flushed out). Data newer than 1s is used immediately without refresh.
+ *
+ * Log stream clients (i.e. all clients using the AF_UNIX/SOCK_STREAM stdout/stderr transport) will pin a cache entry
+ * as long as their socket is connected. Note that cache entries are shared between different transports. That means a
+ * cache entry pinned for the stream connection logic may be reused for the syslog or native protocols.
+ *
+ * Caching metadata like this has two major benefits:
+ *
+ * 1. Reading metadata is expensive, and we can thus substantially speed up log processing under flood.
+ *
+ * 2. Because metadata caching is shared between stream and datagram transports and stream connections pin a cache
+ * entry there's a good chance we can properly map a substantial set of datagram log messages to their originating
+ * service, as all services (unless explicitly configured otherwise) will have their stdout/stderr connected to a
+ * stream connection. This should improve cases where a service process logs immediately before exiting and we
+ * previously had trouble associating the log message with the service.
+ *
+ * NB: With and without the metadata cache: the implicitly added entry metadata in the journal (with the exception of
+ * UID/PID/GID and SELinux label) must be understood as possibly slightly out of sync (i.e. sometimes slightly older
+ * and sometimes slightly newer than what was current at the log event).
+ */
+
+/* We refresh every 1s */
+#define REFRESH_USEC (1*USEC_PER_SEC)
+
+/* Data older than 5s we flush out */
+#define MAX_USEC (5*USEC_PER_SEC)
+
+/* Keep at most 16K entries in the cache. (Note though that this limit may be violated if enough streams pin entries in
+ * the cache, in which case we *do* permit this limit to be breached. That's safe however, as the number of stream
+ * clients itself is limited.) */
+#define CACHE_MAX_FALLBACK 128U
+#define CACHE_MAX_MAX (16*1024U)
+#define CACHE_MAX_MIN 64U
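Taken together, the aging rules above amount to a small decision function over an entry's age and pin count. The following is a minimal, self-contained sketch of that policy (illustrative only; the real refresh logic additionally compares the passed-in credentials and SELinux label against the cached values):

#include <stdint.h>
#include <stdio.h>

#define SKETCH_REFRESH_USEC (1u * 1000000u)  /* mirrors REFRESH_USEC above */
#define SKETCH_MAX_USEC     (5u * 1000000u)  /* mirrors MAX_USEC above */

typedef enum { USE_CACHED, REFRESH, RESET_AND_REFRESH } CacheDecision;

static CacheDecision decide(uint64_t entry_ts, unsigned n_ref, uint64_t now) {
        if (n_ref == 0 && entry_ts + SKETCH_MAX_USEC < now)
                return RESET_AND_REFRESH;   /* unpinned and older than 5s: flush everything and reread */
        if (entry_ts + SKETCH_REFRESH_USEC < now)
                return REFRESH;             /* older than 1s: reread /proc, but keep stale bits we can't refresh */
        return USE_CACHED;                  /* fresh enough: use as-is */
}

int main(void) {
        printf("%d %d %d\n",
               decide(0, 0, 6000000),        /* prints 2: unpinned, 6s old */
               decide(0, 1, 6000000),        /* prints 1: pinned, so only refreshed */
               decide(5500000, 0, 6000000)); /* prints 0: only 0.5s old */
        return 0;
}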
+
+static size_t cache_max(void) {
+ static size_t cached = -1;
+
+ if (cached == (size_t) -1) {
+ uint64_t mem_total;
+ int r;
+
+ r = procfs_memory_get(&mem_total, NULL);
+ if (r < 0) {
+ log_warning_errno(r, "Cannot query /proc/meminfo for MemTotal: %m");
+ cached = CACHE_MAX_FALLBACK;
+ } else {
+ /* Cache entries are usually a few kB, but the process cmdline is controlled by the
+ * user and can be up to _SC_ARG_MAX, usually 2MB. Let's say that approximately up to
+ * 1/8th of memory may be used by the cache.
+ *
+ * In the common case, this formula gives 64 cache entries for each GB of RAM.
+ */
+ long l = sysconf(_SC_ARG_MAX);
+ assert(l > 0);
+
+ cached = CLAMP(mem_total / 8 / (uint64_t) l, CACHE_MAX_MIN, CACHE_MAX_MAX);
+ }
+ }
+
+ return cached;
+}
+
+static int client_context_compare(const void *a, const void *b) {
+ const ClientContext *x = a, *y = b;
+ int r;
+
+ r = CMP(x->timestamp, y->timestamp);
+ if (r != 0)
+ return r;
+
+ return CMP(x->pid, y->pid);
+}
+
+static int client_context_new(Server *s, pid_t pid, ClientContext **ret) {
+ ClientContext *c;
+ int r;
+
+ assert(s);
+ assert(pid_is_valid(pid));
+ assert(ret);
+
+ r = hashmap_ensure_allocated(&s->client_contexts, NULL);
+ if (r < 0)
+ return r;
+
+ r = prioq_ensure_allocated(&s->client_contexts_lru, client_context_compare);
+ if (r < 0)
+ return r;
+
+ c = new0(ClientContext, 1);
+ if (!c)
+ return -ENOMEM;
+
+ c->pid = pid;
+
+ c->uid = UID_INVALID;
+ c->gid = GID_INVALID;
+ c->auditid = AUDIT_SESSION_INVALID;
+ c->loginuid = UID_INVALID;
+ c->owner_uid = UID_INVALID;
+ c->lru_index = PRIOQ_IDX_NULL;
+ c->timestamp = USEC_INFINITY;
+ c->extra_fields_mtime = NSEC_INFINITY;
+ c->log_level_max = -1;
+ c->log_rate_limit_interval = s->rate_limit_interval;
+ c->log_rate_limit_burst = s->rate_limit_burst;
+
+ r = hashmap_put(s->client_contexts, PID_TO_PTR(pid), c);
+ if (r < 0) {
+ free(c);
+ return r;
+ }
+
+ *ret = c;
+ return 0;
+}
+
+static void client_context_reset(Server *s, ClientContext *c) {
+ assert(s);
+ assert(c);
+
+ c->timestamp = USEC_INFINITY;
+
+ c->uid = UID_INVALID;
+ c->gid = GID_INVALID;
+
+ c->comm = mfree(c->comm);
+ c->exe = mfree(c->exe);
+ c->cmdline = mfree(c->cmdline);
+ c->capeff = mfree(c->capeff);
+
+ c->auditid = AUDIT_SESSION_INVALID;
+ c->loginuid = UID_INVALID;
+
+ c->cgroup = mfree(c->cgroup);
+ c->session = mfree(c->session);
+ c->owner_uid = UID_INVALID;
+ c->unit = mfree(c->unit);
+ c->user_unit = mfree(c->user_unit);
+ c->slice = mfree(c->slice);
+ c->user_slice = mfree(c->user_slice);
+
+ c->invocation_id = SD_ID128_NULL;
+
+ c->label = mfree(c->label);
+ c->label_size = 0;
+
+ c->extra_fields_iovec = mfree(c->extra_fields_iovec);
+ c->extra_fields_n_iovec = 0;
+ c->extra_fields_data = mfree(c->extra_fields_data);
+ c->extra_fields_mtime = NSEC_INFINITY;
+
+ c->log_level_max = -1;
+
+ c->log_rate_limit_interval = s->rate_limit_interval;
+ c->log_rate_limit_burst = s->rate_limit_burst;
+}
+
+static ClientContext* client_context_free(Server *s, ClientContext *c) {
+ assert(s);
+
+ if (!c)
+ return NULL;
+
+ assert_se(hashmap_remove(s->client_contexts, PID_TO_PTR(c->pid)) == c);
+
+ if (c->in_lru)
+ assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
+
+ client_context_reset(s, c);
+
+ return mfree(c);
+}
+
+static void client_context_read_uid_gid(ClientContext *c, const struct ucred *ucred) {
+ assert(c);
+ assert(pid_is_valid(c->pid));
+
+ /* The ucred data passed in is always the most current and accurate, if we have any. Use it. */
+ if (ucred && uid_is_valid(ucred->uid))
+ c->uid = ucred->uid;
+ else
+ (void) get_process_uid(c->pid, &c->uid);
+
+ if (ucred && gid_is_valid(ucred->gid))
+ c->gid = ucred->gid;
+ else
+ (void) get_process_gid(c->pid, &c->gid);
+}
+
+static void client_context_read_basic(ClientContext *c) {
+ char *t;
+
+ assert(c);
+ assert(pid_is_valid(c->pid));
+
+ if (get_process_comm(c->pid, &t) >= 0)
+ free_and_replace(c->comm, t);
+
+ if (get_process_exe(c->pid, &t) >= 0)
+ free_and_replace(c->exe, t);
+
+ if (get_process_cmdline(c->pid, 0, false, &t) >= 0)
+ free_and_replace(c->cmdline, t);
+
+ if (get_process_capeff(c->pid, &t) >= 0)
+ free_and_replace(c->capeff, t);
+}
+
+static int client_context_read_label(
+ ClientContext *c,
+ const char *label, size_t label_size) {
+
+ assert(c);
+ assert(pid_is_valid(c->pid));
+ assert(label_size == 0 || label);
+
+ if (label_size > 0) {
+ char *l;
+
+ /* If we got an SELinux label passed in it counts. */
+
+ l = newdup_suffix0(char, label, label_size);
+ if (!l)
+ return -ENOMEM;
+
+ free_and_replace(c->label, l);
+ c->label_size = label_size;
+ }
+#if HAVE_SELINUX
+ else {
+ char *con;
+
+ /* If we got no SELinux label passed in, let's try to acquire one */
+
+ if (getpidcon(c->pid, &con) >= 0) {
+ free_and_replace(c->label, con);
+ c->label_size = strlen(c->label);
+ }
+ }
+#endif
+
+ return 0;
+}
+
+static int client_context_read_cgroup(Server *s, ClientContext *c, const char *unit_id) {
+ _cleanup_free_ char *t = NULL;
+ int r;
+
+ assert(c);
+
+ /* Try to acquire the current cgroup path */
+ r = cg_pid_get_path_shifted(c->pid, s->cgroup_root, &t);
+ if (r < 0 || empty_or_root(t)) {
+ /* We use the unit ID passed in as a fallback if we have nothing cached yet and cg_pid_get_path_shifted()
+ * failed, or if the process is running in the root cgroup. Zombie processes are automatically migrated to the
+ * root cgroup on cgroup v1, and we want to be able to map log messages from them too. */
+ if (unit_id && !c->unit) {
+ c->unit = strdup(unit_id);
+ if (c->unit)
+ return 0;
+ }
+
+ return r;
+ }
+
+ /* Let's shortcut this if the cgroup path didn't change */
+ if (streq_ptr(c->cgroup, t))
+ return 0;
+
+ free_and_replace(c->cgroup, t);
+
+ (void) cg_path_get_session(c->cgroup, &t);
+ free_and_replace(c->session, t);
+
+ if (cg_path_get_owner_uid(c->cgroup, &c->owner_uid) < 0)
+ c->owner_uid = UID_INVALID;
+
+ (void) cg_path_get_unit(c->cgroup, &t);
+ free_and_replace(c->unit, t);
+
+ (void) cg_path_get_user_unit(c->cgroup, &t);
+ free_and_replace(c->user_unit, t);
+
+ (void) cg_path_get_slice(c->cgroup, &t);
+ free_and_replace(c->slice, t);
+
+ (void) cg_path_get_user_slice(c->cgroup, &t);
+ free_and_replace(c->user_slice, t);
+
+ return 0;
+}
+
+static int client_context_read_invocation_id(
+ Server *s,
+ ClientContext *c) {
+
+ _cleanup_free_ char *value = NULL;
+ const char *p;
+ int r;
+
+ assert(s);
+ assert(c);
+
+ /* Read the invocation ID of the unit the client belongs to. PID 1 stores it in a per-unit symlink in /run/systemd/units/ */
+
+ if (!c->unit)
+ return 0;
+
+ p = strjoina("/run/systemd/units/invocation:", c->unit);
+ r = readlink_malloc(p, &value);
+ if (r < 0)
+ return r;
+
+ return sd_id128_from_string(value, &c->invocation_id);
+}
+
+static int client_context_read_log_level_max(
+ Server *s,
+ ClientContext *c) {
+
+ _cleanup_free_ char *value = NULL;
+ const char *p;
+ int r, ll;
+
+ if (!c->unit)
+ return 0;
+
+ p = strjoina("/run/systemd/units/log-level-max:", c->unit);
+ r = readlink_malloc(p, &value);
+ if (r < 0)
+ return r;
+
+ ll = log_level_from_string(value);
+ if (ll < 0)
+ return -EINVAL;
+
+ c->log_level_max = ll;
+ return 0;
+}
+
+static int client_context_read_extra_fields(
+ Server *s,
+ ClientContext *c) {
+
+ size_t size = 0, n_iovec = 0, n_allocated = 0, left;
+ _cleanup_free_ struct iovec *iovec = NULL;
+ _cleanup_free_ void *data = NULL;
+ _cleanup_fclose_ FILE *f = NULL;
+ struct stat st;
+ const char *p;
+ uint8_t *q;
+ int r;
+
+ if (!c->unit)
+ return 0;
+
+ p = strjoina("/run/systemd/units/log-extra-fields:", c->unit);
+
+ if (c->extra_fields_mtime != NSEC_INFINITY) {
+ if (stat(p, &st) < 0) {
+ if (errno == ENOENT)
+ return 0;
+
+ return -errno;
+ }
+
+ if (timespec_load_nsec(&st.st_mtim) == c->extra_fields_mtime)
+ return 0;
+ }
+
+ f = fopen(p, "re");
+ if (!f) {
+ if (errno == ENOENT)
+ return 0;
+
+ return -errno;
+ }
+
+ if (fstat(fileno(f), &st) < 0) /* The file might have been replaced since the stat() above, so let's get
+ * fresh stat data that matches what we are actually reading */
+ return -errno;
+
+ r = read_full_stream(f, (char**) &data, &size);
+ if (r < 0)
+ return r;
+
+ q = data, left = size;
+ while (left > 0) {
+ uint8_t *field, *eq;
+ uint64_t v, n;
+
+ if (left < sizeof(uint64_t))
+ return -EBADMSG;
+
+ v = unaligned_read_le64(q);
+ if (v < 2)
+ return -EBADMSG;
+
+ n = sizeof(uint64_t) + v;
+ if (left < n)
+ return -EBADMSG;
+
+ field = q + sizeof(uint64_t);
+
+ eq = memchr(field, '=', v);
+ if (!eq)
+ return -EBADMSG;
+
+ if (!journal_field_valid((const char *) field, eq - field, false))
+ return -EBADMSG;
+
+ if (!GREEDY_REALLOC(iovec, n_allocated, n_iovec+1))
+ return -ENOMEM;
+
+ iovec[n_iovec++] = IOVEC_MAKE(field, v);
+
+ left -= n, q += n;
+ }
+
+ free(c->extra_fields_iovec);
+ free(c->extra_fields_data);
+
+ c->extra_fields_iovec = TAKE_PTR(iovec);
+ c->extra_fields_n_iovec = n_iovec;
+ c->extra_fields_data = TAKE_PTR(data);
+ c->extra_fields_mtime = timespec_load_nsec(&st.st_mtim);
+
+ return 0;
+}
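The records parsed here are a simple length-prefixed serialization: each one is a little-endian 64-bit length followed by that many bytes of "FIELD=value". A minimal sketch of a writer for that format (the output file name and the field are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_le64(uint8_t *p, uint64_t v) {
        for (unsigned i = 0; i < 8; i++)
                p[i] = (uint8_t) (v >> (8 * i));
}

int main(void) {
        static const char field[] = "EXAMPLE_DEPLOYMENT=blue";  /* hypothetical extra field */
        uint8_t rec[8 + sizeof(field) - 1];
        FILE *f;

        put_le64(rec, sizeof(field) - 1);            /* length, without the trailing NUL */
        memcpy(rec + 8, field, sizeof(field) - 1);   /* then the raw "FIELD=value" bytes */

        f = fopen("log-extra-fields.example", "we");
        if (!f)
                return 1;
        fwrite(rec, 1, sizeof(rec), f);
        fclose(f);
        return 0;
}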
+
+static int client_context_read_log_rate_limit_interval(ClientContext *c) {
+ _cleanup_free_ char *value = NULL;
+ const char *p;
+ int r;
+
+ assert(c);
+
+ if (!c->unit)
+ return 0;
+
+ p = strjoina("/run/systemd/units/log-rate-limit-interval:", c->unit);
+ r = readlink_malloc(p, &value);
+ if (r < 0)
+ return r;
+
+ return safe_atou64(value, &c->log_rate_limit_interval);
+}
+
+static int client_context_read_log_rate_limit_burst(ClientContext *c) {
+ _cleanup_free_ char *value = NULL;
+ const char *p;
+ int r;
+
+ assert(c);
+
+ if (!c->unit)
+ return 0;
+
+ p = strjoina("/run/systemd/units/log-rate-limit-burst:", c->unit);
+ r = readlink_malloc(p, &value);
+ if (r < 0)
+ return r;
+
+ return safe_atou(value, &c->log_rate_limit_burst);
+}
+
+static void client_context_really_refresh(
+ Server *s,
+ ClientContext *c,
+ const struct ucred *ucred,
+ const char *label, size_t label_size,
+ const char *unit_id,
+ usec_t timestamp) {
+
+ assert(s);
+ assert(c);
+ assert(pid_is_valid(c->pid));
+
+ if (timestamp == USEC_INFINITY)
+ timestamp = now(CLOCK_MONOTONIC);
+
+ client_context_read_uid_gid(c, ucred);
+ client_context_read_basic(c);
+ (void) client_context_read_label(c, label, label_size);
+
+ (void) audit_session_from_pid(c->pid, &c->auditid);
+ (void) audit_loginuid_from_pid(c->pid, &c->loginuid);
+
+ (void) client_context_read_cgroup(s, c, unit_id);
+ (void) client_context_read_invocation_id(s, c);
+ (void) client_context_read_log_level_max(s, c);
+ (void) client_context_read_extra_fields(s, c);
+ (void) client_context_read_log_rate_limit_interval(c);
+ (void) client_context_read_log_rate_limit_burst(c);
+
+ c->timestamp = timestamp;
+
+ if (c->in_lru) {
+ assert(c->n_ref == 0);
+ assert_se(prioq_reshuffle(s->client_contexts_lru, c, &c->lru_index) >= 0);
+ }
+}
+
+void client_context_maybe_refresh(
+ Server *s,
+ ClientContext *c,
+ const struct ucred *ucred,
+ const char *label, size_t label_size,
+ const char *unit_id,
+ usec_t timestamp) {
+
+ assert(s);
+ assert(c);
+
+ if (timestamp == USEC_INFINITY)
+ timestamp = now(CLOCK_MONOTONIC);
+
+ /* No cached data so far? Let's fill it up */
+ if (c->timestamp == USEC_INFINITY)
+ goto refresh;
+
+ /* If the data isn't pinned and the cached data is older than the upper limit, we flush it out
+ * entirely. This follows the logic that as long as an entry is pinned, PID reuse is unlikely. */
+ if (c->n_ref == 0 && c->timestamp + MAX_USEC < timestamp) {
+ client_context_reset(s, c);
+ goto refresh;
+ }
+
+ /* If the data is older than the lower limit, we refresh, but keep the old data for all we can't update */
+ if (c->timestamp + REFRESH_USEC < timestamp)
+ goto refresh;
+
+ /* If the data passed along doesn't match the cached data we also do a refresh */
+ if (ucred && uid_is_valid(ucred->uid) && c->uid != ucred->uid)
+ goto refresh;
+
+ if (ucred && gid_is_valid(ucred->gid) && c->gid != ucred->gid)
+ goto refresh;
+
+ if (label_size > 0 && (label_size != c->label_size || memcmp(label, c->label, label_size) != 0))
+ goto refresh;
+
+ return;
+
+refresh:
+ client_context_really_refresh(s, c, ucred, label, label_size, unit_id, timestamp);
+}
+
+static void client_context_try_shrink_to(Server *s, size_t limit) {
+ ClientContext *c;
+ usec_t t;
+
+ assert(s);
+
+ /* Flush any cache entries for PIDs that have already moved on. Don't do this
+ * too often, since it's a slow process. */
+ t = now(CLOCK_MONOTONIC);
+ if (s->last_cache_pid_flush + MAX_USEC < t) {
+ unsigned n = prioq_size(s->client_contexts_lru), idx = 0;
+
+ /* We do a number of iterations based on the initial size of the prioq. When we remove an
+ * item, a new item is moved into its place, and items to the right might be reshuffled.
+ */
+ for (unsigned i = 0; i < n; i++) {
+ c = prioq_peek_by_index(s->client_contexts_lru, idx);
+
+ assert(c->n_ref == 0);
+
+ if (!pid_is_unwaited(c->pid))
+ client_context_free(s, c);
+ else
+ idx++;
+ }
+
+ s->last_cache_pid_flush = t;
+ }
+
+ /* Bring the number of cache entries below the indicated limit, so that we can create a new entry without
+ * breaching the limit. Note that we only flush out entries that aren't pinned here. This means the number of
+ * cache entries may very well grow beyond the limit, if all entries stored remain pinned. */
+
+ while (hashmap_size(s->client_contexts) > limit) {
+ c = prioq_pop(s->client_contexts_lru);
+ if (!c)
+ break; /* All remaining entries are pinned, give up */
+
+ assert(c->in_lru);
+ assert(c->n_ref == 0);
+
+ c->in_lru = false;
+
+ client_context_free(s, c);
+ }
+}
+
+void client_context_flush_all(Server *s) {
+ assert(s);
+
+ /* Flush out all remaining entries. This assumes all references are already dropped. */
+
+ s->my_context = client_context_release(s, s->my_context);
+ s->pid1_context = client_context_release(s, s->pid1_context);
+
+ client_context_try_shrink_to(s, 0);
+
+ assert(prioq_size(s->client_contexts_lru) == 0);
+ assert(hashmap_size(s->client_contexts) == 0);
+
+ s->client_contexts_lru = prioq_free(s->client_contexts_lru);
+ s->client_contexts = hashmap_free(s->client_contexts);
+}
+
+static int client_context_get_internal(
+ Server *s,
+ pid_t pid,
+ const struct ucred *ucred,
+ const char *label, size_t label_len,
+ const char *unit_id,
+ bool add_ref,
+ ClientContext **ret) {
+
+ ClientContext *c;
+ int r;
+
+ assert(s);
+ assert(ret);
+
+ if (!pid_is_valid(pid))
+ return -EINVAL;
+
+ c = hashmap_get(s->client_contexts, PID_TO_PTR(pid));
+ if (c) {
+
+ if (add_ref) {
+ if (c->in_lru) {
+ /* The entry wasn't pinned so far, let's remove it from the LRU list then */
+ assert(c->n_ref == 0);
+ assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
+ c->in_lru = false;
+ }
+
+ c->n_ref++;
+ }
+
+ client_context_maybe_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);
+
+ *ret = c;
+ return 0;
+ }
+
+ client_context_try_shrink_to(s, cache_max()-1);
+
+ r = client_context_new(s, pid, &c);
+ if (r < 0)
+ return r;
+
+ if (add_ref)
+ c->n_ref++;
+ else {
+ r = prioq_put(s->client_contexts_lru, c, &c->lru_index);
+ if (r < 0) {
+ client_context_free(s, c);
+ return r;
+ }
+
+ c->in_lru = true;
+ }
+
+ client_context_really_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);
+
+ *ret = c;
+ return 0;
+}
+
+int client_context_get(
+ Server *s,
+ pid_t pid,
+ const struct ucred *ucred,
+ const char *label, size_t label_len,
+ const char *unit_id,
+ ClientContext **ret) {
+
+ return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, false, ret);
+}
+
+int client_context_acquire(
+ Server *s,
+ pid_t pid,
+ const struct ucred *ucred,
+ const char *label, size_t label_len,
+ const char *unit_id,
+ ClientContext **ret) {
+
+ return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, true, ret);
+}
+
+ClientContext *client_context_release(Server *s, ClientContext *c) {
+ assert(s);
+
+ if (!c)
+ return NULL;
+
+ assert(c->n_ref > 0);
+ assert(!c->in_lru);
+
+ c->n_ref--;
+ if (c->n_ref > 0)
+ return NULL;
+
+ /* The entry is not pinned anymore, so let's add it to the LRU prioq if we can. If we can't, we'll drop it
+ * right away */
+
+ if (prioq_put(s->client_contexts_lru, c, &c->lru_index) < 0)
+ client_context_free(s, c);
+ else
+ c->in_lru = true;
+
+ return NULL;
+}
+
+void client_context_acquire_default(Server *s) {
+ int r;
+
+ assert(s);
+
+ /* Ensure that our own and PID1's contexts are always pinned. Our own context is particularly useful to
+ * generate driver messages. */
+
+ if (!s->my_context) {
+ struct ucred ucred = {
+ .pid = getpid_cached(),
+ .uid = getuid(),
+ .gid = getgid(),
+ };
+
+ r = client_context_acquire(s, ucred.pid, &ucred, NULL, 0, NULL, &s->my_context);
+ if (r < 0)
+ log_warning_errno(r, "Failed to acquire our own context, ignoring: %m");
+ }
+
+ if (!s->pid1_context) {
+
+ r = client_context_acquire(s, 1, NULL, NULL, 0, NULL, &s->pid1_context);
+ if (r < 0)
+ log_warning_errno(r, "Failed to acquire PID1's context, ignoring: %m");
+
+ }
+}
diff --git a/src/journal/journald-context.h b/src/journal/journald-context.h
new file mode 100644
index 0000000..5e19c71
--- /dev/null
+++ b/src/journal/journald-context.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <inttypes.h>
+#include <sys/types.h>
+
+#include "sd-id128.h"
+
+typedef struct ClientContext ClientContext;
+
+#include "journald-server.h"
+
+struct ClientContext {
+ unsigned n_ref;
+ unsigned lru_index;
+ usec_t timestamp;
+ bool in_lru;
+
+ pid_t pid;
+ uid_t uid;
+ gid_t gid;
+
+ char *comm;
+ char *exe;
+ char *cmdline;
+ char *capeff;
+
+ uint32_t auditid;
+ uid_t loginuid;
+
+ char *cgroup;
+ char *session;
+ uid_t owner_uid;
+
+ char *unit;
+ char *user_unit;
+
+ char *slice;
+ char *user_slice;
+
+ sd_id128_t invocation_id;
+
+ char *label;
+ size_t label_size;
+
+ int log_level_max;
+
+ struct iovec *extra_fields_iovec;
+ size_t extra_fields_n_iovec;
+ void *extra_fields_data;
+ nsec_t extra_fields_mtime;
+
+ usec_t log_rate_limit_interval;
+ unsigned log_rate_limit_burst;
+};
+
+int client_context_get(
+ Server *s,
+ pid_t pid,
+ const struct ucred *ucred,
+ const char *label, size_t label_len,
+ const char *unit_id,
+ ClientContext **ret);
+
+int client_context_acquire(
+ Server *s,
+ pid_t pid,
+ const struct ucred *ucred,
+ const char *label, size_t label_len,
+ const char *unit_id,
+ ClientContext **ret);
+
+ClientContext* client_context_release(Server *s, ClientContext *c);
+
+void client_context_maybe_refresh(
+ Server *s,
+ ClientContext *c,
+ const struct ucred *ucred,
+ const char *label, size_t label_size,
+ const char *unit_id,
+ usec_t tstamp);
+
+void client_context_acquire_default(Server *s);
+void client_context_flush_all(Server *s);
+
+static inline size_t client_context_extra_fields_n_iovec(const ClientContext *c) {
+ return c ? c->extra_fields_n_iovec : 0;
+}
+
+static inline bool client_context_test_priority(const ClientContext *c, int priority) {
+ if (!c)
+ return true;
+
+ if (c->log_level_max < 0)
+ return true;
+
+ return LOG_PRI(priority) <= c->log_level_max;
+}
diff --git a/src/journal/journald-gperf.gperf b/src/journal/journald-gperf.gperf
new file mode 100644
index 0000000..1adcb50
--- /dev/null
+++ b/src/journal/journald-gperf.gperf
@@ -0,0 +1,51 @@
+%{
+#if __GNUC__ >= 7
+_Pragma("GCC diagnostic ignored \"-Wimplicit-fallthrough\"")
+#endif
+#include <stddef.h>
+#include <sys/socket.h>
+#include "conf-parser.h"
+#include "journald-server.h"
+%}
+struct ConfigPerfItem;
+%null_strings
+%language=ANSI-C
+%define slot-name section_and_lvalue
+%define hash-function-name journald_gperf_hash
+%define lookup-function-name journald_gperf_lookup
+%readonly-tables
+%omit-struct-type
+%struct-type
+%includes
+%%
+Journal.Storage, config_parse_storage, 0, offsetof(Server, storage)
+Journal.Compress, config_parse_compress, 0, offsetof(Server, compress)
+Journal.Seal, config_parse_bool, 0, offsetof(Server, seal)
+Journal.ReadKMsg, config_parse_bool, 0, offsetof(Server, read_kmsg)
+Journal.SyncIntervalSec, config_parse_sec, 0, offsetof(Server, sync_interval_usec)
+# The following is a legacy name for compatibility
+Journal.RateLimitInterval, config_parse_sec, 0, offsetof(Server, rate_limit_interval)
+Journal.RateLimitIntervalSec,config_parse_sec, 0, offsetof(Server, rate_limit_interval)
+Journal.RateLimitBurst, config_parse_unsigned, 0, offsetof(Server, rate_limit_burst)
+Journal.SystemMaxUse, config_parse_iec_uint64, 0, offsetof(Server, system_storage.metrics.max_use)
+Journal.SystemMaxFileSize, config_parse_iec_uint64, 0, offsetof(Server, system_storage.metrics.max_size)
+Journal.SystemKeepFree, config_parse_iec_uint64, 0, offsetof(Server, system_storage.metrics.keep_free)
+Journal.SystemMaxFiles, config_parse_uint64, 0, offsetof(Server, system_storage.metrics.n_max_files)
+Journal.RuntimeMaxUse, config_parse_iec_uint64, 0, offsetof(Server, runtime_storage.metrics.max_use)
+Journal.RuntimeMaxFileSize, config_parse_iec_uint64, 0, offsetof(Server, runtime_storage.metrics.max_size)
+Journal.RuntimeKeepFree, config_parse_iec_uint64, 0, offsetof(Server, runtime_storage.metrics.keep_free)
+Journal.RuntimeMaxFiles, config_parse_uint64, 0, offsetof(Server, runtime_storage.metrics.n_max_files)
+Journal.MaxRetentionSec, config_parse_sec, 0, offsetof(Server, max_retention_usec)
+Journal.MaxFileSec, config_parse_sec, 0, offsetof(Server, max_file_usec)
+Journal.ForwardToSyslog, config_parse_bool, 0, offsetof(Server, forward_to_syslog)
+Journal.ForwardToKMsg, config_parse_bool, 0, offsetof(Server, forward_to_kmsg)
+Journal.ForwardToConsole, config_parse_bool, 0, offsetof(Server, forward_to_console)
+Journal.ForwardToWall, config_parse_bool, 0, offsetof(Server, forward_to_wall)
+Journal.TTYPath, config_parse_path, 0, offsetof(Server, tty_path)
+Journal.MaxLevelStore, config_parse_log_level, 0, offsetof(Server, max_level_store)
+Journal.MaxLevelSyslog, config_parse_log_level, 0, offsetof(Server, max_level_syslog)
+Journal.MaxLevelKMsg, config_parse_log_level, 0, offsetof(Server, max_level_kmsg)
+Journal.MaxLevelConsole, config_parse_log_level, 0, offsetof(Server, max_level_console)
+Journal.MaxLevelWall, config_parse_log_level, 0, offsetof(Server, max_level_wall)
+Journal.SplitMode, config_parse_split_mode, 0, offsetof(Server, split_mode)
+Journal.LineMax, config_parse_line_max, 0, offsetof(Server, line_max)
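Each row above wires a [Journal] section setting to a Server field via the named parser. A hypothetical journald.conf fragment exercising a few of these settings (values are illustrative only):

[Journal]
Storage=persistent
Compress=yes
RateLimitIntervalSec=30s
RateLimitBurst=10000
SystemMaxUse=4G
MaxLevelStore=debug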
diff --git a/src/journal/journald-kmsg.c b/src/journal/journald-kmsg.c
new file mode 100644
index 0000000..ce82102
--- /dev/null
+++ b/src/journal/journald-kmsg.c
@@ -0,0 +1,450 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <sys/epoll.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <unistd.h>
+
+#include "sd-device.h"
+#include "sd-messages.h"
+
+#include "alloc-util.h"
+#include "device-util.h"
+#include "escape.h"
+#include "fd-util.h"
+#include "format-util.h"
+#include "io-util.h"
+#include "journald-kmsg.h"
+#include "journald-server.h"
+#include "journald-syslog.h"
+#include "parse-util.h"
+#include "process-util.h"
+#include "stdio-util.h"
+#include "string-util.h"
+
+void server_forward_kmsg(
+ Server *s,
+ int priority,
+ const char *identifier,
+ const char *message,
+ const struct ucred *ucred) {
+
+ _cleanup_free_ char *ident_buf = NULL;
+ struct iovec iovec[5];
+ char header_priority[DECIMAL_STR_MAX(priority) + 3],
+ header_pid[STRLEN("[]: ") + DECIMAL_STR_MAX(pid_t) + 1];
+ int n = 0;
+
+ assert(s);
+ assert(priority >= 0);
+ assert(priority <= 999);
+ assert(message);
+
+ if (_unlikely_(LOG_PRI(priority) > s->max_level_kmsg))
+ return;
+
+ if (_unlikely_(s->dev_kmsg_fd < 0))
+ return;
+
+ /* Never allow messages with kernel facility to be written to
+ * kmsg, regardless where the data comes from. */
+ priority = syslog_fixup_facility(priority);
+
+ /* First: priority field */
+ xsprintf(header_priority, "<%i>", priority);
+ iovec[n++] = IOVEC_MAKE_STRING(header_priority);
+
+ /* Second: identifier and PID */
+ if (ucred) {
+ if (!identifier) {
+ get_process_comm(ucred->pid, &ident_buf);
+ identifier = ident_buf;
+ }
+
+ xsprintf(header_pid, "["PID_FMT"]: ", ucred->pid);
+
+ if (identifier)
+ iovec[n++] = IOVEC_MAKE_STRING(identifier);
+
+ iovec[n++] = IOVEC_MAKE_STRING(header_pid);
+ } else if (identifier) {
+ iovec[n++] = IOVEC_MAKE_STRING(identifier);
+ iovec[n++] = IOVEC_MAKE_STRING(": ");
+ }
+
+ /* Third: message */
+ iovec[n++] = IOVEC_MAKE_STRING(message);
+ iovec[n++] = IOVEC_MAKE_STRING("\n");
+
+ if (writev(s->dev_kmsg_fd, iovec, n) < 0)
+ log_debug_errno(errno, "Failed to write to /dev/kmsg for logging: %m");
+}
+
+static bool is_us(const char *identifier, const char *pid) {
+ pid_t pid_num;
+
+ if (!identifier || !pid)
+ return false;
+
+ if (parse_pid(pid, &pid_num) < 0)
+ return false;
+
+ return pid_num == getpid_cached() &&
+ streq(identifier, program_invocation_short_name);
+}
+
+void dev_kmsg_record(Server *s, char *p, size_t l) {
+
+ _cleanup_free_ char *message = NULL, *syslog_priority = NULL, *syslog_pid = NULL, *syslog_facility = NULL, *syslog_identifier = NULL, *source_time = NULL, *identifier = NULL, *pid = NULL;
+ struct iovec iovec[N_IOVEC_META_FIELDS + 7 + N_IOVEC_KERNEL_FIELDS + 2 + N_IOVEC_UDEV_FIELDS];
+ char *kernel_device = NULL;
+ unsigned long long usec;
+ size_t n = 0, z = 0, j;
+ int priority, r;
+ char *e, *f, *k;
+ uint64_t serial;
+ size_t pl;
+
+ assert(s);
+ assert(p);
+
+ if (l <= 0)
+ return;
+
+ e = memchr(p, ',', l);
+ if (!e)
+ return;
+ *e = 0;
+
+ r = safe_atoi(p, &priority);
+ if (r < 0 || priority < 0 || priority > 999)
+ return;
+
+ if (s->forward_to_kmsg && LOG_FAC(priority) != LOG_KERN)
+ return;
+
+ l -= (e - p) + 1;
+ p = e + 1;
+ e = memchr(p, ',', l);
+ if (!e)
+ return;
+ *e = 0;
+
+ r = safe_atou64(p, &serial);
+ if (r < 0)
+ return;
+
+ if (s->kernel_seqnum) {
+ /* We already read this one? */
+ if (serial < *s->kernel_seqnum)
+ return;
+
+ /* Did we lose any? */
+ if (serial > *s->kernel_seqnum)
+ server_driver_message(s, 0,
+ "MESSAGE_ID=" SD_MESSAGE_JOURNAL_MISSED_STR,
+ LOG_MESSAGE("Missed %"PRIu64" kernel messages",
+ serial - *s->kernel_seqnum),
+ NULL);
+
+ /* Make sure we never read this one again. Note that
+ * we always store the next message serial we expect
+ * here, simply because this makes handling the first
+ * message with serial 0 easy. */
+ *s->kernel_seqnum = serial + 1;
+ }
+
+ l -= (e - p) + 1;
+ p = e + 1;
+ f = memchr(p, ';', l);
+ if (!f)
+ return;
+ /* Kernel 3.6 has the flags field, kernel 3.5 lacks that */
+ e = memchr(p, ',', l);
+ if (!e || f < e)
+ e = f;
+ *e = 0;
+
+ r = safe_atollu(p, &usec);
+ if (r < 0)
+ return;
+
+ l -= (f - p) + 1;
+ p = f + 1;
+ e = memchr(p, '\n', l);
+ if (!e)
+ return;
+ *e = 0;
+
+ pl = e - p;
+ l -= (e - p) + 1;
+ k = e + 1;
+
+ for (j = 0; l > 0 && j < N_IOVEC_KERNEL_FIELDS; j++) {
+ char *m;
+ /* Metadata fields attached */
+
+ if (*k != ' ')
+ break;
+
+ k++, l--;
+
+ e = memchr(k, '\n', l);
+ if (!e)
+ goto finish;
+
+ *e = 0;
+
+ if (cunescape_length_with_prefix(k, e - k, "_KERNEL_", UNESCAPE_RELAX, &m) < 0)
+ break;
+
+ if (startswith(m, "_KERNEL_DEVICE="))
+ kernel_device = m + 15;
+
+ iovec[n++] = IOVEC_MAKE_STRING(m);
+ z++;
+
+ l -= (e - k) + 1;
+ k = e + 1;
+ }
+
+ if (kernel_device) {
+ _cleanup_(sd_device_unrefp) sd_device *d = NULL;
+
+ if (sd_device_new_from_device_id(&d, kernel_device) >= 0) {
+ const char *g;
+ char *b;
+
+ if (sd_device_get_devname(d, &g) >= 0) {
+ b = strappend("_UDEV_DEVNODE=", g);
+ if (b) {
+ iovec[n++] = IOVEC_MAKE_STRING(b);
+ z++;
+ }
+ }
+
+ if (sd_device_get_sysname(d, &g) >= 0) {
+ b = strappend("_UDEV_SYSNAME=", g);
+ if (b) {
+ iovec[n++] = IOVEC_MAKE_STRING(b);
+ z++;
+ }
+ }
+
+ j = 0;
+ FOREACH_DEVICE_DEVLINK(d, g) {
+
+ if (j >= N_IOVEC_UDEV_FIELDS)
+ break;
+
+ b = strappend("_UDEV_DEVLINK=", g);
+ if (b) {
+ iovec[n++] = IOVEC_MAKE_STRING(b);
+ z++;
+ }
+
+ j++;
+ }
+ }
+ }
+
+ if (asprintf(&source_time, "_SOURCE_MONOTONIC_TIMESTAMP=%llu", usec) >= 0)
+ iovec[n++] = IOVEC_MAKE_STRING(source_time);
+
+ iovec[n++] = IOVEC_MAKE_STRING("_TRANSPORT=kernel");
+
+ if (asprintf(&syslog_priority, "PRIORITY=%i", priority & LOG_PRIMASK) >= 0)
+ iovec[n++] = IOVEC_MAKE_STRING(syslog_priority);
+
+ if (asprintf(&syslog_facility, "SYSLOG_FACILITY=%i", LOG_FAC(priority)) >= 0)
+ iovec[n++] = IOVEC_MAKE_STRING(syslog_facility);
+
+ if (LOG_FAC(priority) == LOG_KERN)
+ iovec[n++] = IOVEC_MAKE_STRING("SYSLOG_IDENTIFIER=kernel");
+ else {
+ pl -= syslog_parse_identifier((const char**) &p, &identifier, &pid);
+
+ /* Avoid any messages we generated ourselves via
+ * log_info() and friends. */
+ if (is_us(identifier, pid))
+ goto finish;
+
+ if (identifier) {
+ syslog_identifier = strappend("SYSLOG_IDENTIFIER=", identifier);
+ if (syslog_identifier)
+ iovec[n++] = IOVEC_MAKE_STRING(syslog_identifier);
+ }
+
+ if (pid) {
+ syslog_pid = strappend("SYSLOG_PID=", pid);
+ if (syslog_pid)
+ iovec[n++] = IOVEC_MAKE_STRING(syslog_pid);
+ }
+ }
+
+ if (cunescape_length_with_prefix(p, pl, "MESSAGE=", UNESCAPE_RELAX, &message) >= 0)
+ iovec[n++] = IOVEC_MAKE_STRING(message);
+
+ server_dispatch_message(s, iovec, n, ELEMENTSOF(iovec), NULL, NULL, priority, 0);
+
+finish:
+ for (j = 0; j < z; j++)
+ free(iovec[j].iov_base);
+}
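For reference, a /dev/kmsg record is "&lt;priority&gt;,&lt;seqnum&gt;,&lt;usec&gt;[,&lt;flags&gt;];&lt;message&gt;\n", optionally followed by space-indented "KEY=value" continuation lines, which is exactly the layout the delimiter walking above relies on. A standalone sketch of that layout, using a made-up record:

#include <stdio.h>
#include <string.h>

int main(void) {
        char rec[] = "6,339,5140900,-;NET: Registered protocol family 10\n SUBSYSTEM=net\n";
        char *semi = strchr(rec, ';'), *nl;
        unsigned prio;
        unsigned long long seqnum, usec;

        if (!semi)
                return 1;
        *semi = 0;

        /* The header is a comma-separated prefix; trailing fields such as the flags are ignored here. */
        if (sscanf(rec, "%u,%llu,%llu", &prio, &seqnum, &usec) != 3)
                return 1;

        nl = strchr(semi + 1, '\n');  /* cut off the continuation lines for this sketch */
        if (nl)
                *nl = 0;

        printf("PRIORITY=%u SYSLOG_FACILITY=%u seq=%llu usec=%llu MESSAGE=%s\n",
               prio & 7, prio >> 3, seqnum, usec, semi + 1);
        return 0;
}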
+
+static int server_read_dev_kmsg(Server *s) {
+ char buffer[8192+1]; /* the kernel-side limit per record is 8K currently */
+ ssize_t l;
+
+ assert(s);
+ assert(s->dev_kmsg_fd >= 0);
+
+ l = read(s->dev_kmsg_fd, buffer, sizeof(buffer) - 1);
+ if (l == 0)
+ return 0;
+ if (l < 0) {
+ /* Old kernels that don't allow reading from /dev/kmsg
+ * return EINVAL when we try. So handle this cleanly,
+ * but don't try to ever read from it again. */
+ if (errno == EINVAL) {
+ s->dev_kmsg_event_source = sd_event_source_unref(s->dev_kmsg_event_source);
+ return 0;
+ }
+
+ if (IN_SET(errno, EAGAIN, EINTR, EPIPE))
+ return 0;
+
+ return log_error_errno(errno, "Failed to read from kernel: %m");
+ }
+
+ dev_kmsg_record(s, buffer, l);
+ return 1;
+}
+
+int server_flush_dev_kmsg(Server *s) {
+ int r;
+
+ assert(s);
+
+ if (s->dev_kmsg_fd < 0)
+ return 0;
+
+ if (!s->dev_kmsg_readable)
+ return 0;
+
+ log_debug("Flushing /dev/kmsg...");
+
+ for (;;) {
+ r = server_read_dev_kmsg(s);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+ }
+
+ return 0;
+}
+
+static int dispatch_dev_kmsg(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
+ Server *s = userdata;
+
+ assert(es);
+ assert(s);
+ assert(fd == s->dev_kmsg_fd);
+
+ if (revents & EPOLLERR)
+ log_warning("/dev/kmsg buffer overrun, some messages lost.");
+
+ if (!(revents & EPOLLIN))
+ log_error("Got invalid event from epoll for /dev/kmsg: %"PRIx32, revents);
+
+ return server_read_dev_kmsg(s);
+}
+
+int server_open_dev_kmsg(Server *s) {
+ mode_t mode;
+ int r;
+
+ assert(s);
+
+ if (s->read_kmsg)
+ mode = O_RDWR|O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
+ else
+ mode = O_WRONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY;
+
+ s->dev_kmsg_fd = open("/dev/kmsg", mode);
+ if (s->dev_kmsg_fd < 0) {
+ log_full(errno == ENOENT ? LOG_DEBUG : LOG_WARNING,
+ "Failed to open /dev/kmsg, ignoring: %m");
+ return 0;
+ }
+
+ if (!s->read_kmsg)
+ return 0;
+
+ r = sd_event_add_io(s->event, &s->dev_kmsg_event_source, s->dev_kmsg_fd, EPOLLIN, dispatch_dev_kmsg, s);
+ if (r < 0) {
+
+ /* This will fail with EPERM on older kernels where
+ * /dev/kmsg is not readable. */
+ if (r == -EPERM) {
+ r = 0;
+ goto fail;
+ }
+
+ log_error_errno(r, "Failed to add /dev/kmsg fd to event loop: %m");
+ goto fail;
+ }
+
+ r = sd_event_source_set_priority(s->dev_kmsg_event_source, SD_EVENT_PRIORITY_IMPORTANT+10);
+ if (r < 0) {
+ log_error_errno(r, "Failed to adjust priority of kmsg event source: %m");
+ goto fail;
+ }
+
+ s->dev_kmsg_readable = true;
+
+ return 0;
+
+fail:
+ s->dev_kmsg_event_source = sd_event_source_unref(s->dev_kmsg_event_source);
+ s->dev_kmsg_fd = safe_close(s->dev_kmsg_fd);
+
+ return r;
+}
+
+int server_open_kernel_seqnum(Server *s) {
+ _cleanup_close_ int fd;
+ uint64_t *p;
+ int r;
+
+ assert(s);
+
+ /* We store the seqnum we last read in an mmaped file. That
+ * way we can just use it like a variable, but it is
+ * persistent and automatically flushed at reboot. */
+
+ fd = open("/run/systemd/journal/kernel-seqnum", O_RDWR|O_CREAT|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW, 0644);
+ if (fd < 0) {
+ log_error_errno(errno, "Failed to open /run/systemd/journal/kernel-seqnum, ignoring: %m");
+ return 0;
+ }
+
+ r = posix_fallocate(fd, 0, sizeof(uint64_t));
+ if (r != 0) {
+ log_error_errno(r, "Failed to allocate sequential number file, ignoring: %m");
+ return 0;
+ }
+
+ p = mmap(NULL, sizeof(uint64_t), PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+ if (p == MAP_FAILED) {
+ log_error_errno(errno, "Failed to map sequential number file, ignoring: %m");
+ return 0;
+ }
+
+ s->kernel_seqnum = p;
+
+ return 0;
+}
diff --git a/src/journal/journald-kmsg.h b/src/journal/journald-kmsg.h
new file mode 100644
index 0000000..2326bc8
--- /dev/null
+++ b/src/journal/journald-kmsg.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "journald-server.h"
+
+int server_open_dev_kmsg(Server *s);
+int server_flush_dev_kmsg(Server *s);
+
+void server_forward_kmsg(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred);
+
+int server_open_kernel_seqnum(Server *s);
+
+void dev_kmsg_record(Server *s, char *p, size_t l);
diff --git a/src/journal/journald-native.c b/src/journal/journald-native.c
new file mode 100644
index 0000000..221188d
--- /dev/null
+++ b/src/journal/journald-native.c
@@ -0,0 +1,502 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <stddef.h>
+#include <sys/epoll.h>
+#include <sys/mman.h>
+#include <sys/statvfs.h>
+#include <unistd.h>
+
+#include "alloc-util.h"
+#include "fd-util.h"
+#include "fs-util.h"
+#include "io-util.h"
+#include "journal-importer.h"
+#include "journal-util.h"
+#include "journald-console.h"
+#include "journald-kmsg.h"
+#include "journald-native.h"
+#include "journald-server.h"
+#include "journald-syslog.h"
+#include "journald-wall.h"
+#include "memfd-util.h"
+#include "parse-util.h"
+#include "path-util.h"
+#include "process-util.h"
+#include "selinux-util.h"
+#include "socket-util.h"
+#include "string-util.h"
+#include "strv.h"
+#include "unaligned.h"
+
+static bool allow_object_pid(const struct ucred *ucred) {
+ return ucred && ucred->uid == 0;
+}
+
+static void server_process_entry_meta(
+ const char *p, size_t l,
+ const struct ucred *ucred,
+ int *priority,
+ char **identifier,
+ char **message,
+ pid_t *object_pid) {
+
+ /* We need to determine the priority of this entry for the rate limiting logic */
+
+ if (l == 10 &&
+ startswith(p, "PRIORITY=") &&
+ p[9] >= '0' && p[9] <= '9')
+ *priority = (*priority & LOG_FACMASK) | (p[9] - '0');
+
+ else if (l == 17 &&
+ startswith(p, "SYSLOG_FACILITY=") &&
+ p[16] >= '0' && p[16] <= '9')
+ *priority = (*priority & LOG_PRIMASK) | ((p[16] - '0') << 3);
+
+ else if (l == 18 &&
+ startswith(p, "SYSLOG_FACILITY=") &&
+ p[16] >= '0' && p[16] <= '9' &&
+ p[17] >= '0' && p[17] <= '9')
+ *priority = (*priority & LOG_PRIMASK) | (((p[16] - '0')*10 + (p[17] - '0')) << 3);
+
+ else if (l >= 19 &&
+ startswith(p, "SYSLOG_IDENTIFIER=")) {
+ char *t;
+
+ t = strndup(p + 18, l - 18);
+ if (t) {
+ free(*identifier);
+ *identifier = t;
+ }
+
+ } else if (l >= 8 &&
+ startswith(p, "MESSAGE=")) {
+ char *t;
+
+ t = strndup(p + 8, l - 8);
+ if (t) {
+ free(*message);
+ *message = t;
+ }
+
+ } else if (l > STRLEN("OBJECT_PID=") &&
+ l < STRLEN("OBJECT_PID=") + DECIMAL_STR_MAX(pid_t) &&
+ startswith(p, "OBJECT_PID=") &&
+ allow_object_pid(ucred)) {
+ char buf[DECIMAL_STR_MAX(pid_t)];
+ memcpy(buf, p + STRLEN("OBJECT_PID="),
+ l - STRLEN("OBJECT_PID="));
+ buf[l-STRLEN("OBJECT_PID=")] = '\0';
+
+ (void) parse_pid(buf, object_pid);
+ }
+}
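The arithmetic above reassembles the syslog priority value, which packs the facility into the upper bits and the level into the lower three: priority = (facility << 3) | level, with LOG_FACMASK and LOG_PRIMASK selecting the two halves. A tiny self-contained illustration:

#include <stdio.h>
#include <syslog.h>

int main(void) {
        int priority = LOG_USER | LOG_INFO;  /* LOG_USER is 1<<3, LOG_INFO is 6, so priority == 14 */

        printf("facility=%d level=%d\n", LOG_FAC(priority), LOG_PRI(priority));  /* prints "facility=1 level=6" */
        return 0;
}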
+
+static int server_process_entry(
+ Server *s,
+ const void *buffer, size_t *remaining,
+ ClientContext *context,
+ const struct ucred *ucred,
+ const struct timeval *tv,
+ const char *label, size_t label_len) {
+
+ /* Process a single entry from a native message. Returns 0 if nothing special happened and the message
+ * processing should continue, and a negative or positive value otherwise.
+ *
+ * Note that *remaining is altered on both success and failure. */
+
+ size_t n = 0, j, tn = (size_t) -1, m = 0, entry_size = 0;
+ char *identifier = NULL, *message = NULL;
+ struct iovec *iovec = NULL;
+ int priority = LOG_INFO;
+ pid_t object_pid = 0;
+ const char *p;
+ int r = 1;
+
+ p = buffer;
+
+ while (*remaining > 0) {
+ const char *e, *q;
+
+ e = memchr(p, '\n', *remaining);
+
+ if (!e) {
+ /* Trailing noise, let's ignore it, and flush what we collected */
+ log_debug("Received message with trailing noise, ignoring.");
+ break; /* finish processing of the message */
+ }
+
+ if (e == p) {
+ /* Entry separator */
+ *remaining -= 1;
+ break;
+ }
+
+ if (IN_SET(*p, '.', '#')) {
+ /* Ignore control commands for now, and comments too. */
+ *remaining -= (e - p) + 1;
+ p = e + 1;
+ continue;
+ }
+
+ /* A property follows */
+ if (n > ENTRY_FIELD_COUNT_MAX) {
+ log_debug("Received an entry that has more than " STRINGIFY(ENTRY_FIELD_COUNT_MAX) " fields, ignoring entry.");
+ goto finish;
+ }
+
+ /* n existing properties, 1 new, +1 for _TRANSPORT */
+ if (!GREEDY_REALLOC(iovec, m,
+ n + 2 +
+ N_IOVEC_META_FIELDS + N_IOVEC_OBJECT_FIELDS +
+ client_context_extra_fields_n_iovec(context))) {
+ r = log_oom();
+ goto finish;
+ }
+
+ q = memchr(p, '=', e - p);
+ if (q) {
+ if (journal_field_valid(p, q - p, false)) {
+ size_t l;
+
+ l = e - p;
+ if (l > DATA_SIZE_MAX) {
+ log_debug("Received text block of %zu bytes is too large, ignoring entry.", l);
+ goto finish;
+ }
+
+ if (entry_size + l + n + 1 > ENTRY_SIZE_MAX) { /* data + separators + trailer */
+ log_debug("Entry is too big (%zu bytes after processing %zu entries), ignoring entry.",
+ entry_size + l, n + 1);
+ goto finish;
+ }
+
+ /* If the field name starts with an underscore, skip the variable, since that indicates
+ * a trusted field */
+ iovec[n++] = IOVEC_MAKE((char*) p, l);
+ entry_size += l;
+
+ server_process_entry_meta(p, l, ucred,
+ &priority,
+ &identifier,
+ &message,
+ &object_pid);
+ }
+
+ *remaining -= (e - p) + 1;
+ p = e + 1;
+ continue;
+ } else {
+ uint64_t l, total;
+ char *k;
+
+ if (*remaining < e - p + 1 + sizeof(uint64_t) + 1) {
+ log_debug("Failed to parse message, ignoring.");
+ break;
+ }
+
+ l = unaligned_read_le64(e + 1);
+ if (l > DATA_SIZE_MAX) {
+ log_debug("Received binary data block of %"PRIu64" bytes is too large, ignoring entry.", l);
+ goto finish;
+ }
+
+ total = (e - p) + 1 + l;
+ if (entry_size + total + n + 1 > ENTRY_SIZE_MAX) { /* data + separators + trailer */
+ log_debug("Entry is too big (%"PRIu64"bytes after processing %zu fields), ignoring.",
+ entry_size + total, n + 1);
+ goto finish;
+ }
+
+ if ((uint64_t) *remaining < e - p + 1 + sizeof(uint64_t) + l + 1 ||
+ e[1+sizeof(uint64_t)+l] != '\n') {
+ log_debug("Failed to parse message, ignoring.");
+ break;
+ }
+
+ k = malloc(total);
+ if (!k) {
+ log_oom();
+ break;
+ }
+
+ memcpy(k, p, e - p);
+ k[e - p] = '=';
+ memcpy(k + (e - p) + 1, e + 1 + sizeof(uint64_t), l);
+
+ if (journal_field_valid(p, e - p, false)) {
+ iovec[n] = IOVEC_MAKE(k, (e - p) + 1 + l);
+ entry_size += iovec[n].iov_len;
+ n++;
+
+ server_process_entry_meta(k, (e - p) + 1 + l, ucred,
+ &priority,
+ &identifier,
+ &message,
+ &object_pid);
+ } else
+ free(k);
+
+ *remaining -= (e - p) + 1 + sizeof(uint64_t) + l + 1;
+ p = e + 1 + sizeof(uint64_t) + l + 1;
+ }
+ }
+
+ if (n <= 0)
+ goto finish;
+
+ tn = n++;
+ iovec[tn] = IOVEC_MAKE_STRING("_TRANSPORT=journal");
+ entry_size += STRLEN("_TRANSPORT=journal");
+
+ if (entry_size + n + 1 > ENTRY_SIZE_MAX) { /* data + separators + trailer */
+ log_debug("Entry is too big with %zu properties and %zu bytes, ignoring.", n, entry_size);
+ goto finish;
+ }
+
+ r = 0; /* Success, we read the message. */
+
+ if (!client_context_test_priority(context, priority))
+ goto finish;
+
+ if (message) {
+ if (s->forward_to_syslog)
+ server_forward_syslog(s, syslog_fixup_facility(priority), identifier, message, ucred, tv);
+
+ if (s->forward_to_kmsg)
+ server_forward_kmsg(s, priority, identifier, message, ucred);
+
+ if (s->forward_to_console)
+ server_forward_console(s, priority, identifier, message, ucred);
+
+ if (s->forward_to_wall)
+ server_forward_wall(s, priority, identifier, message, ucred);
+ }
+
+ server_dispatch_message(s, iovec, n, m, context, tv, priority, object_pid);
+
+finish:
+ for (j = 0; j < n; j++) {
+ if (j == tn)
+ continue;
+
+ if (iovec[j].iov_base < buffer ||
+ (const char*) iovec[j].iov_base >= p + *remaining)
+ free(iovec[j].iov_base);
+ }
+
+ free(iovec);
+ free(identifier);
+ free(message);
+
+ return r;
+}
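Two encodings are accepted per field in this protocol: plain "FIELD=value\n" for text, and "FIELD\n" + little-endian 64-bit size + payload + "\n" for values that may contain newlines or arbitrary binary data. A hedged sketch of a client-side serializer for one such datagram (the field names and payload are illustrative; in practice the sd_journal_send() family emits this format for you):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static size_t append_le64(uint8_t *p, uint64_t v) {
        for (unsigned i = 0; i < 8; i++)
                p[i] = (uint8_t) (v >> (8 * i));
        return 8;
}

int main(void) {
        uint8_t buf[256];
        size_t n = 0;
        static const char text[] = "MESSAGE=hello from a native client\n";  /* simple text field */
        static const char name[] = "PAYLOAD";                               /* hypothetical binary field */
        static const char data[] = "line one\nline two";                    /* value with an embedded newline */

        memcpy(buf + n, text, strlen(text)); n += strlen(text);

        memcpy(buf + n, name, strlen(name)); n += strlen(name);
        buf[n++] = '\n';
        n += append_le64(buf + n, strlen(data));
        memcpy(buf + n, data, strlen(data)); n += strlen(data);
        buf[n++] = '\n';

        /* buf/n is what would be sent as one datagram to /run/systemd/journal/socket. */
        printf("datagram is %zu bytes\n", n);
        return 0;
}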
+
+void server_process_native_message(
+ Server *s,
+ const char *buffer, size_t buffer_size,
+ const struct ucred *ucred,
+ const struct timeval *tv,
+ const char *label, size_t label_len) {
+
+ size_t remaining = buffer_size;
+ ClientContext *context = NULL;
+ int r;
+
+ assert(s);
+ assert(buffer || buffer_size == 0);
+
+ if (ucred && pid_is_valid(ucred->pid)) {
+ r = client_context_get(s, ucred->pid, ucred, label, label_len, NULL, &context);
+ if (r < 0)
+ log_warning_errno(r, "Failed to retrieve credentials for PID " PID_FMT ", ignoring: %m", ucred->pid);
+ }
+
+ do {
+ r = server_process_entry(s,
+ (const uint8_t*) buffer + (buffer_size - remaining), &remaining,
+ context, ucred, tv, label, label_len);
+ } while (r == 0);
+}
+
+void server_process_native_file(
+ Server *s,
+ int fd,
+ const struct ucred *ucred,
+ const struct timeval *tv,
+ const char *label, size_t label_len) {
+
+ struct stat st;
+ bool sealed;
+ int r;
+
+ /* Data is in the passed fd, probably because it didn't fit in a datagram. */
+
+ assert(s);
+ assert(fd >= 0);
+
+ /* If it's a memfd, check if it is sealed. If so, we can just
+ * mmap it and use it, and do not need to copy the data out. */
+ sealed = memfd_get_sealed(fd) > 0;
+
+ if (!sealed && (!ucred || ucred->uid != 0)) {
+ _cleanup_free_ char *k = NULL;
+ const char *e;
+
+ /* If this is not a sealed memfd, and the peer is unknown or
+ * unprivileged, then verify the path. */
+
+ r = fd_get_path(fd, &k);
+ if (r < 0) {
+ log_error_errno(r, "readlink(/proc/self/fd/%i) failed: %m", fd);
+ return;
+ }
+
+ e = PATH_STARTSWITH_SET(k, "/dev/shm/", "/tmp/", "/var/tmp/");
+ if (!e) {
+ log_error("Received file outside of allowed directories. Refusing.");
+ return;
+ }
+
+ if (!filename_is_valid(e)) {
+ log_error("Received file in subdirectory of allowed directories. Refusing.");
+ return;
+ }
+ }
+
+ if (fstat(fd, &st) < 0) {
+ log_error_errno(errno, "Failed to stat passed file, ignoring: %m");
+ return;
+ }
+
+ if (!S_ISREG(st.st_mode)) {
+ log_error("File passed is not regular. Ignoring.");
+ return;
+ }
+
+ if (st.st_size <= 0)
+ return;
+
+ /* When !sealed, set a lower memory limit. We have to read the file,
+ * effectively doubling memory use. */
+ if (st.st_size > ENTRY_SIZE_MAX / (sealed ? 1 : 2)) {
+ log_error("File passed too large (%"PRIu64" bytes). Ignoring.", (uint64_t) st.st_size);
+ return;
+ }
+
+ if (sealed) {
+ void *p;
+ size_t ps;
+
+ /* The file is sealed, we can just map it and use it. */
+
+ ps = PAGE_ALIGN(st.st_size);
+ p = mmap(NULL, ps, PROT_READ, MAP_PRIVATE, fd, 0);
+ if (p == MAP_FAILED) {
+ log_error_errno(errno, "Failed to map memfd, ignoring: %m");
+ return;
+ }
+
+ server_process_native_message(s, p, st.st_size, ucred, tv, label, label_len);
+ assert_se(munmap(p, ps) >= 0);
+ } else {
+ _cleanup_free_ void *p = NULL;
+ struct statvfs vfs;
+ ssize_t n;
+
+ if (fstatvfs(fd, &vfs) < 0) {
+ log_error_errno(errno, "Failed to stat file system of passed file, not processing it: %m");
+ return;
+ }
+
+ /* Refuse operating on file systems that have
+ * mandatory locking enabled, see:
+ *
+ * https://github.com/systemd/systemd/issues/1822
+ */
+ if (vfs.f_flag & ST_MANDLOCK) {
+ log_error("Received file descriptor from file system with mandatory locking enabled, not processing it.");
+ return;
+ }
+
+ /* Make the fd non-blocking. On regular files this has
+ * the effect of bypassing mandatory locking. Of
+ * course, this should normally not be necessary given
+ * the check above, but better safe than sorry: NFS is
+ * pretty confusing regarding file system flags, and so
+ * is SMB, so we'd rather not trust them. */
+ r = fd_nonblock(fd, true);
+ if (r < 0) {
+ log_error_errno(r, "Failed to make fd non-blocking, not processing it: %m");
+ return;
+ }
+
+ /* The file is not sealed, we can't map the file here, since
+ * clients might then truncate it and trigger a SIGBUS for
+ * us. So let's stupidly read it. */
+
+ p = malloc(st.st_size);
+ if (!p) {
+ log_oom();
+ return;
+ }
+
+ n = pread(fd, p, st.st_size, 0);
+ if (n < 0)
+ log_error_errno(errno, "Failed to read file, ignoring: %m");
+ else if (n > 0)
+ server_process_native_message(s, p, n, ucred, tv, label, label_len);
+ }
+}
+
+int server_open_native_socket(Server *s) {
+
+ static const union sockaddr_union sa = {
+ .un.sun_family = AF_UNIX,
+ .un.sun_path = "/run/systemd/journal/socket",
+ };
+ int r;
+
+ assert(s);
+
+ if (s->native_fd < 0) {
+ s->native_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
+ if (s->native_fd < 0)
+ return log_error_errno(errno, "socket() failed: %m");
+
+ (void) sockaddr_un_unlink(&sa.un);
+
+ r = bind(s->native_fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
+ if (r < 0)
+ return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
+
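+ /* Make the socket world-writable, so that any local process may submit native journal entries. */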
+ (void) chmod(sa.un.sun_path, 0666);
+ } else
+ (void) fd_nonblock(s->native_fd, true);
+
+ r = setsockopt_int(s->native_fd, SOL_SOCKET, SO_PASSCRED, true);
+ if (r < 0)
+ return log_error_errno(r, "SO_PASSCRED failed: %m");
+
+#if HAVE_SELINUX
+ if (mac_selinux_use()) {
+ r = setsockopt_int(s->native_fd, SOL_SOCKET, SO_PASSSEC, true);
+ if (r < 0)
+ log_warning_errno(r, "SO_PASSSEC failed: %m");
+ }
+#endif
+
+ r = setsockopt_int(s->native_fd, SOL_SOCKET, SO_TIMESTAMP, true);
+ if (r < 0)
+ return log_error_errno(r, "SO_TIMESTAMP failed: %m");
+
+ r = sd_event_add_io(s->event, &s->native_event_source, s->native_fd, EPOLLIN, server_process_datagram, s);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add native server fd to event loop: %m");
+
+ r = sd_event_source_set_priority(s->native_event_source, SD_EVENT_PRIORITY_NORMAL+5);
+ if (r < 0)
+ return log_error_errno(r, "Failed to adjust native event source priority: %m");
+
+ return 0;
+}
diff --git a/src/journal/journald-native.h b/src/journal/journald-native.h
new file mode 100644
index 0000000..2a33ef7
--- /dev/null
+++ b/src/journal/journald-native.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "journald-server.h"
+
+void server_process_native_message(
+ Server *s,
+ const char *buffer,
+ size_t buffer_size,
+ const struct ucred *ucred,
+ const struct timeval *tv,
+ const char *label,
+ size_t label_len);
+
+void server_process_native_file(
+ Server *s,
+ int fd,
+ const struct ucred *ucred,
+ const struct timeval *tv,
+ const char *label,
+ size_t label_len);
+
+int server_open_native_socket(Server *s);
diff --git a/src/journal/journald-rate-limit.c b/src/journal/journald-rate-limit.c
new file mode 100644
index 0000000..0b42d53
--- /dev/null
+++ b/src/journal/journald-rate-limit.c
@@ -0,0 +1,255 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <string.h>
+
+#include "alloc-util.h"
+#include "hashmap.h"
+#include "journald-rate-limit.h"
+#include "list.h"
+#include "random-util.h"
+#include "string-util.h"
+#include "util.h"
+
+#define POOLS_MAX 5
+#define BUCKETS_MAX 127
+#define GROUPS_MAX 2047
+
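+/* Map each syslog priority to one of the POOLS_MAX rate limit pools, so that
+ * e.g. a flood of debug messages cannot use up the budget of error messages. */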
+static const int priority_map[] = {
+ [LOG_EMERG] = 0,
+ [LOG_ALERT] = 0,
+ [LOG_CRIT] = 0,
+ [LOG_ERR] = 1,
+ [LOG_WARNING] = 2,
+ [LOG_NOTICE] = 3,
+ [LOG_INFO] = 3,
+ [LOG_DEBUG] = 4
+};
+
+typedef struct JournalRateLimitPool JournalRateLimitPool;
+typedef struct JournalRateLimitGroup JournalRateLimitGroup;
+
+struct JournalRateLimitPool {
+ usec_t begin;
+ unsigned num;
+ unsigned suppressed;
+};
+
+struct JournalRateLimitGroup {
+ JournalRateLimit *parent;
+
+ char *id;
+
+ /* Interval is stored to keep track of when the group expires */
+ usec_t interval;
+
+ JournalRateLimitPool pools[POOLS_MAX];
+ uint64_t hash;
+
+ LIST_FIELDS(JournalRateLimitGroup, bucket);
+ LIST_FIELDS(JournalRateLimitGroup, lru);
+};
+
+struct JournalRateLimit {
+
+ JournalRateLimitGroup* buckets[BUCKETS_MAX];
+ JournalRateLimitGroup *lru, *lru_tail;
+
+ unsigned n_groups;
+
+ uint8_t hash_key[16];
+};
+
+JournalRateLimit *journal_rate_limit_new(void) {
+ JournalRateLimit *r;
+
+ r = new0(JournalRateLimit, 1);
+ if (!r)
+ return NULL;
+
+ random_bytes(r->hash_key, sizeof(r->hash_key));
+
+ return r;
+}
+
+static void journal_rate_limit_group_free(JournalRateLimitGroup *g) {
+ assert(g);
+
+ if (g->parent) {
+ assert(g->parent->n_groups > 0);
+
+ if (g->parent->lru_tail == g)
+ g->parent->lru_tail = g->lru_prev;
+
+ LIST_REMOVE(lru, g->parent->lru, g);
+ LIST_REMOVE(bucket, g->parent->buckets[g->hash % BUCKETS_MAX], g);
+
+ g->parent->n_groups--;
+ }
+
+ free(g->id);
+ free(g);
+}
+
+void journal_rate_limit_free(JournalRateLimit *r) {
+ assert(r);
+
+ while (r->lru)
+ journal_rate_limit_group_free(r->lru);
+
+ free(r);
+}
+
+_pure_ static bool journal_rate_limit_group_expired(JournalRateLimitGroup *g, usec_t ts) {
+ unsigned i;
+
+ assert(g);
+
+ for (i = 0; i < POOLS_MAX; i++)
+ if (g->pools[i].begin + g->interval >= ts)
+ return false;
+
+ return true;
+}
+
+static void journal_rate_limit_vacuum(JournalRateLimit *r, usec_t ts) {
+ assert(r);
+
+ /* Makes room for at least one new item, and drops all
+ * expired items too. */
+
+ while (r->n_groups >= GROUPS_MAX ||
+ (r->lru_tail && journal_rate_limit_group_expired(r->lru_tail, ts)))
+ journal_rate_limit_group_free(r->lru_tail);
+}
+
+static JournalRateLimitGroup* journal_rate_limit_group_new(JournalRateLimit *r, const char *id, usec_t interval, usec_t ts) {
+ JournalRateLimitGroup *g;
+
+ assert(r);
+ assert(id);
+
+ g = new0(JournalRateLimitGroup, 1);
+ if (!g)
+ return NULL;
+
+ g->id = strdup(id);
+ if (!g->id)
+ goto fail;
+
+ g->hash = siphash24_string(g->id, r->hash_key);
+
+ g->interval = interval;
+
+ journal_rate_limit_vacuum(r, ts);
+
+ LIST_PREPEND(bucket, r->buckets[g->hash % BUCKETS_MAX], g);
+ LIST_PREPEND(lru, r->lru, g);
+ if (!g->lru_next)
+ r->lru_tail = g;
+ r->n_groups++;
+
+ g->parent = r;
+ return g;
+
+fail:
+ journal_rate_limit_group_free(g);
+ return NULL;
+}
+
+static unsigned burst_modulate(unsigned burst, uint64_t available) {
+ unsigned k;
+
+ /* Modulates the burst rate a bit with the amount of available
+ * disk space */
+
+ k = u64log2(available);
+
+ /* 1MB */
+ if (k <= 20)
+ return burst;
+
+ burst = (burst * (k-16)) / 4;
+
+ /*
+ * Example:
+ *
+ * <= 1MB = rate * 1
+ * 16MB = rate * 2
+ * 256MB = rate * 3
+ * 4GB = rate * 4
+ * 64GB = rate * 5
+ * 1TB = rate * 6
+ */
+
+ return burst;
+}
+
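+/* The scheme: for every (id, priority class) pair we keep a pool that counts
+ * messages within a window of rl_interval length. Up to 'burst' messages per
+ * window are let through (with burst scaled up by burst_modulate() when plenty
+ * of disk space is available); everything beyond that is counted as suppressed,
+ * and the number of suppressed messages is reported once the next window opens. */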
+int journal_rate_limit_test(JournalRateLimit *r, const char *id, usec_t rl_interval, unsigned rl_burst, int priority, uint64_t available) {
+ uint64_t h;
+ JournalRateLimitGroup *g;
+ JournalRateLimitPool *p;
+ unsigned burst;
+ usec_t ts;
+
+ assert(id);
+
+ /* Returns:
+ *
+ * 0 → the log message shall be suppressed,
+ * 1 + n → the log message shall be permitted, and n messages were dropped from the peer before
+ * < 0 → error
+ */
+
+ if (!r)
+ return 1;
+
+ ts = now(CLOCK_MONOTONIC);
+
+ h = siphash24_string(id, r->hash_key);
+ g = r->buckets[h % BUCKETS_MAX];
+
+ LIST_FOREACH(bucket, g, g)
+ if (streq(g->id, id))
+ break;
+
+ if (!g) {
+ g = journal_rate_limit_group_new(r, id, rl_interval, ts);
+ if (!g)
+ return -ENOMEM;
+ } else
+ g->interval = rl_interval;
+
+ if (rl_interval == 0 || rl_burst == 0)
+ return 1;
+
+ burst = burst_modulate(rl_burst, available);
+
+ p = &g->pools[priority_map[priority]];
+
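+ /* begin == 0 means this pool hasn't been used since the last reset: open a fresh window. */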
+ if (p->begin <= 0) {
+ p->suppressed = 0;
+ p->num = 1;
+ p->begin = ts;
+ return 1;
+ }
+
+ if (p->begin + rl_interval < ts) {
+ unsigned s;
+
+ s = p->suppressed;
+ p->suppressed = 0;
+ p->num = 1;
+ p->begin = ts;
+
+ return 1 + s;
+ }
+
+ if (p->num < burst) {
+ p->num++;
+ return 1;
+ }
+
+ p->suppressed++;
+ return 0;
+}
diff --git a/src/journal/journald-rate-limit.h b/src/journal/journald-rate-limit.h
new file mode 100644
index 0000000..a299280
--- /dev/null
+++ b/src/journal/journald-rate-limit.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "util.h"
+
+typedef struct JournalRateLimit JournalRateLimit;
+
+JournalRateLimit *journal_rate_limit_new(void);
+void journal_rate_limit_free(JournalRateLimit *r);
+int journal_rate_limit_test(JournalRateLimit *r, const char *id, usec_t rl_interval, unsigned rl_burst, int priority, uint64_t available);
diff --git a/src/journal/journald-server.c b/src/journal/journald-server.c
new file mode 100644
index 0000000..2a960eb
--- /dev/null
+++ b/src/journal/journald-server.c
@@ -0,0 +1,2192 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#if HAVE_SELINUX
+#include <selinux/selinux.h>
+#endif
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <sys/signalfd.h>
+#include <sys/statvfs.h>
+#include <linux/sockios.h>
+
+#include "sd-daemon.h"
+#include "sd-journal.h"
+#include "sd-messages.h"
+
+#include "acl-util.h"
+#include "alloc-util.h"
+#include "audit-util.h"
+#include "cgroup-util.h"
+#include "conf-parser.h"
+#include "dirent-util.h"
+#include "extract-word.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "format-util.h"
+#include "fs-util.h"
+#include "hashmap.h"
+#include "hostname-util.h"
+#include "id128-util.h"
+#include "io-util.h"
+#include "journal-authenticate.h"
+#include "journal-file.h"
+#include "journal-internal.h"
+#include "journal-vacuum.h"
+#include "journald-audit.h"
+#include "journald-context.h"
+#include "journald-kmsg.h"
+#include "journald-native.h"
+#include "journald-rate-limit.h"
+#include "journald-server.h"
+#include "journald-stream.h"
+#include "journald-syslog.h"
+#include "log.h"
+#include "missing.h"
+#include "mkdir.h"
+#include "parse-util.h"
+#include "proc-cmdline.h"
+#include "process-util.h"
+#include "rm-rf.h"
+#include "selinux-util.h"
+#include "signal-util.h"
+#include "socket-util.h"
+#include "stdio-util.h"
+#include "string-table.h"
+#include "string-util.h"
+#include "syslog-util.h"
+#include "user-util.h"
+
+#define USER_JOURNALS_MAX 1024
+
+#define DEFAULT_SYNC_INTERVAL_USEC (5*USEC_PER_MINUTE)
+#define DEFAULT_RATE_LIMIT_INTERVAL (30*USEC_PER_SEC)
+#define DEFAULT_RATE_LIMIT_BURST 10000
+#define DEFAULT_MAX_FILE_USEC USEC_PER_MONTH
+
+#define RECHECK_SPACE_USEC (30*USEC_PER_SEC)
+
+#define NOTIFY_SNDBUF_SIZE (8*1024*1024)
+
+/* The period to insert between posting changes for coalescing */
+#define POST_CHANGE_TIMER_INTERVAL_USEC (250*USEC_PER_MSEC)
+
+/* Pick a good default that is likely to fit into AF_UNIX and AF_INET SOCK_DGRAM datagrams, and even leaves some room
+ * for a bit of additional metadata. */
+#define DEFAULT_LINE_MAX (48*1024)
+
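+/* Upper bound on the number of journal files parked in the deferred-close set while they
+ * finish offlining asynchronously; beyond this we start closing them synchronously. */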
+#define DEFERRED_CLOSES_MAX (4096)
+
+static int determine_path_usage(Server *s, const char *path, uint64_t *ret_used, uint64_t *ret_free) {
+ _cleanup_closedir_ DIR *d = NULL;
+ struct dirent *de;
+ struct statvfs ss;
+
+ assert(ret_used);
+ assert(ret_free);
+
+ d = opendir(path);
+ if (!d)
+ return log_full_errno(errno == ENOENT ? LOG_DEBUG : LOG_ERR,
+ errno, "Failed to open %s: %m", path);
+
+ if (fstatvfs(dirfd(d), &ss) < 0)
+ return log_error_errno(errno, "Failed to fstatvfs(%s): %m", path);
+
+ *ret_free = ss.f_bsize * ss.f_bavail;
+ *ret_used = 0;
+ FOREACH_DIRENT_ALL(de, d, break) {
+ struct stat st;
+
+ if (!endswith(de->d_name, ".journal") &&
+ !endswith(de->d_name, ".journal~"))
+ continue;
+
+ if (fstatat(dirfd(d), de->d_name, &st, AT_SYMLINK_NOFOLLOW) < 0) {
+ log_debug_errno(errno, "Failed to stat %s/%s, ignoring: %m", path, de->d_name);
+ continue;
+ }
+
+ if (!S_ISREG(st.st_mode))
+ continue;
+
+ *ret_used += (uint64_t) st.st_blocks * 512UL;
+ }
+
+ return 0;
+}
+
+static void cache_space_invalidate(JournalStorageSpace *space) {
+ zero(*space);
+}
+
+static int cache_space_refresh(Server *s, JournalStorage *storage) {
+ JournalStorageSpace *space;
+ JournalMetrics *metrics;
+ uint64_t vfs_used, vfs_avail, avail;
+ usec_t ts;
+ int r;
+
+ assert(s);
+
+ metrics = &storage->metrics;
+ space = &storage->space;
+
+ ts = now(CLOCK_MONOTONIC);
+
+ if (space->timestamp != 0 && space->timestamp + RECHECK_SPACE_USEC > ts)
+ return 0;
+
+ r = determine_path_usage(s, storage->path, &vfs_used, &vfs_avail);
+ if (r < 0)
+ return r;
+
+ space->vfs_used = vfs_used;
+ space->vfs_available = vfs_avail;
+
+ avail = LESS_BY(vfs_avail, metrics->keep_free);
+
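+ /* The limit is what we already use plus whatever is still free above keep_free,
+ * clamped into the configured [min_use, max_use] range. Example (hypothetical
+ * numbers): with 500 MiB used, 10 GiB free, keep_free=1 GiB, min_use=16 MiB and
+ * max_use=4 GiB, the limit becomes MIN(MAX(500 MiB + 9 GiB, 16 MiB), 4 GiB) = 4 GiB,
+ * leaving 3.5 GiB available for new journal data. */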
+ space->limit = MIN(MAX(vfs_used + avail, metrics->min_use), metrics->max_use);
+ space->available = LESS_BY(space->limit, vfs_used);
+ space->timestamp = ts;
+ return 1;
+}
+
+static void patch_min_use(JournalStorage *storage) {
+ assert(storage);
+
+ /* Let's bump the min_use limit to the current usage on disk. We do
+ * this when starting up and first opening the journal files. This way
+ * sudden spikes in disk usage will not cause journald to vacuum files
+ * without bounds. Note that this means that only a restart of journald
+ * will make it reset this value. */
+
+ storage->metrics.min_use = MAX(storage->metrics.min_use, storage->space.vfs_used);
+}
+
+static int determine_space(Server *s, uint64_t *available, uint64_t *limit) {
+ JournalStorage *js;
+ int r;
+
+ assert(s);
+
+ js = s->system_journal ? &s->system_storage : &s->runtime_storage;
+
+ r = cache_space_refresh(s, js);
+ if (r >= 0) {
+ if (available)
+ *available = js->space.available;
+ if (limit)
+ *limit = js->space.limit;
+ }
+ return r;
+}
+
+void server_space_usage_message(Server *s, JournalStorage *storage) {
+ char fb1[FORMAT_BYTES_MAX], fb2[FORMAT_BYTES_MAX], fb3[FORMAT_BYTES_MAX],
+ fb4[FORMAT_BYTES_MAX], fb5[FORMAT_BYTES_MAX], fb6[FORMAT_BYTES_MAX];
+ JournalMetrics *metrics;
+
+ assert(s);
+
+ if (!storage)
+ storage = s->system_journal ? &s->system_storage : &s->runtime_storage;
+
+ if (cache_space_refresh(s, storage) < 0)
+ return;
+
+ metrics = &storage->metrics;
+ format_bytes(fb1, sizeof(fb1), storage->space.vfs_used);
+ format_bytes(fb2, sizeof(fb2), metrics->max_use);
+ format_bytes(fb3, sizeof(fb3), metrics->keep_free);
+ format_bytes(fb4, sizeof(fb4), storage->space.vfs_available);
+ format_bytes(fb5, sizeof(fb5), storage->space.limit);
+ format_bytes(fb6, sizeof(fb6), storage->space.available);
+
+ server_driver_message(s, 0,
+ "MESSAGE_ID=" SD_MESSAGE_JOURNAL_USAGE_STR,
+ LOG_MESSAGE("%s (%s) is %s, max %s, %s free.",
+ storage->name, storage->path, fb1, fb5, fb6),
+ "JOURNAL_NAME=%s", storage->name,
+ "JOURNAL_PATH=%s", storage->path,
+ "CURRENT_USE=%"PRIu64, storage->space.vfs_used,
+ "CURRENT_USE_PRETTY=%s", fb1,
+ "MAX_USE=%"PRIu64, metrics->max_use,
+ "MAX_USE_PRETTY=%s", fb2,
+ "DISK_KEEP_FREE=%"PRIu64, metrics->keep_free,
+ "DISK_KEEP_FREE_PRETTY=%s", fb3,
+ "DISK_AVAILABLE=%"PRIu64, storage->space.vfs_available,
+ "DISK_AVAILABLE_PRETTY=%s", fb4,
+ "LIMIT=%"PRIu64, storage->space.limit,
+ "LIMIT_PRETTY=%s", fb5,
+ "AVAILABLE=%"PRIu64, storage->space.available,
+ "AVAILABLE_PRETTY=%s", fb6,
+ NULL);
+}
+
+static bool uid_for_system_journal(uid_t uid) {
+
+ /* Returns true if the specified UID shall get its data stored in the system journal. */
+
+ return uid_is_system(uid) || uid_is_dynamic(uid) || uid == UID_NOBODY;
+}
+
+static void server_add_acls(JournalFile *f, uid_t uid) {
+#if HAVE_ACL
+ int r;
+#endif
+ assert(f);
+
+#if HAVE_ACL
+ if (uid_for_system_journal(uid))
+ return;
+
+ r = add_acls_for_user(f->fd, uid);
+ if (r < 0)
+ log_warning_errno(r, "Failed to set ACL on %s, ignoring: %m", f->path);
+#endif
+}
+
+static int open_journal(
+ Server *s,
+ bool reliably,
+ const char *fname,
+ int flags,
+ bool seal,
+ JournalMetrics *metrics,
+ JournalFile **ret) {
+
+ JournalFile *f;
+ int r;
+
+ assert(s);
+ assert(fname);
+ assert(ret);
+
+ if (reliably)
+ r = journal_file_open_reliably(fname, flags, 0640, s->compress.enabled, s->compress.threshold_bytes,
+ seal, metrics, s->mmap, s->deferred_closes, NULL, &f);
+ else
+ r = journal_file_open(-1, fname, flags, 0640, s->compress.enabled, s->compress.threshold_bytes, seal,
+ metrics, s->mmap, s->deferred_closes, NULL, &f);
+
+ if (r < 0)
+ return r;
+
+ r = journal_file_enable_post_change_timer(f, s->event, POST_CHANGE_TIMER_INTERVAL_USEC);
+ if (r < 0) {
+ (void) journal_file_close(f);
+ return r;
+ }
+
+ *ret = f;
+ return r;
+}
+
+static bool flushed_flag_is_set(void) {
+ return access("/run/systemd/journal/flushed", F_OK) >= 0;
+}
+
+static int system_journal_open(Server *s, bool flush_requested) {
+ const char *fn;
+ int r = 0;
+
+ if (!s->system_journal &&
+ IN_SET(s->storage, STORAGE_PERSISTENT, STORAGE_AUTO) &&
+ (flush_requested || flushed_flag_is_set())) {
+
+ /* If in auto mode: first try to create the machine
+ * path, but not the prefix.
+ *
+ * If in persistent mode: create /var/log/journal and
+ * the machine path */
+
+ if (s->storage == STORAGE_PERSISTENT)
+ (void) mkdir_p("/var/log/journal/", 0755);
+
+ (void) mkdir(s->system_storage.path, 0755);
+
+ fn = strjoina(s->system_storage.path, "/system.journal");
+ r = open_journal(s, true, fn, O_RDWR|O_CREAT, s->seal, &s->system_storage.metrics, &s->system_journal);
+ if (r >= 0) {
+ server_add_acls(s->system_journal, 0);
+ (void) cache_space_refresh(s, &s->system_storage);
+ patch_min_use(&s->system_storage);
+ } else {
+ if (!IN_SET(r, -ENOENT, -EROFS))
+ log_warning_errno(r, "Failed to open system journal: %m");
+
+ r = 0;
+ }
+
+ /* If the runtime journal is open, and we're post-flush, we're
+ * recovering from a failed system journal rotate (ENOSPC)
+ * for which the runtime journal was reopened.
+ *
+ * Perform an implicit flush to var, leaving the runtime
+ * journal closed, now that the system journal is back.
+ */
+ if (!flush_requested)
+ (void) server_flush_to_var(s, true);
+ }
+
+ if (!s->runtime_journal &&
+ (s->storage != STORAGE_NONE)) {
+
+ fn = strjoina(s->runtime_storage.path, "/system.journal");
+
+ if (s->system_journal) {
+
+ /* Try to open the runtime journal, but only
+ * if it already exists, so that we can flush
+ * it into the system journal */
+
+ r = open_journal(s, false, fn, O_RDWR, false, &s->runtime_storage.metrics, &s->runtime_journal);
+ if (r < 0) {
+ if (r != -ENOENT)
+ log_warning_errno(r, "Failed to open runtime journal: %m");
+
+ r = 0;
+ }
+
+ } else {
+
+ /* OK, we really need the runtime journal, so create
+ * it if necessary. */
+
+ (void) mkdir("/run/log", 0755);
+ (void) mkdir("/run/log/journal", 0755);
+ (void) mkdir_parents(fn, 0750);
+
+ r = open_journal(s, true, fn, O_RDWR|O_CREAT, false, &s->runtime_storage.metrics, &s->runtime_journal);
+ if (r < 0)
+ return log_error_errno(r, "Failed to open runtime journal: %m");
+ }
+
+ if (s->runtime_journal) {
+ server_add_acls(s->runtime_journal, 0);
+ (void) cache_space_refresh(s, &s->runtime_storage);
+ patch_min_use(&s->runtime_storage);
+ }
+ }
+
+ return r;
+}
+
+static JournalFile* find_journal(Server *s, uid_t uid) {
+ _cleanup_free_ char *p = NULL;
+ int r;
+ JournalFile *f;
+ sd_id128_t machine;
+
+ assert(s);
+
+ /* A rotate that fails to create the new journal (ENOSPC) leaves the
+ * rotated journal as NULL. Unless we revisit opening, even after
+ * space is made available we'll continue to return NULL indefinitely.
+ *
+ * system_journal_open() is a noop if the journals are already open, so
+ * we can just call it here to recover from failed rotates (or anything
+ * else that's left the journals as NULL).
+ *
+ * Fixes https://github.com/systemd/systemd/issues/3968 */
+ (void) system_journal_open(s, false);
+
+ /* We split up user logs only on /var, not on /run. If the
+ * runtime file is open, we write to it exclusively, in order
+ * to guarantee proper order as soon as we flush /run to
+ * /var and close the runtime file. */
+
+ if (s->runtime_journal)
+ return s->runtime_journal;
+
+ if (uid_for_system_journal(uid))
+ return s->system_journal;
+
+ f = ordered_hashmap_get(s->user_journals, UID_TO_PTR(uid));
+ if (f)
+ return f;
+
+ r = sd_id128_get_machine(&machine);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to determine machine ID, using system log: %m");
+ return s->system_journal;
+ }
+
+ if (asprintf(&p, "/var/log/journal/" SD_ID128_FORMAT_STR "/user-"UID_FMT".journal",
+ SD_ID128_FORMAT_VAL(machine), uid) < 0) {
+ log_oom();
+ return s->system_journal;
+ }
+
+ while (ordered_hashmap_size(s->user_journals) >= USER_JOURNALS_MAX) {
+ /* Too many open? Then let's close one */
+ f = ordered_hashmap_steal_first(s->user_journals);
+ assert(f);
+ (void) journal_file_close(f);
+ }
+
+ r = open_journal(s, true, p, O_RDWR|O_CREAT, s->seal, &s->system_storage.metrics, &f);
+ if (r < 0)
+ return s->system_journal;
+
+ server_add_acls(f, uid);
+
+ r = ordered_hashmap_put(s->user_journals, UID_TO_PTR(uid), f);
+ if (r < 0) {
+ (void) journal_file_close(f);
+ return s->system_journal;
+ }
+
+ return f;
+}
+
+static int do_rotate(
+ Server *s,
+ JournalFile **f,
+ const char* name,
+ bool seal,
+ uint32_t uid) {
+
+ int r;
+ assert(s);
+
+ if (!*f)
+ return -EINVAL;
+
+ r = journal_file_rotate(f, s->compress.enabled, s->compress.threshold_bytes, seal, s->deferred_closes);
+ if (r < 0) {
+ if (*f)
+ return log_error_errno(r, "Failed to rotate %s: %m", (*f)->path);
+ else
+ return log_error_errno(r, "Failed to create new %s journal: %m", name);
+ }
+
+ server_add_acls(*f, uid);
+
+ return r;
+}
+
+static void server_process_deferred_closes(Server *s) {
+ JournalFile *f;
+ Iterator i;
+
+ /* Perform any deferred closes which aren't still offlining. */
+ SET_FOREACH(f, s->deferred_closes, i) {
+ if (journal_file_is_offlining(f))
+ continue;
+
+ (void) set_remove(s->deferred_closes, f);
+ (void) journal_file_close(f);
+ }
+}
+
+static void server_vacuum_deferred_closes(Server *s) {
+ assert(s);
+
+ /* Make some room in the deferred closes list, so that it doesn't grow without bounds */
+ if (set_size(s->deferred_closes) < DEFERRED_CLOSES_MAX)
+ return;
+
+ /* Let's first remove all journal files that might already have completed closing */
+ server_process_deferred_closes(s);
+
+ /* And now, let's close some more until we reach the limit again. */
+ while (set_size(s->deferred_closes) >= DEFERRED_CLOSES_MAX) {
+ JournalFile *f;
+
+ assert_se(f = set_steal_first(s->deferred_closes));
+ journal_file_close(f);
+ }
+}
+
+static int open_user_journal_directory(Server *s, DIR **ret_dir, char **ret_path) {
+ _cleanup_closedir_ DIR *dir = NULL;
+ _cleanup_free_ char *path = NULL;
+ sd_id128_t machine;
+ int r;
+
+ assert(s);
+
+ r = sd_id128_get_machine(&machine);
+ if (r < 0)
+ return log_error_errno(r, "Failed to determine machine ID, ignoring: %m");
+
+ if (asprintf(&path, "/var/log/journal/" SD_ID128_FORMAT_STR "/", SD_ID128_FORMAT_VAL(machine)) < 0)
+ return log_oom();
+
+ dir = opendir(path);
+ if (!dir)
+ return log_error_errno(errno, "Failed to open user journal directory '%s': %m", path);
+
+ if (ret_dir)
+ *ret_dir = TAKE_PTR(dir);
+ if (ret_path)
+ *ret_path = TAKE_PTR(path);
+
+ return 0;
+}
+
+void server_rotate(Server *s) {
+ _cleanup_free_ char *path = NULL;
+ _cleanup_closedir_ DIR *d = NULL;
+ JournalFile *f;
+ Iterator i;
+ void *k;
+ int r;
+
+ log_debug("Rotating...");
+
+ /* First, rotate the system journal (either in its runtime flavour or in its persistent flavour) */
+ (void) do_rotate(s, &s->runtime_journal, "runtime", false, 0);
+ (void) do_rotate(s, &s->system_journal, "system", s->seal, 0);
+
+ /* Then, rotate all user journals we have open (keeping them open) */
+ ORDERED_HASHMAP_FOREACH_KEY(f, k, s->user_journals, i) {
+ r = do_rotate(s, &f, "user", s->seal, PTR_TO_UID(k));
+ if (r >= 0)
+ ordered_hashmap_replace(s->user_journals, k, f);
+ else if (!f)
+ /* Old file has been closed and deallocated */
+ ordered_hashmap_remove(s->user_journals, k);
+ }
+
+ /* Finally, also rotate all user journals we currently do not have open. (But do so only if we actually have
+ * access to /var, i.e. are not in the log-to-runtime-journal mode). */
+ if (!s->runtime_journal &&
+ open_user_journal_directory(s, &d, &path) >= 0) {
+
+ struct dirent *de;
+
+ FOREACH_DIRENT(de, d, log_warning_errno(errno, "Failed to enumerate %s, ignoring: %m", path)) {
+ _cleanup_free_ char *u = NULL, *full = NULL;
+ _cleanup_close_ int fd = -1;
+ const char *a, *b;
+ uid_t uid;
+
+ a = startswith(de->d_name, "user-");
+ if (!a)
+ continue;
+ b = endswith(de->d_name, ".journal");
+ if (!b)
+ continue;
+
+ u = strndup(a, b-a);
+ if (!u) {
+ log_oom();
+ break;
+ }
+
+ r = parse_uid(u, &uid);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to parse UID from file name '%s', ignoring: %m", de->d_name);
+ continue;
+ }
+
+ /* Already rotated in the above loop? i.e. is it an open user journal? */
+ if (ordered_hashmap_contains(s->user_journals, UID_TO_PTR(uid)))
+ continue;
+
+ full = strjoin(path, de->d_name);
+ if (!full) {
+ log_oom();
+ break;
+ }
+
+ fd = openat(dirfd(d), de->d_name, O_RDWR|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW|O_NONBLOCK);
+ if (fd < 0) {
+ log_full_errno(IN_SET(errno, ELOOP, ENOENT) ? LOG_DEBUG : LOG_WARNING, errno,
+ "Failed to open journal file '%s' for rotation: %m", full);
+ continue;
+ }
+
+ /* Make some room in the set of deferred close()s */
+ server_vacuum_deferred_closes(s);
+
+ /* Open the file briefly, so that we can archive it */
+ r = journal_file_open(fd,
+ full,
+ O_RDWR,
+ 0640,
+ s->compress.enabled,
+ s->compress.threshold_bytes,
+ s->seal,
+ &s->system_storage.metrics,
+ s->mmap,
+ s->deferred_closes,
+ NULL,
+ &f);
+ if (r < 0) {
+ log_warning_errno(r, "Failed to read journal file %s for rotation, trying to move it out of the way: %m", full);
+
+ r = journal_file_dispose(dirfd(d), de->d_name);
+ if (r < 0)
+ log_warning_errno(r, "Failed to move %s out of the way, ignoring: %m", full);
+ else
+ log_debug("Successfully moved %s out of the way.", full);
+
+ continue;
+ }
+
+ TAKE_FD(fd); /* Donated to journal_file_open() */
+
+ r = journal_file_archive(f);
+ if (r < 0)
+ log_debug_errno(r, "Failed to archive journal file '%s', ignoring: %m", full);
+
+ f = journal_initiate_close(f, s->deferred_closes);
+ }
+ }
+
+ server_process_deferred_closes(s);
+}
+
+void server_sync(Server *s) {
+ JournalFile *f;
+ Iterator i;
+ int r;
+
+ if (s->system_journal) {
+ r = journal_file_set_offline(s->system_journal, false);
+ if (r < 0)
+ log_warning_errno(r, "Failed to sync system journal, ignoring: %m");
+ }
+
+ ORDERED_HASHMAP_FOREACH(f, s->user_journals, i) {
+ r = journal_file_set_offline(f, false);
+ if (r < 0)
+ log_warning_errno(r, "Failed to sync user journal, ignoring: %m");
+ }
+
+ if (s->sync_event_source) {
+ r = sd_event_source_set_enabled(s->sync_event_source, SD_EVENT_OFF);
+ if (r < 0)
+ log_error_errno(r, "Failed to disable sync timer source: %m");
+ }
+
+ s->sync_scheduled = false;
+}
+
+static void do_vacuum(Server *s, JournalStorage *storage, bool verbose) {
+
+ int r;
+
+ assert(s);
+ assert(storage);
+
+ (void) cache_space_refresh(s, storage);
+
+ if (verbose)
+ server_space_usage_message(s, storage);
+
+ r = journal_directory_vacuum(storage->path, storage->space.limit,
+ storage->metrics.n_max_files, s->max_retention_usec,
+ &s->oldest_file_usec, verbose);
+ if (r < 0 && r != -ENOENT)
+ log_warning_errno(r, "Failed to vacuum %s, ignoring: %m", storage->path);
+
+ cache_space_invalidate(&storage->space);
+}
+
+int server_vacuum(Server *s, bool verbose) {
+ assert(s);
+
+ log_debug("Vacuuming...");
+
+ s->oldest_file_usec = 0;
+
+ if (s->system_journal)
+ do_vacuum(s, &s->system_storage, verbose);
+ if (s->runtime_journal)
+ do_vacuum(s, &s->runtime_storage, verbose);
+
+ return 0;
+}
+
+static void server_cache_machine_id(Server *s) {
+ sd_id128_t id;
+ int r;
+
+ assert(s);
+
+ r = sd_id128_get_machine(&id);
+ if (r < 0)
+ return;
+
+ sd_id128_to_string(id, stpcpy(s->machine_id_field, "_MACHINE_ID="));
+}
+
+static void server_cache_boot_id(Server *s) {
+ sd_id128_t id;
+ int r;
+
+ assert(s);
+
+ r = sd_id128_get_boot(&id);
+ if (r < 0)
+ return;
+
+ sd_id128_to_string(id, stpcpy(s->boot_id_field, "_BOOT_ID="));
+}
+
+static void server_cache_hostname(Server *s) {
+ _cleanup_free_ char *t = NULL;
+ char *x;
+
+ assert(s);
+
+ t = gethostname_malloc();
+ if (!t)
+ return;
+
+ x = strappend("_HOSTNAME=", t);
+ if (!x)
+ return;
+
+ free(s->hostname_field);
+ s->hostname_field = x;
+}
+
+static bool shall_try_append_again(JournalFile *f, int r) {
+ switch(r) {
+
+ case -E2BIG: /* Hit configured limit */
+ case -EFBIG: /* Hit fs limit */
+ case -EDQUOT: /* Quota limit hit */
+ case -ENOSPC: /* Disk full */
+ log_debug("%s: Allocation limit reached, rotating.", f->path);
+ return true;
+
+ case -EIO: /* I/O error of some kind (mmap) */
+ log_warning("%s: IO error, rotating.", f->path);
+ return true;
+
+ case -EHOSTDOWN: /* Other machine */
+ log_info("%s: Journal file from other machine, rotating.", f->path);
+ return true;
+
+ case -EBUSY: /* Unclean shutdown */
+ log_info("%s: Unclean shutdown, rotating.", f->path);
+ return true;
+
+ case -EPROTONOSUPPORT: /* Unsupported feature */
+ log_info("%s: Unsupported feature, rotating.", f->path);
+ return true;
+
+ case -EBADMSG: /* Corrupted */
+ case -ENODATA: /* Truncated */
+ case -ESHUTDOWN: /* Already archived */
+ log_warning("%s: Journal file corrupted, rotating.", f->path);
+ return true;
+
+ case -EIDRM: /* Journal file has been deleted */
+ log_warning("%s: Journal file has been deleted, rotating.", f->path);
+ return true;
+
+ case -ETXTBSY: /* Journal file is from the future */
+ log_warning("%s: Journal file is from the future, rotating.", f->path);
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static void write_to_journal(Server *s, uid_t uid, struct iovec *iovec, size_t n, int priority) {
+ bool vacuumed = false, rotate = false;
+ struct dual_timestamp ts;
+ JournalFile *f;
+ int r;
+
+ assert(s);
+ assert(iovec);
+ assert(n > 0);
+
+ /* Get the closest, linearized time we have for this log event from the event loop. (Note that we do not use
+ * the source time, and not even the time the event was originally seen, but instead simply the time we started
+ * processing it, as we want strictly linear ordering in what we write out.) */
+ assert_se(sd_event_now(s->event, CLOCK_REALTIME, &ts.realtime) >= 0);
+ assert_se(sd_event_now(s->event, CLOCK_MONOTONIC, &ts.monotonic) >= 0);
+
+ if (ts.realtime < s->last_realtime_clock) {
+ /* When the time jumps backwards, let's immediately rotate. Of course, this should not happen during
+ * regular operation. However, when it does happen, then we should make sure that we start fresh files
+ * to ensure that the entries in the journal files are strictly ordered by time, in order to ensure
+ * bisection works correctly. */
+
+ log_debug("Time jumped backwards, rotating.");
+ rotate = true;
+ } else {
+
+ f = find_journal(s, uid);
+ if (!f)
+ return;
+
+ if (journal_file_rotate_suggested(f, s->max_file_usec)) {
+ log_debug("%s: Journal header limits reached or header out-of-date, rotating.", f->path);
+ rotate = true;
+ }
+ }
+
+ if (rotate) {
+ server_rotate(s);
+ server_vacuum(s, false);
+ vacuumed = true;
+
+ f = find_journal(s, uid);
+ if (!f)
+ return;
+ }
+
+ s->last_realtime_clock = ts.realtime;
+
+ r = journal_file_append_entry(f, &ts, NULL, iovec, n, &s->seqnum, NULL, NULL);
+ if (r >= 0) {
+ server_schedule_sync(s, priority);
+ return;
+ }
+
+ if (vacuumed || !shall_try_append_again(f, r)) {
+ log_error_errno(r, "Failed to write entry (%zu items, %zu bytes), ignoring: %m", n, IOVEC_TOTAL_SIZE(iovec, n));
+ return;
+ }
+
+ server_rotate(s);
+ server_vacuum(s, false);
+
+ f = find_journal(s, uid);
+ if (!f)
+ return;
+
+ log_debug("Retrying write.");
+ r = journal_file_append_entry(f, &ts, NULL, iovec, n, &s->seqnum, NULL, NULL);
+ if (r < 0)
+ log_error_errno(r, "Failed to write entry (%zu items, %zu bytes) despite vacuuming, ignoring: %m", n, IOVEC_TOTAL_SIZE(iovec, n));
+ else
+ server_schedule_sync(s, priority);
+}
+
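+/* Helpers that append one "FIELD=value" item to the iovec array. The strings are
+ * allocated on the stack of the expanding function (newa()/strjoina()), hence they
+ * only stay valid until that function returns; dispatch_message_real() below hands
+ * the entry to write_to_journal() before returning, so that is sufficient. */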
+#define IOVEC_ADD_NUMERIC_FIELD(iovec, n, value, type, isset, format, field) \
+ if (isset(value)) { \
+ char *k; \
+ k = newa(char, STRLEN(field "=") + DECIMAL_STR_MAX(type) + 1); \
+ sprintf(k, field "=" format, value); \
+ iovec[n++] = IOVEC_MAKE_STRING(k); \
+ }
+
+#define IOVEC_ADD_STRING_FIELD(iovec, n, value, field) \
+ if (!isempty(value)) { \
+ char *k; \
+ k = strjoina(field "=", value); \
+ iovec[n++] = IOVEC_MAKE_STRING(k); \
+ }
+
+#define IOVEC_ADD_ID128_FIELD(iovec, n, value, field) \
+ if (!sd_id128_is_null(value)) { \
+ char *k; \
+ k = newa(char, STRLEN(field "=") + SD_ID128_STRING_MAX); \
+ sd_id128_to_string(value, stpcpy(k, field "=")); \
+ iovec[n++] = IOVEC_MAKE_STRING(k); \
+ }
+
+#define IOVEC_ADD_SIZED_FIELD(iovec, n, value, value_size, field) \
+ if (value_size > 0) { \
+ char *k; \
+ k = newa(char, STRLEN(field "=") + value_size + 1); \
+ *((char*) mempcpy(stpcpy(k, field "="), value, value_size)) = 0; \
+ iovec[n++] = IOVEC_MAKE_STRING(k); \
+ }
+
+static void dispatch_message_real(
+ Server *s,
+ struct iovec *iovec, size_t n, size_t m,
+ const ClientContext *c,
+ const struct timeval *tv,
+ int priority,
+ pid_t object_pid) {
+
+ char source_time[sizeof("_SOURCE_REALTIME_TIMESTAMP=") + DECIMAL_STR_MAX(usec_t)];
+ _cleanup_free_ char *cmdline1 = NULL, *cmdline2 = NULL;
+ uid_t journal_uid;
+ ClientContext *o;
+
+ assert(s);
+ assert(iovec);
+ assert(n > 0);
+ assert(n +
+ N_IOVEC_META_FIELDS +
+ (pid_is_valid(object_pid) ? N_IOVEC_OBJECT_FIELDS : 0) +
+ client_context_extra_fields_n_iovec(c) <= m);
+
+ if (c) {
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->pid, pid_t, pid_is_valid, PID_FMT, "_PID");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->uid, uid_t, uid_is_valid, UID_FMT, "_UID");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->gid, gid_t, gid_is_valid, GID_FMT, "_GID");
+
+ IOVEC_ADD_STRING_FIELD(iovec, n, c->comm, "_COMM"); /* At most TASK_COMM_LENGTH (16 bytes) */
+ IOVEC_ADD_STRING_FIELD(iovec, n, c->exe, "_EXE"); /* A path, so at most PATH_MAX (4096 bytes) */
+
+ if (c->cmdline)
+ /* At most _SC_ARG_MAX (2MB usually), which is too much to put on stack.
+ * Let's use a heap allocation for this one. */
+ cmdline1 = set_iovec_string_field(iovec, &n, "_CMDLINE=", c->cmdline);
+
+ IOVEC_ADD_STRING_FIELD(iovec, n, c->capeff, "_CAP_EFFECTIVE"); /* Read from /proc/.../status */
+ IOVEC_ADD_SIZED_FIELD(iovec, n, c->label, c->label_size, "_SELINUX_CONTEXT");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->auditid, uint32_t, audit_session_is_valid, "%" PRIu32, "_AUDIT_SESSION");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->loginuid, uid_t, uid_is_valid, UID_FMT, "_AUDIT_LOGINUID");
+
+ IOVEC_ADD_STRING_FIELD(iovec, n, c->cgroup, "_SYSTEMD_CGROUP"); /* A path */
+ IOVEC_ADD_STRING_FIELD(iovec, n, c->session, "_SYSTEMD_SESSION");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->owner_uid, uid_t, uid_is_valid, UID_FMT, "_SYSTEMD_OWNER_UID");
+ IOVEC_ADD_STRING_FIELD(iovec, n, c->unit, "_SYSTEMD_UNIT"); /* Unit names are bounded by UNIT_NAME_MAX */
+ IOVEC_ADD_STRING_FIELD(iovec, n, c->user_unit, "_SYSTEMD_USER_UNIT");
+ IOVEC_ADD_STRING_FIELD(iovec, n, c->slice, "_SYSTEMD_SLICE");
+ IOVEC_ADD_STRING_FIELD(iovec, n, c->user_slice, "_SYSTEMD_USER_SLICE");
+
+ IOVEC_ADD_ID128_FIELD(iovec, n, c->invocation_id, "_SYSTEMD_INVOCATION_ID");
+
+ if (c->extra_fields_n_iovec > 0) {
+ memcpy(iovec + n, c->extra_fields_iovec, c->extra_fields_n_iovec * sizeof(struct iovec));
+ n += c->extra_fields_n_iovec;
+ }
+ }
+
+ assert(n <= m);
+
+ if (pid_is_valid(object_pid) && client_context_get(s, object_pid, NULL, NULL, 0, NULL, &o) >= 0) {
+
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->pid, pid_t, pid_is_valid, PID_FMT, "OBJECT_PID");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->uid, uid_t, uid_is_valid, UID_FMT, "OBJECT_UID");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->gid, gid_t, gid_is_valid, GID_FMT, "OBJECT_GID");
+
+ /* See above for size limits, only ->cmdline may be large, so use a heap allocation for it. */
+ IOVEC_ADD_STRING_FIELD(iovec, n, o->comm, "OBJECT_COMM");
+ IOVEC_ADD_STRING_FIELD(iovec, n, o->exe, "OBJECT_EXE");
+ if (o->cmdline)
+ cmdline2 = set_iovec_string_field(iovec, &n, "OBJECT_CMDLINE=", o->cmdline);
+
+ IOVEC_ADD_STRING_FIELD(iovec, n, o->capeff, "OBJECT_CAP_EFFECTIVE");
+ IOVEC_ADD_SIZED_FIELD(iovec, n, o->label, o->label_size, "OBJECT_SELINUX_CONTEXT");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->auditid, uint32_t, audit_session_is_valid, "%" PRIu32, "OBJECT_AUDIT_SESSION");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->loginuid, uid_t, uid_is_valid, UID_FMT, "OBJECT_AUDIT_LOGINUID");
+
+ IOVEC_ADD_STRING_FIELD(iovec, n, o->cgroup, "OBJECT_SYSTEMD_CGROUP");
+ IOVEC_ADD_STRING_FIELD(iovec, n, o->session, "OBJECT_SYSTEMD_SESSION");
+ IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->owner_uid, uid_t, uid_is_valid, UID_FMT, "OBJECT_SYSTEMD_OWNER_UID");
+ IOVEC_ADD_STRING_FIELD(iovec, n, o->unit, "OBJECT_SYSTEMD_UNIT");
+ IOVEC_ADD_STRING_FIELD(iovec, n, o->user_unit, "OBJECT_SYSTEMD_USER_UNIT");
+ IOVEC_ADD_STRING_FIELD(iovec, n, o->slice, "OBJECT_SYSTEMD_SLICE");
+ IOVEC_ADD_STRING_FIELD(iovec, n, o->user_slice, "OBJECT_SYSTEMD_USER_SLICE");
+
+ IOVEC_ADD_ID128_FIELD(iovec, n, o->invocation_id, "OBJECT_SYSTEMD_INVOCATION_ID");
+ }
+
+ assert(n <= m);
+
+ if (tv) {
+ sprintf(source_time, "_SOURCE_REALTIME_TIMESTAMP=" USEC_FMT, timeval_load(tv));
+ iovec[n++] = IOVEC_MAKE_STRING(source_time);
+ }
+
+ /* Note that strictly speaking storing the boot id here is
+ * redundant since the entry includes this in-line
+ * anyway. However, we need this indexed, too. */
+ if (!isempty(s->boot_id_field))
+ iovec[n++] = IOVEC_MAKE_STRING(s->boot_id_field);
+
+ if (!isempty(s->machine_id_field))
+ iovec[n++] = IOVEC_MAKE_STRING(s->machine_id_field);
+
+ if (!isempty(s->hostname_field))
+ iovec[n++] = IOVEC_MAKE_STRING(s->hostname_field);
+
+ assert(n <= m);
+
+ if (s->split_mode == SPLIT_UID && c && uid_is_valid(c->uid))
+ /* Split up strictly by (non-root) UID */
+ journal_uid = c->uid;
+ else if (s->split_mode == SPLIT_LOGIN && c && c->uid > 0 && uid_is_valid(c->owner_uid))
+ /* Split up by login UIDs. We do this only if the
+ * real UID is not root, in order not to accidentally
+ * leak privileged information to the user that is
+ * logged by a privileged process that is part of an
+ * unprivileged session. */
+ journal_uid = c->owner_uid;
+ else
+ journal_uid = 0;
+
+ write_to_journal(s, journal_uid, iovec, n, priority);
+}
+
+void server_driver_message(Server *s, pid_t object_pid, const char *message_id, const char *format, ...) {
+
+ struct iovec *iovec;
+ size_t n = 0, k, m;
+ va_list ap;
+ int r;
+
+ assert(s);
+ assert(format);
+
+ m = N_IOVEC_META_FIELDS + 5 + N_IOVEC_PAYLOAD_FIELDS + client_context_extra_fields_n_iovec(s->my_context) + N_IOVEC_OBJECT_FIELDS;
+ iovec = newa(struct iovec, m);
+
+ assert_cc(3 == LOG_FAC(LOG_DAEMON));
+ iovec[n++] = IOVEC_MAKE_STRING("SYSLOG_FACILITY=3");
+ iovec[n++] = IOVEC_MAKE_STRING("SYSLOG_IDENTIFIER=systemd-journald");
+
+ iovec[n++] = IOVEC_MAKE_STRING("_TRANSPORT=driver");
+ assert_cc(6 == LOG_INFO);
+ iovec[n++] = IOVEC_MAKE_STRING("PRIORITY=6");
+
+ if (message_id)
+ iovec[n++] = IOVEC_MAKE_STRING(message_id);
+ k = n;
+
+ va_start(ap, format);
+ r = log_format_iovec(iovec, m, &n, false, 0, format, ap);
+ /* Error handling below */
+ va_end(ap);
+
+ if (r >= 0)
+ dispatch_message_real(s, iovec, n, m, s->my_context, NULL, LOG_INFO, object_pid);
+
+ while (k < n)
+ free(iovec[k++].iov_base);
+
+ if (r < 0) {
+ /* We failed to format the message. Emit a warning instead. */
+ char buf[LINE_MAX];
+
+ xsprintf(buf, "MESSAGE=Entry printing failed: %s", strerror(-r));
+
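+ /* Keep the first three static fields (SYSLOG_FACILITY, SYSLOG_IDENTIFIER, _TRANSPORT) and replace the rest. */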
+ n = 3;
+ iovec[n++] = IOVEC_MAKE_STRING("PRIORITY=4");
+ iovec[n++] = IOVEC_MAKE_STRING(buf);
+ dispatch_message_real(s, iovec, n, m, s->my_context, NULL, LOG_INFO, object_pid);
+ }
+}
+
+void server_dispatch_message(
+ Server *s,
+ struct iovec *iovec, size_t n, size_t m,
+ ClientContext *c,
+ const struct timeval *tv,
+ int priority,
+ pid_t object_pid) {
+
+ uint64_t available = 0;
+ int rl;
+
+ assert(s);
+ assert(iovec || n == 0);
+
+ if (n == 0)
+ return;
+
+ if (LOG_PRI(priority) > s->max_level_store)
+ return;
+
+ /* Stop early in case the information will not be stored
+ * in a journal. */
+ if (s->storage == STORAGE_NONE)
+ return;
+
+ if (c && c->unit) {
+ (void) determine_space(s, &available, NULL);
+
+ rl = journal_rate_limit_test(s->rate_limit, c->unit, c->log_rate_limit_interval, c->log_rate_limit_burst, priority & LOG_PRIMASK, available);
+ if (rl == 0)
+ return;
+
+ /* Write a suppression message if we suppressed something */
+ if (rl > 1)
+ server_driver_message(s, c->pid,
+ "MESSAGE_ID=" SD_MESSAGE_JOURNAL_DROPPED_STR,
+ LOG_MESSAGE("Suppressed %i messages from %s", rl - 1, c->unit),
+ "N_DROPPED=%i", rl - 1,
+ NULL);
+ }
+
+ dispatch_message_real(s, iovec, n, m, c, tv, priority, object_pid);
+}
+
+int server_flush_to_var(Server *s, bool require_flag_file) {
+ sd_id128_t machine;
+ sd_journal *j = NULL;
+ char ts[FORMAT_TIMESPAN_MAX];
+ usec_t start;
+ unsigned n = 0;
+ int r;
+
+ assert(s);
+
+ if (!IN_SET(s->storage, STORAGE_AUTO, STORAGE_PERSISTENT))
+ return 0;
+
+ if (!s->runtime_journal)
+ return 0;
+
+ if (require_flag_file && !flushed_flag_is_set())
+ return 0;
+
+ (void) system_journal_open(s, true);
+
+ if (!s->system_journal)
+ return 0;
+
+ log_debug("Flushing to /var...");
+
+ start = now(CLOCK_MONOTONIC);
+
+ r = sd_id128_get_machine(&machine);
+ if (r < 0)
+ return r;
+
+ r = sd_journal_open(&j, SD_JOURNAL_RUNTIME_ONLY);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read runtime journal: %m");
+
+ sd_journal_set_data_threshold(j, 0);
+
+ SD_JOURNAL_FOREACH(j) {
+ Object *o = NULL;
+ JournalFile *f;
+
+ f = j->current_file;
+ assert(f && f->current_offset > 0);
+
+ n++;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0) {
+ log_error_errno(r, "Can't read entry: %m");
+ goto finish;
+ }
+
+ r = journal_file_copy_entry(f, s->system_journal, o, f->current_offset);
+ if (r >= 0)
+ continue;
+
+ if (!shall_try_append_again(s->system_journal, r)) {
+ log_error_errno(r, "Can't write entry: %m");
+ goto finish;
+ }
+
+ server_rotate(s);
+ server_vacuum(s, false);
+
+ if (!s->system_journal) {
+ log_notice("Didn't flush runtime journal since rotation of system journal wasn't successful.");
+ r = -EIO;
+ goto finish;
+ }
+
+ log_debug("Retrying write.");
+ r = journal_file_copy_entry(f, s->system_journal, o, f->current_offset);
+ if (r < 0) {
+ log_error_errno(r, "Can't write entry: %m");
+ goto finish;
+ }
+ }
+
+ r = 0;
+
+finish:
+ if (s->system_journal)
+ journal_file_post_change(s->system_journal);
+
+ s->runtime_journal = journal_file_close(s->runtime_journal);
+
+ if (r >= 0)
+ (void) rm_rf("/run/log/journal", REMOVE_ROOT);
+
+ sd_journal_close(j);
+
+ server_driver_message(s, 0, NULL,
+ LOG_MESSAGE("Time spent on flushing to /var is %s for %u entries.",
+ format_timespan(ts, sizeof(ts), now(CLOCK_MONOTONIC) - start, 0),
+ n),
+ NULL);
+
+ return r;
+}
+
+int server_process_datagram(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
+ Server *s = userdata;
+ struct ucred *ucred = NULL;
+ struct timeval *tv = NULL;
+ struct cmsghdr *cmsg;
+ char *label = NULL;
+ size_t label_len = 0, m;
+ struct iovec iovec;
+ ssize_t n;
+ int *fds = NULL, v = 0;
+ size_t n_fds = 0;
+
+ union {
+ struct cmsghdr cmsghdr;
+
+ /* We use NAME_MAX space for the SELinux label
+ * here. The kernel currently enforces no
+ * limit, but according to suggestions from
+ * the SELinux people this will change and it
+ * will probably be identical to NAME_MAX. For
+ * now we use that, but this should be updated
+ * one day when the final limit is known. */
+ uint8_t buf[CMSG_SPACE(sizeof(struct ucred)) +
+ CMSG_SPACE(sizeof(struct timeval)) +
+ CMSG_SPACE(sizeof(int)) + /* fd */
+ CMSG_SPACE(NAME_MAX)]; /* selinux label */
+ } control = {};
+
+ union sockaddr_union sa = {};
+
+ struct msghdr msghdr = {
+ .msg_iov = &iovec,
+ .msg_iovlen = 1,
+ .msg_control = &control,
+ .msg_controllen = sizeof(control),
+ .msg_name = &sa,
+ .msg_namelen = sizeof(sa),
+ };
+
+ assert(s);
+ assert(fd == s->native_fd || fd == s->syslog_fd || fd == s->audit_fd);
+
+ if (revents != EPOLLIN)
+ return log_error_errno(SYNTHETIC_ERRNO(EIO),
+ "Got invalid event from epoll for datagram fd: %" PRIx32,
+ revents);
+
+ /* Try to get the right size, if we can. (Not all sockets support SIOCINQ, hence we just try, but don't rely on
+ * it.) */
+ (void) ioctl(fd, SIOCINQ, &v);
+
+ /* Fix it up, if it is too small. We use the same fixed value as auditd here. Awful! */
+ m = PAGE_ALIGN(MAX3((size_t) v + 1,
+ (size_t) LINE_MAX,
+ ALIGN(sizeof(struct nlmsghdr)) + ALIGN((size_t) MAX_AUDIT_MESSAGE_LENGTH)) + 1);
+
+ if (!GREEDY_REALLOC(s->buffer, s->buffer_size, m))
+ return log_oom();
+
+ iovec = IOVEC_MAKE(s->buffer, s->buffer_size - 1); /* Leave room for trailing NUL we add later */
+
+ n = recvmsg(fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC);
+ if (n < 0) {
+ if (IN_SET(errno, EINTR, EAGAIN))
+ return 0;
+
+ return log_error_errno(errno, "recvmsg() failed: %m");
+ }
+
+ CMSG_FOREACH(cmsg, &msghdr)
+ if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SCM_CREDENTIALS &&
+ cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred)))
+ ucred = (struct ucred*) CMSG_DATA(cmsg);
+ else if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SCM_SECURITY) {
+ label = (char*) CMSG_DATA(cmsg);
+ label_len = cmsg->cmsg_len - CMSG_LEN(0);
+ } else if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SO_TIMESTAMP &&
+ cmsg->cmsg_len == CMSG_LEN(sizeof(struct timeval)))
+ tv = (struct timeval*) CMSG_DATA(cmsg);
+ else if (cmsg->cmsg_level == SOL_SOCKET &&
+ cmsg->cmsg_type == SCM_RIGHTS) {
+ fds = (int*) CMSG_DATA(cmsg);
+ n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
+ }
+
+ /* And a trailing NUL, just in case */
+ s->buffer[n] = 0;
+
+ if (fd == s->syslog_fd) {
+ if (n > 0 && n_fds == 0)
+ server_process_syslog_message(s, s->buffer, n, ucred, tv, label, label_len);
+ else if (n_fds > 0)
+ log_warning("Got file descriptors via syslog socket. Ignoring.");
+
+ } else if (fd == s->native_fd) {
+ if (n > 0 && n_fds == 0)
+ server_process_native_message(s, s->buffer, n, ucred, tv, label, label_len);
+ else if (n == 0 && n_fds == 1)
+ server_process_native_file(s, fds[0], ucred, tv, label, label_len);
+ else if (n_fds > 0)
+ log_warning("Got too many file descriptors via native socket. Ignoring.");
+
+ } else {
+ assert(fd == s->audit_fd);
+
+ if (n > 0 && n_fds == 0)
+ server_process_audit_message(s, s->buffer, n, ucred, &sa, msghdr.msg_namelen);
+ else if (n_fds > 0)
+ log_warning("Got file descriptors via audit socket. Ignoring.");
+ }
+
+ close_many(fds, n_fds);
+ return 0;
+}
+
+static int dispatch_sigusr1(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
+ Server *s = userdata;
+ int r;
+
+ assert(s);
+
+ log_info("Received request to flush runtime journal from PID " PID_FMT, si->ssi_pid);
+
+ (void) server_flush_to_var(s, false);
+ server_sync(s);
+ server_vacuum(s, false);
+
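+ /* Leave a flag file behind, so that later invocations know the runtime journal
+ * has already been flushed to /var (see flushed_flag_is_set() above). */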
+ r = touch("/run/systemd/journal/flushed");
+ if (r < 0)
+ log_warning_errno(r, "Failed to touch /run/systemd/journal/flushed, ignoring: %m");
+
+ server_space_usage_message(s, NULL);
+ return 0;
+}
+
+static int dispatch_sigusr2(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
+ Server *s = userdata;
+ int r;
+
+ assert(s);
+
+ log_info("Received request to rotate journal from PID " PID_FMT, si->ssi_pid);
+ server_rotate(s);
+ server_vacuum(s, true);
+
+ if (s->system_journal)
+ patch_min_use(&s->system_storage);
+ if (s->runtime_journal)
+ patch_min_use(&s->runtime_storage);
+
+ /* Let clients know when the most recent rotation happened. */
+ r = write_timestamp_file_atomic("/run/systemd/journal/rotated", now(CLOCK_MONOTONIC));
+ if (r < 0)
+ log_warning_errno(r, "Failed to write /run/systemd/journal/rotated, ignoring: %m");
+
+ return 0;
+}
+
+static int dispatch_sigterm(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
+ Server *s = userdata;
+
+ assert(s);
+
+ log_received_signal(LOG_INFO, si);
+
+ sd_event_exit(s->event, 0);
+ return 0;
+}
+
+static int dispatch_sigrtmin1(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
+ Server *s = userdata;
+ int r;
+
+ assert(s);
+
+ log_debug("Received request to sync from PID " PID_FMT, si->ssi_pid);
+
+ server_sync(s);
+
+ /* Let clients know when the most recent sync happened. */
+ r = write_timestamp_file_atomic("/run/systemd/journal/synced", now(CLOCK_MONOTONIC));
+ if (r < 0)
+ log_warning_errno(r, "Failed to write /run/systemd/journal/synced, ignoring: %m");
+
+ return 0;
+}
+
+static int setup_signals(Server *s) {
+ int r;
+
+ assert(s);
+
+ assert_se(sigprocmask_many(SIG_SETMASK, NULL, SIGINT, SIGTERM, SIGUSR1, SIGUSR2, SIGRTMIN+1, -1) >= 0);
+
+ r = sd_event_add_signal(s->event, &s->sigusr1_event_source, SIGUSR1, dispatch_sigusr1, s);
+ if (r < 0)
+ return r;
+
+ r = sd_event_add_signal(s->event, &s->sigusr2_event_source, SIGUSR2, dispatch_sigusr2, s);
+ if (r < 0)
+ return r;
+
+ r = sd_event_add_signal(s->event, &s->sigterm_event_source, SIGTERM, dispatch_sigterm, s);
+ if (r < 0)
+ return r;
+
+ /* Let's process SIGTERM late, so that we flush all queued
+ * messages to disk before we exit */
+ r = sd_event_source_set_priority(s->sigterm_event_source, SD_EVENT_PRIORITY_NORMAL+20);
+ if (r < 0)
+ return r;
+
+ /* When journald is invoked on the terminal (when debugging),
+ * it's useful if C-c is handled equivalent to SIGTERM. */
+ r = sd_event_add_signal(s->event, &s->sigint_event_source, SIGINT, dispatch_sigterm, s);
+ if (r < 0)
+ return r;
+
+ r = sd_event_source_set_priority(s->sigint_event_source, SD_EVENT_PRIORITY_NORMAL+20);
+ if (r < 0)
+ return r;
+
+ /* SIGRTMIN+1 causes an immediate sync. We process this very
+ * late, so that everything else queued at this point is
+ * really written to disk. Clients can watch
+ * /run/systemd/journal/synced with inotify until its mtime
+ * changes to see when a sync happened. */
+ r = sd_event_add_signal(s->event, &s->sigrtmin1_event_source, SIGRTMIN+1, dispatch_sigrtmin1, s);
+ if (r < 0)
+ return r;
+
+ r = sd_event_source_set_priority(s->sigrtmin1_event_source, SD_EVENT_PRIORITY_NORMAL+15);
+ if (r < 0)
+ return r;
+
+ return 0;
+}
+
+static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
+ Server *s = data;
+ int r;
+
+ assert(s);
+
+ if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_syslog")) {
+
+ r = value ? parse_boolean(value) : true;
+ if (r < 0)
+ log_warning("Failed to parse forward to syslog switch \"%s\". Ignoring.", value);
+ else
+ s->forward_to_syslog = r;
+
+ } else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_kmsg")) {
+
+ r = value ? parse_boolean(value) : true;
+ if (r < 0)
+ log_warning("Failed to parse forward to kmsg switch \"%s\". Ignoring.", value);
+ else
+ s->forward_to_kmsg = r;
+
+ } else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_console")) {
+
+ r = value ? parse_boolean(value) : true;
+ if (r < 0)
+ log_warning("Failed to parse forward to console switch \"%s\". Ignoring.", value);
+ else
+ s->forward_to_console = r;
+
+ } else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_wall")) {
+
+ r = value ? parse_boolean(value) : true;
+ if (r < 0)
+ log_warning("Failed to parse forward to wall switch \"%s\". Ignoring.", value);
+ else
+ s->forward_to_wall = r;
+
+ } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_console")) {
+
+ if (proc_cmdline_value_missing(key, value))
+ return 0;
+
+ r = log_level_from_string(value);
+ if (r < 0)
+ log_warning("Failed to parse max level console value \"%s\". Ignoring.", value);
+ else
+ s->max_level_console = r;
+
+ } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_store")) {
+
+ if (proc_cmdline_value_missing(key, value))
+ return 0;
+
+ r = log_level_from_string(value);
+ if (r < 0)
+ log_warning("Failed to parse max level store value \"%s\". Ignoring.", value);
+ else
+ s->max_level_store = r;
+
+ } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_syslog")) {
+
+ if (proc_cmdline_value_missing(key, value))
+ return 0;
+
+ r = log_level_from_string(value);
+ if (r < 0)
+ log_warning("Failed to parse max level syslog value \"%s\". Ignoring.", value);
+ else
+ s->max_level_syslog = r;
+
+ } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_kmsg")) {
+
+ if (proc_cmdline_value_missing(key, value))
+ return 0;
+
+ r = log_level_from_string(value);
+ if (r < 0)
+ log_warning("Failed to parse max level kmsg value \"%s\". Ignoring.", value);
+ else
+ s->max_level_kmsg = r;
+
+ } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_wall")) {
+
+ if (proc_cmdline_value_missing(key, value))
+ return 0;
+
+ r = log_level_from_string(value);
+ if (r < 0)
+ log_warning("Failed to parse max level wall value \"%s\". Ignoring.", value);
+ else
+ s->max_level_wall = r;
+
+ } else if (startswith(key, "systemd.journald"))
+ log_warning("Unknown journald kernel command line option \"%s\". Ignoring.", key);
+
+ /* do not warn about state here, since probably systemd already did */
+ return 0;
+}
+
+static int server_parse_config_file(Server *s) {
+ assert(s);
+
+ return config_parse_many_nulstr(PKGSYSCONFDIR "/journald.conf",
+ CONF_PATHS_NULSTR("systemd/journald.conf.d"),
+ "Journal\0",
+ config_item_perf_lookup, journald_gperf_lookup,
+ CONFIG_PARSE_WARN, s);
+}
+
+static int server_dispatch_sync(sd_event_source *es, usec_t t, void *userdata) {
+ Server *s = userdata;
+
+ assert(s);
+
+ server_sync(s);
+ return 0;
+}
+
+int server_schedule_sync(Server *s, int priority) {
+ int r;
+
+ assert(s);
+
+ if (priority <= LOG_CRIT) {
+ /* Immediately sync to disk when this is of priority CRIT, ALERT, EMERG */
+ server_sync(s);
+ return 0;
+ }
+
+ if (s->sync_scheduled)
+ return 0;
+
+ if (s->sync_interval_usec > 0) {
+ usec_t when;
+
+ r = sd_event_now(s->event, CLOCK_MONOTONIC, &when);
+ if (r < 0)
+ return r;
+
+ when += s->sync_interval_usec;
+
+ if (!s->sync_event_source) {
+ r = sd_event_add_time(
+ s->event,
+ &s->sync_event_source,
+ CLOCK_MONOTONIC,
+ when, 0,
+ server_dispatch_sync, s);
+ if (r < 0)
+ return r;
+
+ r = sd_event_source_set_priority(s->sync_event_source, SD_EVENT_PRIORITY_IMPORTANT);
+ } else {
+ r = sd_event_source_set_time(s->sync_event_source, when);
+ if (r < 0)
+ return r;
+
+ r = sd_event_source_set_enabled(s->sync_event_source, SD_EVENT_ONESHOT);
+ }
+ if (r < 0)
+ return r;
+
+ s->sync_scheduled = true;
+ }
+
+ return 0;
+}
+
+static int dispatch_hostname_change(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
+ Server *s = userdata;
+
+ assert(s);
+
+ server_cache_hostname(s);
+ return 0;
+}
+
+static int server_open_hostname(Server *s) {
+ int r;
+
+ assert(s);
+
+ s->hostname_fd = open("/proc/sys/kernel/hostname",
+ O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
+ if (s->hostname_fd < 0)
+ return log_error_errno(errno, "Failed to open /proc/sys/kernel/hostname: %m");
+
+ r = sd_event_add_io(s->event, &s->hostname_event_source, s->hostname_fd, 0, dispatch_hostname_change, s);
+ if (r < 0) {
+ /* kernels prior to 3.2 don't support polling this file. Ignore
+ * the failure. */
+ if (r == -EPERM) {
+ log_warning_errno(r, "Failed to register hostname fd in event loop, ignoring: %m");
+ s->hostname_fd = safe_close(s->hostname_fd);
+ return 0;
+ }
+
+ return log_error_errno(r, "Failed to register hostname fd in event loop: %m");
+ }
+
+ r = sd_event_source_set_priority(s->hostname_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
+ if (r < 0)
+ return log_error_errno(r, "Failed to adjust priority of host name event source: %m");
+
+ return 0;
+}
+
+static int dispatch_notify_event(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
+ Server *s = userdata;
+ int r;
+
+ assert(s);
+ assert(s->notify_event_source == es);
+ assert(s->notify_fd == fd);
+
+ /* The $NOTIFY_SOCKET is writable again, now send exactly one
+ * message on it. Either it's the watchdog event, the initial
+ * READY=1 event or a stdout stream event. If there's nothing
+ * to write anymore, turn our event source off. The next time
+ * there's something to send it will be turned on again. */
+
+ if (!s->sent_notify_ready) {
+ static const char p[] =
+ "READY=1\n"
+ "STATUS=Processing requests...";
+ ssize_t l;
+
+ l = send(s->notify_fd, p, strlen(p), MSG_DONTWAIT);
+ if (l < 0) {
+ if (errno == EAGAIN)
+ return 0;
+
+ return log_error_errno(errno, "Failed to send READY=1 notification message: %m");
+ }
+
+ s->sent_notify_ready = true;
+ log_debug("Sent READY=1 notification.");
+
+ } else if (s->send_watchdog) {
+
+ static const char p[] =
+ "WATCHDOG=1";
+
+ ssize_t l;
+
+ l = send(s->notify_fd, p, strlen(p), MSG_DONTWAIT);
+ if (l < 0) {
+ if (errno == EAGAIN)
+ return 0;
+
+ return log_error_errno(errno, "Failed to send WATCHDOG=1 notification message: %m");
+ }
+
+ s->send_watchdog = false;
+ log_debug("Sent WATCHDOG=1 notification.");
+
+ } else if (s->stdout_streams_notify_queue)
+ /* Dispatch one stream notification event */
+ stdout_stream_send_notify(s->stdout_streams_notify_queue);
+
+ /* Leave us enabled if there's still more to do. */
+ if (s->send_watchdog || s->stdout_streams_notify_queue)
+ return 0;
+
+ /* There was nothing to do anymore, let's turn ourselves off. */
+ r = sd_event_source_set_enabled(es, SD_EVENT_OFF);
+ if (r < 0)
+ return log_error_errno(r, "Failed to turn off notify event source: %m");
+
+ return 0;
+}
+
+static int dispatch_watchdog(sd_event_source *es, uint64_t usec, void *userdata) {
+ Server *s = userdata;
+ int r;
+
+ assert(s);
+
+ s->send_watchdog = true;
+
+ r = sd_event_source_set_enabled(s->notify_event_source, SD_EVENT_ON);
+ if (r < 0)
+ log_warning_errno(r, "Failed to turn on notify event source: %m");
+
+ r = sd_event_source_set_time(s->watchdog_event_source, usec + s->watchdog_usec / 2);
+ if (r < 0)
+ return log_error_errno(r, "Failed to restart watchdog event source: %m");
+
+ r = sd_event_source_set_enabled(s->watchdog_event_source, SD_EVENT_ON);
+ if (r < 0)
+ return log_error_errno(r, "Failed to enable watchdog event source: %m");
+
+ return 0;
+}
+
+static int server_connect_notify(Server *s) {
+ union sockaddr_union sa = {};
+ const char *e;
+ int r, salen;
+
+ assert(s);
+ assert(s->notify_fd < 0);
+ assert(!s->notify_event_source);
+
+ /*
+ So here's the problem: we'd like to send notification
+ messages to PID 1, but we cannot do that via sd_notify(),
+ since that's synchronous, and we might end up blocking on
+ it. Specifically: given that PID 1 might block on
+ dbus-daemon during IPC, and dbus-daemon is logging to us,
+ and might hence block on us, we might end up in a deadlock
+ if we block on sending PID 1 notification messages — by
+ generating a full blocking circle. To avoid this, let's
+ create a non-blocking socket, and connect it to the
+ notification socket, and then wait for POLLOUT before we
+ send anything. This should efficiently avoid any deadlocks,
+ as we'll never block on PID 1, hence PID 1 can safely block
+ on dbus-daemon which can safely block on us again.
+
+ Don't think that this issue is real? It is, see:
+ https://github.com/systemd/systemd/issues/1505
+ */
+
+ e = getenv("NOTIFY_SOCKET");
+ if (!e)
+ return 0;
+
+ salen = sockaddr_un_set_path(&sa.un, e);
+ if (salen < 0)
+ return log_error_errno(salen, "NOTIFY_SOCKET set to invalid value '%s': %m", e);
+
+ s->notify_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
+ if (s->notify_fd < 0)
+ return log_error_errno(errno, "Failed to create notify socket: %m");
+
+ (void) fd_inc_sndbuf(s->notify_fd, NOTIFY_SNDBUF_SIZE);
+
+ r = connect(s->notify_fd, &sa.sa, salen);
+ if (r < 0)
+ return log_error_errno(errno, "Failed to connect to notify socket: %m");
+
+ r = sd_event_add_io(s->event, &s->notify_event_source, s->notify_fd, EPOLLOUT, dispatch_notify_event, s);
+ if (r < 0)
+ return log_error_errno(r, "Failed to watch notification socket: %m");
+
+ if (sd_watchdog_enabled(false, &s->watchdog_usec) > 0) {
+ s->send_watchdog = true;
+
+ r = sd_event_add_time(s->event, &s->watchdog_event_source, CLOCK_MONOTONIC, now(CLOCK_MONOTONIC) + s->watchdog_usec/2, s->watchdog_usec/4, dispatch_watchdog, s);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add watchdog time event: %m");
+ }
+
+        /* The notify event source should fire pretty soon, and we'll use
+         * that wake-up to send the initial READY=1 message. */
+
+ return 0;
+}
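+
+/* Sketch of the pattern used above (illustrative, not a definitive recipe): the notify socket is
+ * created with SOCK_NONBLOCK, an EPOLLOUT event source wakes us up once it becomes writable, and
+ * dispatch_notify_event() then writes at most one datagram per wake-up, e.g.:
+ *
+ *     send(s->notify_fd, "READY=1\n", strlen("READY=1\n"), MSG_DONTWAIT);
+ *
+ * EAGAIN simply means "wait for the next EPOLLOUT", so we never block on PID 1. */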
+
+int server_init(Server *s) {
+ _cleanup_fdset_free_ FDSet *fds = NULL;
+ int n, r, fd;
+ bool no_sockets;
+
+ assert(s);
+
+ zero(*s);
+ s->syslog_fd = s->native_fd = s->stdout_fd = s->dev_kmsg_fd = s->audit_fd = s->hostname_fd = s->notify_fd = -1;
+ s->compress.enabled = true;
+ s->compress.threshold_bytes = (uint64_t) -1;
+ s->seal = true;
+ s->read_kmsg = true;
+
+ s->watchdog_usec = USEC_INFINITY;
+
+ s->sync_interval_usec = DEFAULT_SYNC_INTERVAL_USEC;
+ s->sync_scheduled = false;
+
+ s->rate_limit_interval = DEFAULT_RATE_LIMIT_INTERVAL;
+ s->rate_limit_burst = DEFAULT_RATE_LIMIT_BURST;
+
+ s->forward_to_wall = true;
+
+ s->max_file_usec = DEFAULT_MAX_FILE_USEC;
+
+ s->max_level_store = LOG_DEBUG;
+ s->max_level_syslog = LOG_DEBUG;
+ s->max_level_kmsg = LOG_NOTICE;
+ s->max_level_console = LOG_INFO;
+ s->max_level_wall = LOG_EMERG;
+
+ s->line_max = DEFAULT_LINE_MAX;
+
+ journal_reset_metrics(&s->system_storage.metrics);
+ journal_reset_metrics(&s->runtime_storage.metrics);
+
+ server_parse_config_file(s);
+
+ r = proc_cmdline_parse(parse_proc_cmdline_item, s, PROC_CMDLINE_STRIP_RD_PREFIX);
+ if (r < 0)
+ log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");
+
+ if (!!s->rate_limit_interval ^ !!s->rate_limit_burst) {
+ log_debug("Setting both rate limit interval and burst from "USEC_FMT",%u to 0,0",
+ s->rate_limit_interval, s->rate_limit_burst);
+ s->rate_limit_interval = s->rate_limit_burst = 0;
+ }
+
+ (void) mkdir_p("/run/systemd/journal", 0755);
+
+ s->user_journals = ordered_hashmap_new(NULL);
+ if (!s->user_journals)
+ return log_oom();
+
+ s->mmap = mmap_cache_new();
+ if (!s->mmap)
+ return log_oom();
+
+ s->deferred_closes = set_new(NULL);
+ if (!s->deferred_closes)
+ return log_oom();
+
+ r = sd_event_default(&s->event);
+ if (r < 0)
+ return log_error_errno(r, "Failed to create event loop: %m");
+
+ n = sd_listen_fds(true);
+ if (n < 0)
+ return log_error_errno(n, "Failed to read listening file descriptors from environment: %m");
+
+ for (fd = SD_LISTEN_FDS_START; fd < SD_LISTEN_FDS_START + n; fd++) {
+
+ if (sd_is_socket_unix(fd, SOCK_DGRAM, -1, "/run/systemd/journal/socket", 0) > 0) {
+
+ if (s->native_fd >= 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Too many native sockets passed.");
+
+ s->native_fd = fd;
+
+ } else if (sd_is_socket_unix(fd, SOCK_STREAM, 1, "/run/systemd/journal/stdout", 0) > 0) {
+
+ if (s->stdout_fd >= 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Too many stdout sockets passed.");
+
+ s->stdout_fd = fd;
+
+ } else if (sd_is_socket_unix(fd, SOCK_DGRAM, -1, "/dev/log", 0) > 0 ||
+ sd_is_socket_unix(fd, SOCK_DGRAM, -1, "/run/systemd/journal/dev-log", 0) > 0) {
+
+ if (s->syslog_fd >= 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Too many /dev/log sockets passed.");
+
+ s->syslog_fd = fd;
+
+ } else if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
+
+ if (s->audit_fd >= 0)
+ return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
+ "Too many audit sockets passed.");
+
+ s->audit_fd = fd;
+
+ } else {
+
+ if (!fds) {
+ fds = fdset_new();
+ if (!fds)
+ return log_oom();
+ }
+
+ r = fdset_put(fds, fd);
+ if (r < 0)
+ return log_oom();
+ }
+ }
+
+ /* Try to restore streams, but don't bother if this fails */
+ (void) server_restore_streams(s, fds);
+
+ if (fdset_size(fds) > 0) {
+ log_warning("%u unknown file descriptors passed, closing.", fdset_size(fds));
+ fds = fdset_free(fds);
+ }
+
+ no_sockets = s->native_fd < 0 && s->stdout_fd < 0 && s->syslog_fd < 0 && s->audit_fd < 0;
+
+ /* always open stdout, syslog, native, and kmsg sockets */
+
+ /* systemd-journald.socket: /run/systemd/journal/stdout */
+ r = server_open_stdout_socket(s);
+ if (r < 0)
+ return r;
+
+ /* systemd-journald-dev-log.socket: /run/systemd/journal/dev-log */
+ r = server_open_syslog_socket(s);
+ if (r < 0)
+ return r;
+
+ /* systemd-journald.socket: /run/systemd/journal/socket */
+ r = server_open_native_socket(s);
+ if (r < 0)
+ return r;
+
+ /* /dev/kmsg */
+ r = server_open_dev_kmsg(s);
+ if (r < 0)
+ return r;
+
+        /* Open the audit socket, unless we were passed *some* sockets but not the audit one */
+ if (s->audit_fd >= 0 || no_sockets) {
+ r = server_open_audit(s);
+ if (r < 0)
+ return r;
+ }
+
+ r = server_open_kernel_seqnum(s);
+ if (r < 0)
+ return r;
+
+ r = server_open_hostname(s);
+ if (r < 0)
+ return r;
+
+ r = setup_signals(s);
+ if (r < 0)
+ return r;
+
+ s->rate_limit = journal_rate_limit_new();
+ if (!s->rate_limit)
+ return -ENOMEM;
+
+ r = cg_get_root_path(&s->cgroup_root);
+ if (r < 0)
+ return r;
+
+ server_cache_hostname(s);
+ server_cache_boot_id(s);
+ server_cache_machine_id(s);
+
+ s->runtime_storage.name = "Runtime journal";
+ s->system_storage.name = "System journal";
+
+ s->runtime_storage.path = strjoin("/run/log/journal/", SERVER_MACHINE_ID(s));
+ s->system_storage.path = strjoin("/var/log/journal/", SERVER_MACHINE_ID(s));
+ if (!s->runtime_storage.path || !s->system_storage.path)
+ return -ENOMEM;
+
+ (void) server_connect_notify(s);
+
+ (void) client_context_acquire_default(s);
+
+ return system_journal_open(s, false);
+}
+
+void server_maybe_append_tags(Server *s) {
+#if HAVE_GCRYPT
+ JournalFile *f;
+ Iterator i;
+ usec_t n;
+
+ n = now(CLOCK_REALTIME);
+
+ if (s->system_journal)
+ journal_file_maybe_append_tag(s->system_journal, n);
+
+ ORDERED_HASHMAP_FOREACH(f, s->user_journals, i)
+ journal_file_maybe_append_tag(f, n);
+#endif
+}
+
+void server_done(Server *s) {
+ assert(s);
+
+ set_free_with_destructor(s->deferred_closes, journal_file_close);
+
+ while (s->stdout_streams)
+ stdout_stream_free(s->stdout_streams);
+
+ client_context_flush_all(s);
+
+ if (s->system_journal)
+ (void) journal_file_close(s->system_journal);
+
+ if (s->runtime_journal)
+ (void) journal_file_close(s->runtime_journal);
+
+ ordered_hashmap_free_with_destructor(s->user_journals, journal_file_close);
+
+ sd_event_source_unref(s->syslog_event_source);
+ sd_event_source_unref(s->native_event_source);
+ sd_event_source_unref(s->stdout_event_source);
+ sd_event_source_unref(s->dev_kmsg_event_source);
+ sd_event_source_unref(s->audit_event_source);
+ sd_event_source_unref(s->sync_event_source);
+ sd_event_source_unref(s->sigusr1_event_source);
+ sd_event_source_unref(s->sigusr2_event_source);
+ sd_event_source_unref(s->sigterm_event_source);
+ sd_event_source_unref(s->sigint_event_source);
+ sd_event_source_unref(s->sigrtmin1_event_source);
+ sd_event_source_unref(s->hostname_event_source);
+ sd_event_source_unref(s->notify_event_source);
+ sd_event_source_unref(s->watchdog_event_source);
+ sd_event_unref(s->event);
+
+ safe_close(s->syslog_fd);
+ safe_close(s->native_fd);
+ safe_close(s->stdout_fd);
+ safe_close(s->dev_kmsg_fd);
+ safe_close(s->audit_fd);
+ safe_close(s->hostname_fd);
+ safe_close(s->notify_fd);
+
+ if (s->rate_limit)
+ journal_rate_limit_free(s->rate_limit);
+
+ if (s->kernel_seqnum)
+ munmap(s->kernel_seqnum, sizeof(uint64_t));
+
+ free(s->buffer);
+ free(s->tty_path);
+ free(s->cgroup_root);
+ free(s->hostname_field);
+ free(s->runtime_storage.path);
+ free(s->system_storage.path);
+
+ if (s->mmap)
+ mmap_cache_unref(s->mmap);
+}
+
+static const char* const storage_table[_STORAGE_MAX] = {
+ [STORAGE_AUTO] = "auto",
+ [STORAGE_VOLATILE] = "volatile",
+ [STORAGE_PERSISTENT] = "persistent",
+ [STORAGE_NONE] = "none"
+};
+
+DEFINE_STRING_TABLE_LOOKUP(storage, Storage);
+DEFINE_CONFIG_PARSE_ENUM(config_parse_storage, storage, Storage, "Failed to parse storage setting");
+
+static const char* const split_mode_table[_SPLIT_MAX] = {
+ [SPLIT_LOGIN] = "login",
+ [SPLIT_UID] = "uid",
+ [SPLIT_NONE] = "none",
+};
+
+DEFINE_STRING_TABLE_LOOKUP(split_mode, SplitMode);
+DEFINE_CONFIG_PARSE_ENUM(config_parse_split_mode, split_mode, SplitMode, "Failed to parse split mode setting");
+
+int config_parse_line_max(
+ const char* unit,
+ const char *filename,
+ unsigned line,
+ const char *section,
+ unsigned section_line,
+ const char *lvalue,
+ int ltype,
+ const char *rvalue,
+ void *data,
+ void *userdata) {
+
+ size_t *sz = data;
+ int r;
+
+ assert(filename);
+ assert(lvalue);
+ assert(rvalue);
+ assert(data);
+
+ if (isempty(rvalue))
+ /* Empty assignment means default */
+ *sz = DEFAULT_LINE_MAX;
+ else {
+ uint64_t v;
+
+ r = parse_size(rvalue, 1024, &v);
+ if (r < 0) {
+ log_syntax(unit, LOG_ERR, filename, line, r, "Failed to parse LineMax= value, ignoring: %s", rvalue);
+ return 0;
+ }
+
+ if (v < 79) {
+                        /* Why 79 as the minimum line length? Simply because the most common traditional terminal
+                         * width is 80 characters, and it might make sense to break one character before the natural
+                         * line break would occur there. */
+ log_syntax(unit, LOG_WARNING, filename, line, 0, "LineMax= too small, clamping to 79: %s", rvalue);
+ *sz = 79;
+ } else if (v > (uint64_t) (SSIZE_MAX-1)) {
+                        /* Why SSIZE_MAX-1? Because that's one below the largest size value read() can return, and
+                         * we need one extra byte for the trailing NUL. In practice such large memory allocations
+                         * will fail long before we get there, hence this limit is mostly theoretical. */
+ log_syntax(unit, LOG_WARNING, filename, line, 0, "LineMax= too large, clamping to %" PRIu64 ": %s", (uint64_t) (SSIZE_MAX-1), rvalue);
+ *sz = SSIZE_MAX-1;
+ } else
+ *sz = (size_t) v;
+ }
+
+ return 0;
+}
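+
+/* Illustrative examples of how the clamping above plays out (values assumed; see parse_size() for
+ * the exact suffix rules):
+ *
+ *     LineMax=        -> DEFAULT_LINE_MAX
+ *     LineMax=16      -> clamped up to 79 bytes
+ *     LineMax=48K     -> 49152 bytes
+ *     LineMax=1T      -> accepted on 64-bit systems, since it is still below SSIZE_MAX-1
+ */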
+
+int config_parse_compress(const char* unit,
+ const char *filename,
+ unsigned line,
+ const char *section,
+ unsigned section_line,
+ const char *lvalue,
+ int ltype,
+ const char *rvalue,
+ void *data,
+ void *userdata) {
+ JournalCompressOptions* compress = data;
+ int r;
+
+ if (streq(rvalue, "1")) {
+ log_syntax(unit, LOG_WARNING, filename, line, 0,
+ "Compress= ambiguously specified as 1, enabling compression with default threshold");
+ compress->enabled = true;
+ } else if (streq(rvalue, "0")) {
+ log_syntax(unit, LOG_WARNING, filename, line, 0,
+ "Compress= ambiguously specified as 0, disabling compression");
+ compress->enabled = false;
+ } else if ((r = parse_boolean(rvalue)) >= 0)
+ compress->enabled = r;
+ else if (parse_size(rvalue, 1024, &compress->threshold_bytes) == 0)
+ compress->enabled = true;
+ else if (isempty(rvalue)) {
+ compress->enabled = true;
+ compress->threshold_bytes = (uint64_t) -1;
+ } else
+ log_syntax(unit, LOG_ERR, filename, line, r, "Failed to parse Compress= value, ignoring: %s", rvalue);
+
+ return 0;
+}
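+
+/* Illustrative examples of accepted Compress= values, as handled above:
+ *
+ *     Compress=yes    -> compression enabled, built-in threshold
+ *     Compress=no     -> compression disabled
+ *     Compress=4K     -> compression enabled for objects of at least 4096 bytes
+ *     Compress=       -> reset to the default (enabled, built-in threshold)
+ *     Compress=1      -> accepted with a warning, treated like "yes"
+ */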
diff --git a/src/journal/journald-server.h b/src/journal/journald-server.h
new file mode 100644
index 0000000..3f6b42d
--- /dev/null
+++ b/src/journal/journald-server.h
@@ -0,0 +1,214 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <stdbool.h>
+#include <sys/types.h>
+
+#include "sd-event.h"
+
+typedef struct Server Server;
+
+#include "conf-parser.h"
+#include "hashmap.h"
+#include "journal-file.h"
+#include "journald-context.h"
+#include "journald-rate-limit.h"
+#include "journald-stream.h"
+#include "list.h"
+#include "prioq.h"
+
+typedef enum Storage {
+ STORAGE_AUTO,
+ STORAGE_VOLATILE,
+ STORAGE_PERSISTENT,
+ STORAGE_NONE,
+ _STORAGE_MAX,
+ _STORAGE_INVALID = -1
+} Storage;
+
+typedef enum SplitMode {
+ SPLIT_UID,
+ SPLIT_LOGIN, /* deprecated */
+ SPLIT_NONE,
+ _SPLIT_MAX,
+ _SPLIT_INVALID = -1
+} SplitMode;
+
+typedef struct JournalCompressOptions {
+ bool enabled;
+ uint64_t threshold_bytes;
+} JournalCompressOptions;
+
+typedef struct JournalStorageSpace {
+ usec_t timestamp;
+
+ uint64_t available;
+ uint64_t limit;
+
+ uint64_t vfs_used; /* space used by journal files */
+ uint64_t vfs_available;
+} JournalStorageSpace;
+
+typedef struct JournalStorage {
+ const char *name;
+ char *path;
+
+ JournalMetrics metrics;
+ JournalStorageSpace space;
+} JournalStorage;
+
+struct Server {
+ int syslog_fd;
+ int native_fd;
+ int stdout_fd;
+ int dev_kmsg_fd;
+ int audit_fd;
+ int hostname_fd;
+ int notify_fd;
+
+ sd_event *event;
+
+ sd_event_source *syslog_event_source;
+ sd_event_source *native_event_source;
+ sd_event_source *stdout_event_source;
+ sd_event_source *dev_kmsg_event_source;
+ sd_event_source *audit_event_source;
+ sd_event_source *sync_event_source;
+ sd_event_source *sigusr1_event_source;
+ sd_event_source *sigusr2_event_source;
+ sd_event_source *sigterm_event_source;
+ sd_event_source *sigint_event_source;
+ sd_event_source *sigrtmin1_event_source;
+ sd_event_source *hostname_event_source;
+ sd_event_source *notify_event_source;
+ sd_event_source *watchdog_event_source;
+
+ JournalFile *runtime_journal;
+ JournalFile *system_journal;
+ OrderedHashmap *user_journals;
+
+ uint64_t seqnum;
+
+ char *buffer;
+ size_t buffer_size;
+
+ JournalRateLimit *rate_limit;
+ usec_t sync_interval_usec;
+ usec_t rate_limit_interval;
+ unsigned rate_limit_burst;
+
+ JournalStorage runtime_storage;
+ JournalStorage system_storage;
+
+ JournalCompressOptions compress;
+ bool seal;
+ bool read_kmsg;
+
+ bool forward_to_kmsg;
+ bool forward_to_syslog;
+ bool forward_to_console;
+ bool forward_to_wall;
+
+ unsigned n_forward_syslog_missed;
+ usec_t last_warn_forward_syslog_missed;
+
+ uint64_t var_available_timestamp;
+
+ usec_t max_retention_usec;
+ usec_t max_file_usec;
+ usec_t oldest_file_usec;
+
+ LIST_HEAD(StdoutStream, stdout_streams);
+ LIST_HEAD(StdoutStream, stdout_streams_notify_queue);
+ unsigned n_stdout_streams;
+
+ char *tty_path;
+
+ int max_level_store;
+ int max_level_syslog;
+ int max_level_kmsg;
+ int max_level_console;
+ int max_level_wall;
+
+ Storage storage;
+ SplitMode split_mode;
+
+ MMapCache *mmap;
+
+ Set *deferred_closes;
+
+ uint64_t *kernel_seqnum;
+ bool dev_kmsg_readable:1;
+
+ bool send_watchdog:1;
+ bool sent_notify_ready:1;
+ bool sync_scheduled:1;
+
+ char machine_id_field[sizeof("_MACHINE_ID=") + 32];
+ char boot_id_field[sizeof("_BOOT_ID=") + 32];
+ char *hostname_field;
+
+ /* Cached cgroup root, so that we don't have to query that all the time */
+ char *cgroup_root;
+
+ usec_t watchdog_usec;
+
+ usec_t last_realtime_clock;
+
+ size_t line_max;
+
+ /* Caching of client metadata */
+ Hashmap *client_contexts;
+ Prioq *client_contexts_lru;
+
+ usec_t last_cache_pid_flush;
+
+ ClientContext *my_context; /* the context of journald itself */
+ ClientContext *pid1_context; /* the context of PID 1 */
+};
+
+#define SERVER_MACHINE_ID(s) ((s)->machine_id_field + STRLEN("_MACHINE_ID="))
+
+/* Extra fields for any log messages */
+#define N_IOVEC_META_FIELDS 22
+
+/* Extra fields for log messages that contain OBJECT_PID= (i.e. log about another process) */
+#define N_IOVEC_OBJECT_FIELDS 18
+
+/* Maximum number of fields we'll add in for driver (i.e. internal) messages */
+#define N_IOVEC_PAYLOAD_FIELDS 16
+
+/* kmsg: Maximum number of extra fields we'll import from the kernel's /dev/kmsg */
+#define N_IOVEC_KERNEL_FIELDS 64
+
+/* kmsg: Maximum number of extra fields we'll import from udev's devices */
+#define N_IOVEC_UDEV_FIELDS 32
+
+void server_dispatch_message(Server *s, struct iovec *iovec, size_t n, size_t m, ClientContext *c, const struct timeval *tv, int priority, pid_t object_pid);
+void server_driver_message(Server *s, pid_t object_pid, const char *message_id, const char *format, ...) _sentinel_ _printf_(4,0);
+
+/* gperf lookup function */
+const struct ConfigPerfItem* journald_gperf_lookup(const char *key, GPERF_LEN_TYPE length);
+
+CONFIG_PARSER_PROTOTYPE(config_parse_storage);
+CONFIG_PARSER_PROTOTYPE(config_parse_line_max);
+CONFIG_PARSER_PROTOTYPE(config_parse_compress);
+
+const char *storage_to_string(Storage s) _const_;
+Storage storage_from_string(const char *s) _pure_;
+
+CONFIG_PARSER_PROTOTYPE(config_parse_split_mode);
+
+const char *split_mode_to_string(SplitMode s) _const_;
+SplitMode split_mode_from_string(const char *s) _pure_;
+
+int server_init(Server *s);
+void server_done(Server *s);
+void server_sync(Server *s);
+int server_vacuum(Server *s, bool verbose);
+void server_rotate(Server *s);
+int server_schedule_sync(Server *s, int priority);
+int server_flush_to_var(Server *s, bool require_flag_file);
+void server_maybe_append_tags(Server *s);
+int server_process_datagram(sd_event_source *es, int fd, uint32_t revents, void *userdata);
+void server_space_usage_message(Server *s, JournalStorage *storage);
diff --git a/src/journal/journald-stream.c b/src/journal/journald-stream.c
new file mode 100644
index 0000000..137c8f0
--- /dev/null
+++ b/src/journal/journald-stream.c
@@ -0,0 +1,876 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <stddef.h>
+#include <unistd.h>
+
+#if HAVE_SELINUX
+#include <selinux/selinux.h>
+#endif
+
+#include "sd-daemon.h"
+#include "sd-event.h"
+
+#include "alloc-util.h"
+#include "dirent-util.h"
+#include "env-file.h"
+#include "escape.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "io-util.h"
+#include "journald-console.h"
+#include "journald-context.h"
+#include "journald-kmsg.h"
+#include "journald-server.h"
+#include "journald-stream.h"
+#include "journald-syslog.h"
+#include "journald-wall.h"
+#include "mkdir.h"
+#include "parse-util.h"
+#include "process-util.h"
+#include "selinux-util.h"
+#include "socket-util.h"
+#include "stdio-util.h"
+#include "string-util.h"
+#include "syslog-util.h"
+#include "tmpfile-util.h"
+#include "unit-name.h"
+
+#define STDOUT_STREAMS_MAX 4096
+
+typedef enum StdoutStreamState {
+ STDOUT_STREAM_IDENTIFIER,
+ STDOUT_STREAM_UNIT_ID,
+ STDOUT_STREAM_PRIORITY,
+ STDOUT_STREAM_LEVEL_PREFIX,
+ STDOUT_STREAM_FORWARD_TO_SYSLOG,
+ STDOUT_STREAM_FORWARD_TO_KMSG,
+ STDOUT_STREAM_FORWARD_TO_CONSOLE,
+ STDOUT_STREAM_RUNNING
+} StdoutStreamState;
+
+/* The different types of log record terminators: a real \n was read, a NUL character was read, the maximum line length
+ * was reached, or the end of the stream was reached */
+
+typedef enum LineBreak {
+ LINE_BREAK_NEWLINE,
+ LINE_BREAK_NUL,
+ LINE_BREAK_LINE_MAX,
+ LINE_BREAK_EOF,
+} LineBreak;
+
+struct StdoutStream {
+ Server *server;
+ StdoutStreamState state;
+
+ int fd;
+
+ struct ucred ucred;
+ char *label;
+ char *identifier;
+ char *unit_id;
+ int priority;
+ bool level_prefix:1;
+ bool forward_to_syslog:1;
+ bool forward_to_kmsg:1;
+ bool forward_to_console:1;
+
+ bool fdstore:1;
+ bool in_notify_queue:1;
+
+ char *buffer;
+ size_t length;
+ size_t allocated;
+
+ sd_event_source *event_source;
+
+ char *state_file;
+
+ ClientContext *context;
+
+ LIST_FIELDS(StdoutStream, stdout_stream);
+ LIST_FIELDS(StdoutStream, stdout_stream_notify_queue);
+
+ char id_field[STRLEN("_STREAM_ID=") + SD_ID128_STRING_MAX];
+};
+
+void stdout_stream_free(StdoutStream *s) {
+ if (!s)
+ return;
+
+ if (s->server) {
+
+ if (s->context)
+ client_context_release(s->server, s->context);
+
+ assert(s->server->n_stdout_streams > 0);
+ s->server->n_stdout_streams--;
+ LIST_REMOVE(stdout_stream, s->server->stdout_streams, s);
+
+ if (s->in_notify_queue)
+ LIST_REMOVE(stdout_stream_notify_queue, s->server->stdout_streams_notify_queue, s);
+ }
+
+ if (s->event_source) {
+ sd_event_source_set_enabled(s->event_source, SD_EVENT_OFF);
+ s->event_source = sd_event_source_unref(s->event_source);
+ }
+
+ safe_close(s->fd);
+ free(s->label);
+ free(s->identifier);
+ free(s->unit_id);
+ free(s->state_file);
+ free(s->buffer);
+
+ free(s);
+}
+
+DEFINE_TRIVIAL_CLEANUP_FUNC(StdoutStream*, stdout_stream_free);
+
+void stdout_stream_destroy(StdoutStream *s) {
+ if (!s)
+ return;
+
+ if (s->state_file)
+ (void) unlink(s->state_file);
+
+ stdout_stream_free(s);
+}
+
+static int stdout_stream_save(StdoutStream *s) {
+ _cleanup_free_ char *temp_path = NULL;
+ _cleanup_fclose_ FILE *f = NULL;
+ int r;
+
+ assert(s);
+
+ if (s->state != STDOUT_STREAM_RUNNING)
+ return 0;
+
+ if (!s->state_file) {
+ struct stat st;
+
+ r = fstat(s->fd, &st);
+ if (r < 0)
+ return log_warning_errno(errno, "Failed to stat connected stream: %m");
+
+ /* We use device and inode numbers as identifier for the stream */
+ if (asprintf(&s->state_file, "/run/systemd/journal/streams/%lu:%lu", (unsigned long) st.st_dev, (unsigned long) st.st_ino) < 0)
+ return log_oom();
+ }
+
+ mkdir_p("/run/systemd/journal/streams", 0755);
+
+ r = fopen_temporary(s->state_file, &f, &temp_path);
+ if (r < 0)
+ goto fail;
+
+ fprintf(f,
+ "# This is private data. Do not parse\n"
+ "PRIORITY=%i\n"
+ "LEVEL_PREFIX=%i\n"
+ "FORWARD_TO_SYSLOG=%i\n"
+ "FORWARD_TO_KMSG=%i\n"
+ "FORWARD_TO_CONSOLE=%i\n"
+ "STREAM_ID=%s\n",
+ s->priority,
+ s->level_prefix,
+ s->forward_to_syslog,
+ s->forward_to_kmsg,
+ s->forward_to_console,
+ s->id_field + STRLEN("_STREAM_ID="));
+
+ if (!isempty(s->identifier)) {
+ _cleanup_free_ char *escaped;
+
+ escaped = cescape(s->identifier);
+ if (!escaped) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ fprintf(f, "IDENTIFIER=%s\n", escaped);
+ }
+
+ if (!isempty(s->unit_id)) {
+ _cleanup_free_ char *escaped;
+
+ escaped = cescape(s->unit_id);
+ if (!escaped) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ fprintf(f, "UNIT=%s\n", escaped);
+ }
+
+ r = fflush_and_check(f);
+ if (r < 0)
+ goto fail;
+
+ if (rename(temp_path, s->state_file) < 0) {
+ r = -errno;
+ goto fail;
+ }
+
+ if (!s->fdstore && !s->in_notify_queue) {
+ LIST_PREPEND(stdout_stream_notify_queue, s->server->stdout_streams_notify_queue, s);
+ s->in_notify_queue = true;
+
+ if (s->server->notify_event_source) {
+ r = sd_event_source_set_enabled(s->server->notify_event_source, SD_EVENT_ON);
+ if (r < 0)
+ log_warning_errno(r, "Failed to enable notify event source: %m");
+ }
+ }
+
+ return 0;
+
+fail:
+ (void) unlink(s->state_file);
+
+ if (temp_path)
+ (void) unlink(temp_path);
+
+ return log_error_errno(r, "Failed to save stream data %s: %m", s->state_file);
+}
+
+static int stdout_stream_log(StdoutStream *s, const char *p, LineBreak line_break) {
+ struct iovec *iovec;
+ int priority;
+ char syslog_priority[] = "PRIORITY=\0";
+ char syslog_facility[STRLEN("SYSLOG_FACILITY=") + DECIMAL_STR_MAX(int) + 1];
+ _cleanup_free_ char *message = NULL, *syslog_identifier = NULL;
+ size_t n = 0, m;
+ int r;
+
+ assert(s);
+ assert(p);
+
+ if (s->context)
+ (void) client_context_maybe_refresh(s->server, s->context, NULL, NULL, 0, NULL, USEC_INFINITY);
+ else if (pid_is_valid(s->ucred.pid)) {
+ r = client_context_acquire(s->server, s->ucred.pid, &s->ucred, s->label, strlen_ptr(s->label), s->unit_id, &s->context);
+ if (r < 0)
+ log_warning_errno(r, "Failed to acquire client context, ignoring: %m");
+ }
+
+ priority = s->priority;
+
+ if (s->level_prefix)
+ syslog_parse_priority(&p, &priority, false);
+
+ if (!client_context_test_priority(s->context, priority))
+ return 0;
+
+ if (isempty(p))
+ return 0;
+
+ if (s->forward_to_syslog || s->server->forward_to_syslog)
+ server_forward_syslog(s->server, syslog_fixup_facility(priority), s->identifier, p, &s->ucred, NULL);
+
+ if (s->forward_to_kmsg || s->server->forward_to_kmsg)
+ server_forward_kmsg(s->server, priority, s->identifier, p, &s->ucred);
+
+ if (s->forward_to_console || s->server->forward_to_console)
+ server_forward_console(s->server, priority, s->identifier, p, &s->ucred);
+
+ if (s->server->forward_to_wall)
+ server_forward_wall(s->server, priority, s->identifier, p, &s->ucred);
+
+ m = N_IOVEC_META_FIELDS + 7 + client_context_extra_fields_n_iovec(s->context);
+ iovec = newa(struct iovec, m);
+
+ iovec[n++] = IOVEC_MAKE_STRING("_TRANSPORT=stdout");
+ iovec[n++] = IOVEC_MAKE_STRING(s->id_field);
+
+ syslog_priority[STRLEN("PRIORITY=")] = '0' + LOG_PRI(priority);
+ iovec[n++] = IOVEC_MAKE_STRING(syslog_priority);
+
+ if (priority & LOG_FACMASK) {
+ xsprintf(syslog_facility, "SYSLOG_FACILITY=%i", LOG_FAC(priority));
+ iovec[n++] = IOVEC_MAKE_STRING(syslog_facility);
+ }
+
+ if (s->identifier) {
+ syslog_identifier = strappend("SYSLOG_IDENTIFIER=", s->identifier);
+ if (syslog_identifier)
+ iovec[n++] = IOVEC_MAKE_STRING(syslog_identifier);
+ }
+
+ if (line_break != LINE_BREAK_NEWLINE) {
+ const char *c;
+
+ /* If this log message was generated due to an uncommon line break then mention this in the log
+ * entry */
+
+ c = line_break == LINE_BREAK_NUL ? "_LINE_BREAK=nul" :
+ line_break == LINE_BREAK_LINE_MAX ? "_LINE_BREAK=line-max" :
+ "_LINE_BREAK=eof";
+ iovec[n++] = IOVEC_MAKE_STRING(c);
+ }
+
+ message = strappend("MESSAGE=", p);
+ if (message)
+ iovec[n++] = IOVEC_MAKE_STRING(message);
+
+ server_dispatch_message(s->server, iovec, n, m, s->context, NULL, priority, 0);
+ return 0;
+}
+
+static int stdout_stream_line(StdoutStream *s, char *p, LineBreak line_break) {
+ int r;
+ char *orig;
+
+ assert(s);
+ assert(p);
+
+ orig = p;
+ p = strstrip(p);
+
+        /* Line breaks by NUL, maximum line length or EOF are not permissible during the negotiation part of the protocol */
+ if (line_break != LINE_BREAK_NEWLINE && s->state != STDOUT_STREAM_RUNNING) {
+ log_warning("Control protocol line not properly terminated.");
+ return -EINVAL;
+ }
+
+ switch (s->state) {
+
+ case STDOUT_STREAM_IDENTIFIER:
+ if (!isempty(p)) {
+ s->identifier = strdup(p);
+ if (!s->identifier)
+ return log_oom();
+ }
+
+ s->state = STDOUT_STREAM_UNIT_ID;
+ return 0;
+
+ case STDOUT_STREAM_UNIT_ID:
+ if (s->ucred.uid == 0 &&
+ unit_name_is_valid(p, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE)) {
+
+ s->unit_id = strdup(p);
+ if (!s->unit_id)
+ return log_oom();
+ }
+
+ s->state = STDOUT_STREAM_PRIORITY;
+ return 0;
+
+ case STDOUT_STREAM_PRIORITY:
+ r = safe_atoi(p, &s->priority);
+ if (r < 0 || s->priority < 0 || s->priority > 999) {
+ log_warning("Failed to parse log priority line.");
+ return -EINVAL;
+ }
+
+ s->state = STDOUT_STREAM_LEVEL_PREFIX;
+ return 0;
+
+ case STDOUT_STREAM_LEVEL_PREFIX:
+ r = parse_boolean(p);
+ if (r < 0) {
+ log_warning("Failed to parse level prefix line.");
+ return -EINVAL;
+ }
+
+ s->level_prefix = r;
+ s->state = STDOUT_STREAM_FORWARD_TO_SYSLOG;
+ return 0;
+
+ case STDOUT_STREAM_FORWARD_TO_SYSLOG:
+ r = parse_boolean(p);
+ if (r < 0) {
+ log_warning("Failed to parse forward to syslog line.");
+ return -EINVAL;
+ }
+
+ s->forward_to_syslog = r;
+ s->state = STDOUT_STREAM_FORWARD_TO_KMSG;
+ return 0;
+
+ case STDOUT_STREAM_FORWARD_TO_KMSG:
+ r = parse_boolean(p);
+ if (r < 0) {
+ log_warning("Failed to parse copy to kmsg line.");
+ return -EINVAL;
+ }
+
+ s->forward_to_kmsg = r;
+ s->state = STDOUT_STREAM_FORWARD_TO_CONSOLE;
+ return 0;
+
+ case STDOUT_STREAM_FORWARD_TO_CONSOLE:
+ r = parse_boolean(p);
+ if (r < 0) {
+ log_warning("Failed to parse copy to console line.");
+ return -EINVAL;
+ }
+
+ s->forward_to_console = r;
+ s->state = STDOUT_STREAM_RUNNING;
+
+ /* Try to save the stream, so that journald can be restarted and we can recover */
+ (void) stdout_stream_save(s);
+ return 0;
+
+ case STDOUT_STREAM_RUNNING:
+ return stdout_stream_log(s, orig, line_break);
+ }
+
+ assert_not_reached("Unknown stream state");
+}
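+
+/* Illustrative walk-through of the negotiation handled above: a client connecting to
+ * /run/systemd/journal/stdout first sends one line per state, for example
+ *
+ *     my-daemon\n     (identifier)
+ *     \n              (no unit id)
+ *     6\n             (default priority, LOG_INFO)
+ *     0\n             (don't parse level prefixes)
+ *     0\n             (don't forward to syslog)
+ *     0\n             (don't forward to kmsg)
+ *     0\n             (don't forward to console)
+ *
+ * after which every line it writes is logged via stdout_stream_log(). The identifier
+ * "my-daemon" is of course just a made-up example. */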
+
+static int stdout_stream_scan(StdoutStream *s, bool force_flush) {
+ char *p;
+ size_t remaining;
+ int r;
+
+ assert(s);
+
+ p = s->buffer;
+ remaining = s->length;
+
+ /* XXX: This function does nothing if (s->length == 0) */
+
+ for (;;) {
+ LineBreak line_break;
+ size_t skip;
+ char *end1, *end2;
+
+ end1 = memchr(p, '\n', remaining);
+ end2 = memchr(p, 0, end1 ? (size_t) (end1 - p) : remaining);
+
+ if (end2) {
+ /* We found a NUL terminator */
+ skip = end2 - p + 1;
+ line_break = LINE_BREAK_NUL;
+ } else if (end1) {
+ /* We found a \n terminator */
+ *end1 = 0;
+ skip = end1 - p + 1;
+ line_break = LINE_BREAK_NEWLINE;
+ } else if (remaining >= s->server->line_max) {
+ /* Force a line break after the maximum line length */
+ *(p + s->server->line_max) = 0;
+ skip = remaining;
+ line_break = LINE_BREAK_LINE_MAX;
+ } else
+ break;
+
+ r = stdout_stream_line(s, p, line_break);
+ if (r < 0)
+ return r;
+
+ remaining -= skip;
+ p += skip;
+ }
+
+ if (force_flush && remaining > 0) {
+ p[remaining] = 0;
+ r = stdout_stream_line(s, p, LINE_BREAK_EOF);
+ if (r < 0)
+ return r;
+
+ p += remaining;
+ remaining = 0;
+ }
+
+ if (p > s->buffer) {
+ memmove(s->buffer, p, remaining);
+ s->length = remaining;
+ }
+
+ return 0;
+}
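+
+/* For example (illustrative): if the buffer holds "foo\nba", the loop above dispatches "foo" as a
+ * complete line, and the leftover "ba" is moved to the front of the buffer to be completed by the
+ * next read(). With force_flush set (i.e. on EOF), the leftover "ba" would instead be dispatched
+ * as a LINE_BREAK_EOF line. */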
+
+static int stdout_stream_process(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
+ StdoutStream *s = userdata;
+ size_t limit;
+ ssize_t l;
+ int r;
+
+ assert(s);
+
+ if ((revents|EPOLLIN|EPOLLHUP) != (EPOLLIN|EPOLLHUP)) {
+ log_error("Got invalid event from epoll for stdout stream: %"PRIx32, revents);
+ goto terminate;
+ }
+
+ /* If the buffer is full already (discounting the extra NUL we need), add room for another 1K */
+ if (s->length + 1 >= s->allocated) {
+ if (!GREEDY_REALLOC(s->buffer, s->allocated, s->length + 1 + 1024)) {
+ log_oom();
+ goto terminate;
+ }
+ }
+
+ /* Try to make use of the allocated buffer in full, but never read more than the configured line size. Also,
+ * always leave room for a terminating NUL we might need to add. */
+ limit = MIN(s->allocated - 1, s->server->line_max);
+
+ l = read(s->fd, s->buffer + s->length, limit - s->length);
+ if (l < 0) {
+ if (errno == EAGAIN)
+ return 0;
+
+ log_warning_errno(errno, "Failed to read from stream: %m");
+ goto terminate;
+ }
+
+ if (l == 0) {
+ stdout_stream_scan(s, true);
+ goto terminate;
+ }
+
+ s->length += l;
+ r = stdout_stream_scan(s, false);
+ if (r < 0)
+ goto terminate;
+
+ return 1;
+
+terminate:
+ stdout_stream_destroy(s);
+ return 0;
+}
+
+int stdout_stream_install(Server *s, int fd, StdoutStream **ret) {
+ _cleanup_(stdout_stream_freep) StdoutStream *stream = NULL;
+ sd_id128_t id;
+ int r;
+
+ assert(s);
+ assert(fd >= 0);
+
+ r = sd_id128_randomize(&id);
+ if (r < 0)
+ return log_error_errno(r, "Failed to generate stream ID: %m");
+
+ stream = new0(StdoutStream, 1);
+ if (!stream)
+ return log_oom();
+
+ stream->fd = -1;
+ stream->priority = LOG_INFO;
+
+ xsprintf(stream->id_field, "_STREAM_ID=" SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(id));
+
+ r = getpeercred(fd, &stream->ucred);
+ if (r < 0)
+ return log_error_errno(r, "Failed to determine peer credentials: %m");
+
+ if (mac_selinux_use()) {
+ r = getpeersec(fd, &stream->label);
+ if (r < 0 && r != -EOPNOTSUPP)
+ (void) log_warning_errno(r, "Failed to determine peer security context: %m");
+ }
+
+ (void) shutdown(fd, SHUT_WR);
+
+ r = sd_event_add_io(s->event, &stream->event_source, fd, EPOLLIN, stdout_stream_process, stream);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add stream to event loop: %m");
+
+ r = sd_event_source_set_priority(stream->event_source, SD_EVENT_PRIORITY_NORMAL+5);
+ if (r < 0)
+ return log_error_errno(r, "Failed to adjust stdout event source priority: %m");
+
+ stream->fd = fd;
+
+ stream->server = s;
+ LIST_PREPEND(stdout_stream, s->stdout_streams, stream);
+ s->n_stdout_streams++;
+
+ if (ret)
+ *ret = stream;
+
+ stream = NULL;
+
+ return 0;
+}
+
+static int stdout_stream_new(sd_event_source *es, int listen_fd, uint32_t revents, void *userdata) {
+ _cleanup_close_ int fd = -1;
+ Server *s = userdata;
+ int r;
+
+ assert(s);
+
+ if (revents != EPOLLIN)
+ return log_error_errno(SYNTHETIC_ERRNO(EIO),
+ "Got invalid event from epoll for stdout server fd: %" PRIx32,
+ revents);
+
+ fd = accept4(s->stdout_fd, NULL, NULL, SOCK_NONBLOCK|SOCK_CLOEXEC);
+ if (fd < 0) {
+ if (errno == EAGAIN)
+ return 0;
+
+ return log_error_errno(errno, "Failed to accept stdout connection: %m");
+ }
+
+ if (s->n_stdout_streams >= STDOUT_STREAMS_MAX) {
+ struct ucred u;
+
+ r = getpeercred(fd, &u);
+
+                /* Close the fd right away, so that the client finds out immediately that the
+                 * connection has been refused, instead of waiting for journald to gather all
+                 * the data it adds to the error message. */
+ fd = safe_close(fd);
+
+ server_driver_message(s, r < 0 ? 0 : u.pid, NULL, LOG_MESSAGE("Too many stdout streams, refusing connection."), NULL);
+ return 0;
+ }
+
+ r = stdout_stream_install(s, fd, NULL);
+ if (r < 0)
+ return r;
+
+ fd = -1;
+ return 0;
+}
+
+static int stdout_stream_load(StdoutStream *stream, const char *fname) {
+ _cleanup_free_ char
+ *priority = NULL,
+ *level_prefix = NULL,
+ *forward_to_syslog = NULL,
+ *forward_to_kmsg = NULL,
+ *forward_to_console = NULL,
+ *stream_id = NULL;
+ int r;
+
+ assert(stream);
+ assert(fname);
+
+ if (!stream->state_file) {
+ stream->state_file = strappend("/run/systemd/journal/streams/", fname);
+ if (!stream->state_file)
+ return log_oom();
+ }
+
+ r = parse_env_file(NULL, stream->state_file,
+ "PRIORITY", &priority,
+ "LEVEL_PREFIX", &level_prefix,
+ "FORWARD_TO_SYSLOG", &forward_to_syslog,
+ "FORWARD_TO_KMSG", &forward_to_kmsg,
+ "FORWARD_TO_CONSOLE", &forward_to_console,
+ "IDENTIFIER", &stream->identifier,
+ "UNIT", &stream->unit_id,
+ "STREAM_ID", &stream_id);
+ if (r < 0)
+ return log_error_errno(r, "Failed to read: %s", stream->state_file);
+
+ if (priority) {
+ int p;
+
+ p = log_level_from_string(priority);
+ if (p >= 0)
+ stream->priority = p;
+ }
+
+ if (level_prefix) {
+ r = parse_boolean(level_prefix);
+ if (r >= 0)
+ stream->level_prefix = r;
+ }
+
+ if (forward_to_syslog) {
+ r = parse_boolean(forward_to_syslog);
+ if (r >= 0)
+ stream->forward_to_syslog = r;
+ }
+
+ if (forward_to_kmsg) {
+ r = parse_boolean(forward_to_kmsg);
+ if (r >= 0)
+ stream->forward_to_kmsg = r;
+ }
+
+ if (forward_to_console) {
+ r = parse_boolean(forward_to_console);
+ if (r >= 0)
+ stream->forward_to_console = r;
+ }
+
+ if (stream_id) {
+ sd_id128_t id;
+
+ r = sd_id128_from_string(stream_id, &id);
+ if (r >= 0)
+ xsprintf(stream->id_field, "_STREAM_ID=" SD_ID128_FORMAT_STR, SD_ID128_FORMAT_VAL(id));
+ }
+
+ return 0;
+}
+
+static int stdout_stream_restore(Server *s, const char *fname, int fd) {
+ StdoutStream *stream;
+ int r;
+
+ assert(s);
+ assert(fname);
+ assert(fd >= 0);
+
+ if (s->n_stdout_streams >= STDOUT_STREAMS_MAX) {
+ log_warning("Too many stdout streams, refusing restoring of stream.");
+ return -ENOBUFS;
+ }
+
+ r = stdout_stream_install(s, fd, &stream);
+ if (r < 0)
+ return r;
+
+ stream->state = STDOUT_STREAM_RUNNING;
+ stream->fdstore = true;
+
+ /* Ignore all parsing errors */
+ (void) stdout_stream_load(stream, fname);
+
+ return 0;
+}
+
+int server_restore_streams(Server *s, FDSet *fds) {
+ _cleanup_closedir_ DIR *d = NULL;
+ struct dirent *de;
+ int r;
+
+ d = opendir("/run/systemd/journal/streams");
+ if (!d) {
+ if (errno == ENOENT)
+ return 0;
+
+ return log_warning_errno(errno, "Failed to enumerate /run/systemd/journal/streams: %m");
+ }
+
+ FOREACH_DIRENT(de, d, goto fail) {
+ unsigned long st_dev, st_ino;
+ bool found = false;
+ Iterator i;
+ int fd;
+
+ if (sscanf(de->d_name, "%lu:%lu", &st_dev, &st_ino) != 2)
+ continue;
+
+ FDSET_FOREACH(fd, fds, i) {
+ struct stat st;
+
+ if (fstat(fd, &st) < 0)
+ return log_error_errno(errno, "Failed to stat %s: %m", de->d_name);
+
+ if (S_ISSOCK(st.st_mode) && st.st_dev == st_dev && st.st_ino == st_ino) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found) {
+ /* No file descriptor? Then let's delete the state file */
+ log_debug("Cannot restore stream file %s", de->d_name);
+ if (unlinkat(dirfd(d), de->d_name, 0) < 0)
+ log_warning_errno(errno, "Failed to remove /run/systemd/journal/streams/%s: %m",
+ de->d_name);
+ continue;
+ }
+
+ fdset_remove(fds, fd);
+
+ r = stdout_stream_restore(s, de->d_name, fd);
+ if (r < 0)
+ safe_close(fd);
+ }
+
+ return 0;
+
+fail:
+ return log_error_errno(errno, "Failed to read streams directory: %m");
+}
+
+int server_open_stdout_socket(Server *s) {
+ static const union sockaddr_union sa = {
+ .un.sun_family = AF_UNIX,
+ .un.sun_path = "/run/systemd/journal/stdout",
+ };
+ int r;
+
+ assert(s);
+
+ if (s->stdout_fd < 0) {
+ s->stdout_fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
+ if (s->stdout_fd < 0)
+ return log_error_errno(errno, "socket() failed: %m");
+
+ (void) sockaddr_un_unlink(&sa.un);
+
+ r = bind(s->stdout_fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
+ if (r < 0)
+ return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
+
+ (void) chmod(sa.un.sun_path, 0666);
+
+ if (listen(s->stdout_fd, SOMAXCONN) < 0)
+ return log_error_errno(errno, "listen(%s) failed: %m", sa.un.sun_path);
+ } else
+ (void) fd_nonblock(s->stdout_fd, true);
+
+ r = sd_event_add_io(s->event, &s->stdout_event_source, s->stdout_fd, EPOLLIN, stdout_stream_new, s);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add stdout server fd to event source: %m");
+
+ r = sd_event_source_set_priority(s->stdout_event_source, SD_EVENT_PRIORITY_NORMAL+5);
+ if (r < 0)
+ return log_error_errno(r, "Failed to adjust priority of stdout server event source: %m");
+
+ return 0;
+}
+
+void stdout_stream_send_notify(StdoutStream *s) {
+ struct iovec iovec = {
+ .iov_base = (char*) "FDSTORE=1",
+ .iov_len = STRLEN("FDSTORE=1"),
+ };
+ struct msghdr msghdr = {
+ .msg_iov = &iovec,
+ .msg_iovlen = 1,
+ };
+ struct cmsghdr *cmsg;
+ ssize_t l;
+
+ assert(s);
+ assert(!s->fdstore);
+ assert(s->in_notify_queue);
+ assert(s->server);
+ assert(s->server->notify_fd >= 0);
+
+ /* Store the connection fd in PID 1, so that we get it passed
+ * in again on next start */
+
+ msghdr.msg_controllen = CMSG_SPACE(sizeof(int));
+ msghdr.msg_control = alloca0(msghdr.msg_controllen);
+
+ cmsg = CMSG_FIRSTHDR(&msghdr);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_RIGHTS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(int));
+
+ memcpy(CMSG_DATA(cmsg), &s->fd, sizeof(int));
+
+ l = sendmsg(s->server->notify_fd, &msghdr, MSG_DONTWAIT|MSG_NOSIGNAL);
+ if (l < 0) {
+ if (errno == EAGAIN)
+ return;
+
+ log_error_errno(errno, "Failed to send stream file descriptor to service manager: %m");
+ } else {
+ log_debug("Successfully sent stream file descriptor to service manager.");
+                s->fdstore = true;
+ }
+
+ LIST_REMOVE(stdout_stream_notify_queue, s->server->stdout_streams_notify_queue, s);
+ s->in_notify_queue = false;
+
+}
diff --git a/src/journal/journald-stream.h b/src/journal/journald-stream.h
new file mode 100644
index 0000000..487376e
--- /dev/null
+++ b/src/journal/journald-stream.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+typedef struct StdoutStream StdoutStream;
+
+#include "fdset.h"
+#include "journald-server.h"
+
+int server_open_stdout_socket(Server *s);
+int server_restore_streams(Server *s, FDSet *fds);
+
+void stdout_stream_free(StdoutStream *s);
+int stdout_stream_install(Server *s, int fd, StdoutStream **ret);
+void stdout_stream_destroy(StdoutStream *s);
+void stdout_stream_send_notify(StdoutStream *s);
diff --git a/src/journal/journald-syslog.c b/src/journal/journald-syslog.c
new file mode 100644
index 0000000..a60a259
--- /dev/null
+++ b/src/journal/journald-syslog.c
@@ -0,0 +1,516 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <stddef.h>
+#include <sys/epoll.h>
+#include <unistd.h>
+
+#include "sd-messages.h"
+
+#include "alloc-util.h"
+#include "fd-util.h"
+#include "format-util.h"
+#include "io-util.h"
+#include "journald-console.h"
+#include "journald-kmsg.h"
+#include "journald-server.h"
+#include "journald-syslog.h"
+#include "journald-wall.h"
+#include "process-util.h"
+#include "selinux-util.h"
+#include "socket-util.h"
+#include "stdio-util.h"
+#include "string-util.h"
+#include "syslog-util.h"
+
+/* Warn once every 30s if we missed syslog messages */
+#define WARN_FORWARD_SYSLOG_MISSED_USEC (30 * USEC_PER_SEC)
+
+static void forward_syslog_iovec(Server *s, const struct iovec *iovec, unsigned n_iovec, const struct ucred *ucred, const struct timeval *tv) {
+
+ static const union sockaddr_union sa = {
+ .un.sun_family = AF_UNIX,
+ .un.sun_path = "/run/systemd/journal/syslog",
+ };
+ struct msghdr msghdr = {
+ .msg_iov = (struct iovec *) iovec,
+ .msg_iovlen = n_iovec,
+ .msg_name = (struct sockaddr*) &sa.sa,
+ .msg_namelen = SOCKADDR_UN_LEN(sa.un),
+ };
+ struct cmsghdr *cmsg;
+ union {
+ struct cmsghdr cmsghdr;
+ uint8_t buf[CMSG_SPACE(sizeof(struct ucred))];
+ } control;
+
+ assert(s);
+ assert(iovec);
+ assert(n_iovec > 0);
+
+ if (ucred) {
+ zero(control);
+ msghdr.msg_control = &control;
+ msghdr.msg_controllen = sizeof(control);
+
+ cmsg = CMSG_FIRSTHDR(&msghdr);
+ cmsg->cmsg_level = SOL_SOCKET;
+ cmsg->cmsg_type = SCM_CREDENTIALS;
+ cmsg->cmsg_len = CMSG_LEN(sizeof(struct ucred));
+ memcpy(CMSG_DATA(cmsg), ucred, sizeof(struct ucred));
+ msghdr.msg_controllen = cmsg->cmsg_len;
+ }
+
+        /* Forward the syslog message we received via /dev/log to
+         * /run/systemd/journal/syslog. Unfortunately we currently can't set
+         * the SO_TIMESTAMP auxiliary data, and hence we don't. */
+
+ if (sendmsg(s->syslog_fd, &msghdr, MSG_NOSIGNAL) >= 0)
+ return;
+
+ /* The socket is full? I guess the syslog implementation is
+ * too slow, and we shouldn't wait for that... */
+ if (errno == EAGAIN) {
+ s->n_forward_syslog_missed++;
+ return;
+ }
+
+ if (ucred && IN_SET(errno, ESRCH, EPERM)) {
+ struct ucred u;
+
+                /* Hmm, presumably the sender process vanished
+                 * by now, or we don't have CAP_SYS_ADMIN, so
+                 * let's fix it up as well as we can, and retry */
+
+ u = *ucred;
+ u.pid = getpid_cached();
+ memcpy(CMSG_DATA(cmsg), &u, sizeof(struct ucred));
+
+ if (sendmsg(s->syslog_fd, &msghdr, MSG_NOSIGNAL) >= 0)
+ return;
+
+ if (errno == EAGAIN) {
+ s->n_forward_syslog_missed++;
+ return;
+ }
+ }
+
+ if (errno != ENOENT)
+ log_debug_errno(errno, "Failed to forward syslog message: %m");
+}
+
+static void forward_syslog_raw(Server *s, int priority, const char *buffer, size_t buffer_len, const struct ucred *ucred, const struct timeval *tv) {
+ struct iovec iovec;
+
+ assert(s);
+ assert(buffer);
+
+ if (LOG_PRI(priority) > s->max_level_syslog)
+ return;
+
+ iovec = IOVEC_MAKE((char *) buffer, buffer_len);
+ forward_syslog_iovec(s, &iovec, 1, ucred, tv);
+}
+
+void server_forward_syslog(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred, const struct timeval *tv) {
+ struct iovec iovec[5];
+ char header_priority[DECIMAL_STR_MAX(priority) + 3], header_time[64],
+ header_pid[STRLEN("[]: ") + DECIMAL_STR_MAX(pid_t) + 1];
+ int n = 0;
+ time_t t;
+ struct tm tm;
+ _cleanup_free_ char *ident_buf = NULL;
+
+ assert(s);
+ assert(priority >= 0);
+ assert(priority <= 999);
+ assert(message);
+
+ if (LOG_PRI(priority) > s->max_level_syslog)
+ return;
+
+ /* First: priority field */
+ xsprintf(header_priority, "<%i>", priority);
+ iovec[n++] = IOVEC_MAKE_STRING(header_priority);
+
+ /* Second: timestamp */
+ t = tv ? tv->tv_sec : ((time_t) (now(CLOCK_REALTIME) / USEC_PER_SEC));
+ if (!localtime_r(&t, &tm))
+ return;
+ if (strftime(header_time, sizeof(header_time), "%h %e %T ", &tm) <= 0)
+ return;
+ iovec[n++] = IOVEC_MAKE_STRING(header_time);
+
+ /* Third: identifier and PID */
+ if (ucred) {
+ if (!identifier) {
+ get_process_comm(ucred->pid, &ident_buf);
+ identifier = ident_buf;
+ }
+
+ xsprintf(header_pid, "["PID_FMT"]: ", ucred->pid);
+
+ if (identifier)
+ iovec[n++] = IOVEC_MAKE_STRING(identifier);
+
+ iovec[n++] = IOVEC_MAKE_STRING(header_pid);
+ } else if (identifier) {
+ iovec[n++] = IOVEC_MAKE_STRING(identifier);
+ iovec[n++] = IOVEC_MAKE_STRING(": ");
+ }
+
+ /* Fourth: message */
+ iovec[n++] = IOVEC_MAKE_STRING(message);
+
+ forward_syslog_iovec(s, iovec, n, ucred, tv);
+}
+
+int syslog_fixup_facility(int priority) {
+
+ if ((priority & LOG_FACMASK) == 0)
+ return (priority & LOG_PRIMASK) | LOG_USER;
+
+ return priority;
+}
+
+size_t syslog_parse_identifier(const char **buf, char **identifier, char **pid) {
+ const char *p;
+ char *t;
+ size_t l, e;
+
+ assert(buf);
+ assert(identifier);
+ assert(pid);
+
+ p = *buf;
+
+ p += strspn(p, WHITESPACE);
+ l = strcspn(p, WHITESPACE);
+
+ if (l <= 0 ||
+ p[l-1] != ':')
+ return 0;
+
+ e = l;
+ l--;
+
+ if (l > 0 && p[l-1] == ']') {
+ size_t k = l-1;
+
+ for (;;) {
+
+ if (p[k] == '[') {
+ t = strndup(p+k+1, l-k-2);
+ if (t)
+ *pid = t;
+
+ l = k;
+ break;
+ }
+
+ if (k == 0)
+ break;
+
+ k--;
+ }
+ }
+
+ t = strndup(p, l);
+ if (t)
+ *identifier = t;
+
+ /* Single space is used as separator */
+ if (p[e] != '\0' && strchr(WHITESPACE, p[e]))
+ e++;
+
+ l = (p - *buf) + e;
+ *buf = p + e;
+ return l;
+}
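+
+/* For example (illustrative input): given "crond[1234]: job started", the function above sets
+ * *identifier to "crond" and *pid to "1234", advances *buf to point at "job started", and returns
+ * the number of bytes consumed from the original buffer. */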
+
+static int syslog_skip_timestamp(const char **buf) {
+ enum {
+ LETTER,
+ SPACE,
+ NUMBER,
+ SPACE_OR_NUMBER,
+ COLON
+ } sequence[] = {
+ LETTER, LETTER, LETTER,
+ SPACE,
+ SPACE_OR_NUMBER, NUMBER,
+ SPACE,
+ SPACE_OR_NUMBER, NUMBER,
+ COLON,
+ SPACE_OR_NUMBER, NUMBER,
+ COLON,
+ SPACE_OR_NUMBER, NUMBER,
+ SPACE
+ };
+
+ const char *p, *t;
+ unsigned i;
+
+ assert(buf);
+ assert(*buf);
+
+ for (i = 0, p = *buf; i < ELEMENTSOF(sequence); i++, p++) {
+ if (!*p)
+ return 0;
+
+ switch (sequence[i]) {
+
+ case SPACE:
+ if (*p != ' ')
+ return 0;
+ break;
+
+ case SPACE_OR_NUMBER:
+ if (*p == ' ')
+ break;
+
+ _fallthrough_;
+ case NUMBER:
+ if (*p < '0' || *p > '9')
+ return 0;
+
+ break;
+
+ case LETTER:
+ if (!(*p >= 'A' && *p <= 'Z') &&
+ !(*p >= 'a' && *p <= 'z'))
+ return 0;
+
+ break;
+
+ case COLON:
+ if (*p != ':')
+ return 0;
+ break;
+
+ }
+ }
+
+ t = *buf;
+ *buf = p;
+ return p - t;
+}
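+
+/* For example (illustrative input): "Oct  1 06:13:45 su: ..." matches the classic RFC 3164
+ * timestamp pattern above, so *buf is advanced past the 16 timestamp characters and 16 is
+ * returned; any input that does not match exactly leaves *buf untouched and returns 0. */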
+
+void server_process_syslog_message(
+ Server *s,
+ const char *buf,
+ size_t raw_len,
+ const struct ucred *ucred,
+ const struct timeval *tv,
+ const char *label,
+ size_t label_len) {
+
+ char *t, syslog_priority[sizeof("PRIORITY=") + DECIMAL_STR_MAX(int)],
+ syslog_facility[sizeof("SYSLOG_FACILITY=") + DECIMAL_STR_MAX(int)];
+ const char *msg, *syslog_ts, *a;
+ _cleanup_free_ char *identifier = NULL, *pid = NULL,
+ *dummy = NULL, *msg_msg = NULL, *msg_raw = NULL;
+ int priority = LOG_USER | LOG_INFO, r;
+ ClientContext *context = NULL;
+ struct iovec *iovec;
+ size_t n = 0, m, i, leading_ws, syslog_ts_len;
+ bool store_raw;
+
+ assert(s);
+ assert(buf);
+ /* The message cannot be empty. */
+ assert(raw_len > 0);
+        /* The buffer is NUL-terminated and can be used as a string. raw_len is the length
+         * without the terminating NUL byte, so the buffer is actually one byte bigger. */
+ assert(buf[raw_len] == '\0');
+
+ if (ucred && pid_is_valid(ucred->pid)) {
+ r = client_context_get(s, ucred->pid, ucred, label, label_len, NULL, &context);
+ if (r < 0)
+ log_warning_errno(r, "Failed to retrieve credentials for PID " PID_FMT ", ignoring: %m", ucred->pid);
+ }
+
+ /* We are creating a copy of the message because we want to forward the original message
+ verbatim to the legacy syslog implementation */
+ for (i = raw_len; i > 0; i--)
+ if (!strchr(WHITESPACE, buf[i-1]))
+ break;
+
+ leading_ws = strspn(buf, WHITESPACE);
+
+ if (i == 0)
+ /* The message contains only whitespaces */
+ msg = buf + raw_len;
+ else if (i == raw_len)
+ /* Nice! No need to strip anything on the end, let's optimize this a bit */
+ msg = buf + leading_ws;
+ else {
+ msg = dummy = new(char, i - leading_ws + 1);
+ if (!dummy) {
+ log_oom();
+ return;
+ }
+
+ memcpy(dummy, buf + leading_ws, i - leading_ws);
+ dummy[i - leading_ws] = 0;
+ }
+
+        /* We will add the SYSLOG_RAW= field if we stripped anything, or if the
+         * input message contained NUL bytes. */
+ store_raw = msg != buf || strlen(msg) != raw_len;
+
+ syslog_parse_priority(&msg, &priority, true);
+
+ if (!client_context_test_priority(context, priority))
+ return;
+
+ syslog_ts = msg;
+ syslog_ts_len = syslog_skip_timestamp(&msg);
+ if (syslog_ts_len == 0)
+ /* We failed to parse the full timestamp, store the raw message too */
+ store_raw = true;
+
+ syslog_parse_identifier(&msg, &identifier, &pid);
+
+ if (s->forward_to_syslog)
+ forward_syslog_raw(s, priority, buf, raw_len, ucred, tv);
+
+ if (s->forward_to_kmsg)
+ server_forward_kmsg(s, priority, identifier, msg, ucred);
+
+ if (s->forward_to_console)
+ server_forward_console(s, priority, identifier, msg, ucred);
+
+ if (s->forward_to_wall)
+ server_forward_wall(s, priority, identifier, msg, ucred);
+
+ m = N_IOVEC_META_FIELDS + 8 + client_context_extra_fields_n_iovec(context);
+ iovec = newa(struct iovec, m);
+
+ iovec[n++] = IOVEC_MAKE_STRING("_TRANSPORT=syslog");
+
+ xsprintf(syslog_priority, "PRIORITY=%i", priority & LOG_PRIMASK);
+ iovec[n++] = IOVEC_MAKE_STRING(syslog_priority);
+
+ if (priority & LOG_FACMASK) {
+ xsprintf(syslog_facility, "SYSLOG_FACILITY=%i", LOG_FAC(priority));
+ iovec[n++] = IOVEC_MAKE_STRING(syslog_facility);
+ }
+
+ if (identifier) {
+ a = strjoina("SYSLOG_IDENTIFIER=", identifier);
+ iovec[n++] = IOVEC_MAKE_STRING(a);
+ }
+
+ if (pid) {
+ a = strjoina("SYSLOG_PID=", pid);
+ iovec[n++] = IOVEC_MAKE_STRING(a);
+ }
+
+ if (syslog_ts_len > 0) {
+ const size_t hlen = STRLEN("SYSLOG_TIMESTAMP=");
+
+ t = newa(char, hlen + syslog_ts_len);
+ memcpy(t, "SYSLOG_TIMESTAMP=", hlen);
+ memcpy(t + hlen, syslog_ts, syslog_ts_len);
+
+ iovec[n++] = IOVEC_MAKE(t, hlen + syslog_ts_len);
+ }
+
+ msg_msg = strjoin("MESSAGE=", msg);
+ if (!msg_msg) {
+ log_oom();
+ return;
+ }
+ iovec[n++] = IOVEC_MAKE_STRING(msg_msg);
+
+ if (store_raw) {
+ const size_t hlen = STRLEN("SYSLOG_RAW=");
+
+ msg_raw = new(char, hlen + raw_len);
+ if (!msg_raw) {
+ log_oom();
+ return;
+ }
+
+ memcpy(msg_raw, "SYSLOG_RAW=", hlen);
+ memcpy(msg_raw + hlen, buf, raw_len);
+
+ iovec[n++] = IOVEC_MAKE(msg_raw, hlen + raw_len);
+ }
+
+ server_dispatch_message(s, iovec, n, m, context, tv, priority, 0);
+}
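+
+/* Putting it together (illustrative input): for the datagram "<34>Oct  1 06:13:45 su[95]: failed",
+ * the code above would emit roughly
+ *
+ *     PRIORITY=2
+ *     SYSLOG_FACILITY=4
+ *     SYSLOG_IDENTIFIER=su
+ *     SYSLOG_PID=95
+ *     SYSLOG_TIMESTAMP=Oct  1 06:13:45
+ *     MESSAGE=failed
+ *
+ * plus _TRANSPORT=syslog and the usual trusted metadata fields. */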
+
+int server_open_syslog_socket(Server *s) {
+
+ static const union sockaddr_union sa = {
+ .un.sun_family = AF_UNIX,
+ .un.sun_path = "/run/systemd/journal/dev-log",
+ };
+ int r;
+
+ assert(s);
+
+ if (s->syslog_fd < 0) {
+ s->syslog_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
+ if (s->syslog_fd < 0)
+ return log_error_errno(errno, "socket() failed: %m");
+
+ (void) sockaddr_un_unlink(&sa.un);
+
+ r = bind(s->syslog_fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
+ if (r < 0)
+ return log_error_errno(errno, "bind(%s) failed: %m", sa.un.sun_path);
+
+ (void) chmod(sa.un.sun_path, 0666);
+ } else
+ (void) fd_nonblock(s->syslog_fd, true);
+
+ r = setsockopt_int(s->syslog_fd, SOL_SOCKET, SO_PASSCRED, true);
+ if (r < 0)
+ return log_error_errno(r, "SO_PASSCRED failed: %m");
+
+#if HAVE_SELINUX
+ if (mac_selinux_use()) {
+ r = setsockopt_int(s->syslog_fd, SOL_SOCKET, SO_PASSSEC, true);
+ if (r < 0)
+ log_warning_errno(r, "SO_PASSSEC failed: %m");
+ }
+#endif
+
+ r = setsockopt_int(s->syslog_fd, SOL_SOCKET, SO_TIMESTAMP, true);
+ if (r < 0)
+ return log_error_errno(r, "SO_TIMESTAMP failed: %m");
+
+ r = sd_event_add_io(s->event, &s->syslog_event_source, s->syslog_fd, EPOLLIN, server_process_datagram, s);
+ if (r < 0)
+ return log_error_errno(r, "Failed to add syslog server fd to event loop: %m");
+
+ r = sd_event_source_set_priority(s->syslog_event_source, SD_EVENT_PRIORITY_NORMAL+5);
+ if (r < 0)
+ return log_error_errno(r, "Failed to adjust syslog event source priority: %m");
+
+ return 0;
+}
+
+void server_maybe_warn_forward_syslog_missed(Server *s) {
+ usec_t n;
+
+ assert(s);
+
+ if (s->n_forward_syslog_missed <= 0)
+ return;
+
+ n = now(CLOCK_MONOTONIC);
+ if (s->last_warn_forward_syslog_missed + WARN_FORWARD_SYSLOG_MISSED_USEC > n)
+ return;
+
+ server_driver_message(s, 0,
+ "MESSAGE_ID=" SD_MESSAGE_FORWARD_SYSLOG_MISSED_STR,
+ LOG_MESSAGE("Forwarding to syslog missed %u messages.",
+ s->n_forward_syslog_missed),
+ NULL);
+
+ s->n_forward_syslog_missed = 0;
+ s->last_warn_forward_syslog_missed = n;
+}
diff --git a/src/journal/journald-syslog.h b/src/journal/journald-syslog.h
new file mode 100644
index 0000000..7ca5897
--- /dev/null
+++ b/src/journal/journald-syslog.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "journald-server.h"
+
+int syslog_fixup_facility(int priority) _const_;
+
+size_t syslog_parse_identifier(const char **buf, char **identifier, char **pid);
+
+void server_forward_syslog(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred, const struct timeval *tv);
+
+void server_process_syslog_message(Server *s, const char *buf, size_t buf_len, const struct ucred *ucred, const struct timeval *tv, const char *label, size_t label_len);
+int server_open_syslog_socket(Server *s);
+
+void server_maybe_warn_forward_syslog_missed(Server *s);
diff --git a/src/journal/journald-wall.c b/src/journal/journald-wall.c
new file mode 100644
index 0000000..370c9b3
--- /dev/null
+++ b/src/journal/journald-wall.c
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include "alloc-util.h"
+#include "format-util.h"
+#include "journald-server.h"
+#include "journald-wall.h"
+#include "process-util.h"
+#include "string-util.h"
+#include "utmp-wtmp.h"
+
+void server_forward_wall(
+ Server *s,
+ int priority,
+ const char *identifier,
+ const char *message,
+ const struct ucred *ucred) {
+
+ _cleanup_free_ char *ident_buf = NULL, *l_buf = NULL;
+ const char *l;
+ int r;
+
+ assert(s);
+ assert(message);
+
+ if (LOG_PRI(priority) > s->max_level_wall)
+ return;
+
+ if (ucred) {
+ if (!identifier) {
+ get_process_comm(ucred->pid, &ident_buf);
+ identifier = ident_buf;
+ }
+
+ if (asprintf(&l_buf, "%s["PID_FMT"]: %s", strempty(identifier), ucred->pid, message) < 0) {
+ log_oom();
+ return;
+ }
+
+ l = l_buf;
+
+ } else if (identifier) {
+
+ l = l_buf = strjoin(identifier, ": ", message);
+ if (!l_buf) {
+ log_oom();
+ return;
+ }
+ } else
+ l = message;
+
+ r = utmp_wall(l, "systemd-journald", NULL, NULL, NULL);
+ if (r < 0)
+ log_debug_errno(r, "Failed to send wall message: %m");
+}
diff --git a/src/journal/journald-wall.h b/src/journal/journald-wall.h
new file mode 100644
index 0000000..b73059a
--- /dev/null
+++ b/src/journal/journald-wall.h
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include "journald-server.h"
+
+void server_forward_wall(Server *s, int priority, const char *identifier, const char *message, const struct ucred *ucred);
diff --git a/src/journal/journald.c b/src/journal/journald.c
new file mode 100644
index 0000000..5e7b1dc
--- /dev/null
+++ b/src/journal/journald.c
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <unistd.h>
+
+#include "sd-daemon.h"
+#include "sd-messages.h"
+
+#include "format-util.h"
+#include "journal-authenticate.h"
+#include "journald-kmsg.h"
+#include "journald-server.h"
+#include "journald-syslog.h"
+#include "process-util.h"
+#include "sigbus.h"
+
+int main(int argc, char *argv[]) {
+ Server server;
+ int r;
+
+ if (argc > 1) {
+ log_error("This program does not take arguments.");
+ return EXIT_FAILURE;
+ }
+
+ log_set_prohibit_ipc(true);
+ log_set_target(LOG_TARGET_AUTO);
+ log_set_facility(LOG_SYSLOG);
+ log_parse_environment();
+ log_open();
+
+ umask(0022);
+
+ sigbus_install();
+
+ r = server_init(&server);
+ if (r < 0)
+ goto finish;
+
+ server_vacuum(&server, false);
+ server_flush_to_var(&server, true);
+ server_flush_dev_kmsg(&server);
+
+ log_debug("systemd-journald running as pid "PID_FMT, getpid_cached());
+ server_driver_message(&server, 0,
+ "MESSAGE_ID=" SD_MESSAGE_JOURNAL_START_STR,
+ LOG_MESSAGE("Journal started"),
+ NULL);
+
+ /* Make sure to send the usage message *after* flushing the
+ * journal so entries from the runtime journals are ordered
+ * before this message. See #4190 for some details. */
+ server_space_usage_message(&server, NULL);
+
+ for (;;) {
+ usec_t t = USEC_INFINITY, n;
+
+ r = sd_event_get_state(server.event);
+ if (r < 0)
+ goto finish;
+ if (r == SD_EVENT_FINISHED)
+ break;
+
+ n = now(CLOCK_REALTIME);
+
+ if (server.max_retention_usec > 0 && server.oldest_file_usec > 0) {
+
+ /* The retention time is reached, so let's vacuum! */
+ if (server.oldest_file_usec + server.max_retention_usec < n) {
+ log_info("Retention time reached.");
+ server_rotate(&server);
+ server_vacuum(&server, false);
+ continue;
+ }
+
+ /* Calculate when to rotate the next time */
+ t = server.oldest_file_usec + server.max_retention_usec - n;
+ }
+
+#if HAVE_GCRYPT
+ if (server.system_journal) {
+ usec_t u;
+
+ if (journal_file_next_evolve_usec(server.system_journal, &u)) {
+ if (n >= u)
+ t = 0;
+ else
+ t = MIN(t, u - n);
+ }
+ }
+#endif
+
+ r = sd_event_run(server.event, t);
+ if (r < 0) {
+ log_error_errno(r, "Failed to run event loop: %m");
+ goto finish;
+ }
+
+ server_maybe_append_tags(&server);
+ server_maybe_warn_forward_syslog_missed(&server);
+ }
+
+ log_debug("systemd-journald stopped as pid "PID_FMT, getpid_cached());
+ server_driver_message(&server, 0,
+ "MESSAGE_ID=" SD_MESSAGE_JOURNAL_STOP_STR,
+ LOG_MESSAGE("Journal stopped"),
+ NULL);
+
+finish:
+ server_done(&server);
+
+ return r < 0 ? EXIT_FAILURE : EXIT_SUCCESS;
+}
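[Editor's note: a worked example of the wakeup arithmetic in the loop above, with hypothetical numbers: if oldest_file_usec corresponds to 1000 s and MaxRetentionSec=600 s, a pass at n = 1500 s sets the event timeout to t = 1000 + 600 - 1500 = 100 s; once n reaches 1600 s the retention branch triggers instead, and the journal is rotated and vacuumed before the next iteration.]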
diff --git a/src/journal/journald.conf b/src/journal/journald.conf
new file mode 100644
index 0000000..2f1c661
--- /dev/null
+++ b/src/journal/journald.conf
@@ -0,0 +1,43 @@
+# This file is part of systemd.
+#
+# systemd is free software; you can redistribute it and/or modify it
+# under the terms of the GNU Lesser General Public License as published by
+# the Free Software Foundation; either version 2.1 of the License, or
+# (at your option) any later version.
+#
+# Entries in this file show the compile time defaults.
+# You can change settings by editing this file.
+# Defaults can be restored by simply deleting this file.
+#
+# See journald.conf(5) for details.
+
+[Journal]
+#Storage=auto
+#Compress=yes
+#Seal=yes
+#SplitMode=uid
+#SyncIntervalSec=5m
+#RateLimitIntervalSec=30s
+#RateLimitBurst=10000
+#SystemMaxUse=
+#SystemKeepFree=
+#SystemMaxFileSize=
+#SystemMaxFiles=100
+#RuntimeMaxUse=
+#RuntimeKeepFree=
+#RuntimeMaxFileSize=
+#RuntimeMaxFiles=100
+#MaxRetentionSec=
+#MaxFileSec=1month
+#ForwardToSyslog=no
+#ForwardToKMsg=no
+#ForwardToConsole=no
+#ForwardToWall=yes
+#TTYPath=/dev/console
+#MaxLevelStore=debug
+#MaxLevelSyslog=debug
+#MaxLevelKMsg=notice
+#MaxLevelConsole=info
+#MaxLevelWall=emerg
+#LineMax=48K
+#ReadKMsg=yes
diff --git a/src/journal/lookup3.c b/src/journal/lookup3.c
new file mode 100644
index 0000000..6c61f17
--- /dev/null
+++ b/src/journal/lookup3.c
@@ -0,0 +1,1005 @@
+/* Slightly modified by Lennart Poettering, to avoid name clashes, and
+ * unexport a few functions. */
+
+#include "lookup3.h"
+
+/*
+-------------------------------------------------------------------------------
+lookup3.c, by Bob Jenkins, May 2006, Public Domain.
+
+These are functions for producing 32-bit hashes for hash table lookup.
+hashword(), hashlittle(), hashlittle2(), hashbig(), mix(), and final()
+are externally useful functions. Routines to test the hash are included
+if SELF_TEST is defined. You can use this free for any purpose. It's in
+the public domain. It has no warranty.
+
+You probably want to use hashlittle(). hashlittle() and hashbig()
+hash byte arrays. hashlittle() is faster than hashbig() on
+little-endian machines. Intel and AMD are little-endian machines.
+On second thought, you probably want hashlittle2(), which is identical to
+hashlittle() except it returns two 32-bit hashes for the price of one.
+You could implement hashbig2() if you wanted but I haven't bothered here.
+
+If you want to find a hash of, say, exactly 7 integers, do
+ a = i1; b = i2; c = i3;
+ mix(a,b,c);
+ a += i4; b += i5; c += i6;
+ mix(a,b,c);
+ a += i7;
+ final(a,b,c);
+then use c as the hash value. If you have a variable length array of
+4-byte integers to hash, use hashword(). If you have a byte array (like
+a character string), use hashlittle(). If you have several byte arrays, or
+a mix of things, see the comments above hashlittle().
+
+Why is this so big? I read 12 bytes at a time into 3 4-byte integers,
+then mix those integers. This is fast (you can do a lot more thorough
+mixing with 12*3 instructions on 3 integers than you can with 3 instructions
+on 1 byte), but shoehorning those bytes into integers efficiently is messy.
+-------------------------------------------------------------------------------
+*/
+/* #define SELF_TEST 1 */
+
+#include <stdint.h> /* defines uint32_t etc */
+#include <stdio.h> /* defines printf for tests */
+#include <sys/param.h> /* attempt to define endianness */
+#include <time.h> /* defines time_t for timings in the test */
+#ifdef linux
+# include <endian.h> /* attempt to define endianness */
+#endif
+
+#if __GNUC__ >= 7
+_Pragma("GCC diagnostic ignored \"-Wimplicit-fallthrough\"")
+#endif
+
+/*
+ * My best guess at if you are big-endian or little-endian. This may
+ * need adjustment.
+ */
+#if (defined(__BYTE_ORDER) && defined(__LITTLE_ENDIAN) && \
+ __BYTE_ORDER == __LITTLE_ENDIAN) || \
+ (defined(i386) || defined(__i386__) || defined(__i486__) || \
+ defined(__i586__) || defined(__i686__) || defined(vax) || defined(MIPSEL))
+# define HASH_LITTLE_ENDIAN 1
+# define HASH_BIG_ENDIAN 0
+#elif (defined(__BYTE_ORDER) && defined(__BIG_ENDIAN) && \
+ __BYTE_ORDER == __BIG_ENDIAN) || \
+ (defined(sparc) || defined(POWERPC) || defined(mc68000) || defined(sel))
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 1
+#else
+# define HASH_LITTLE_ENDIAN 0
+# define HASH_BIG_ENDIAN 0
+#endif
+
+#define hashsize(n) ((uint32_t)1<<(n))
+#define hashmask(n) (hashsize(n)-1)
+#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))
+
+/*
+-------------------------------------------------------------------------------
+mix -- mix 3 32-bit values reversibly.
+
+This is reversible, so any information in (a,b,c) before mix() is
+still in (a,b,c) after mix().
+
+If four pairs of (a,b,c) inputs are run through mix(), or through
+mix() in reverse, there are at least 32 bits of the output that
+are sometimes the same for one pair and different for another pair.
+This was tested for:
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+  is commonly produced by subtraction) looks like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+Some k values for my "a-=c; a^=rot(c,k); c+=b;" arrangement that
+satisfy this are
+ 4 6 8 16 19 4
+ 9 15 3 18 27 15
+ 14 9 3 7 17 3
+Well, "9 15 3 18 27 15" didn't quite get 32 bits diffing
+for "differ" defined as + with a one-bit base and a two-bit delta. I
+used http://burtleburtle.net/bob/hash/avalanche.html to choose
+the operations, constants, and arrangements of the variables.
+
+This does not achieve avalanche. There are input bits of (a,b,c)
+that fail to affect some output bits of (a,b,c), especially of a. The
+most thoroughly mixed value is c, but it doesn't really even achieve
+avalanche in c.
+
+This allows some parallelism. Read-after-writes are good at doubling
+the number of bits affected, so the goal of mixing pulls in the opposite
+direction as the goal of parallelism. I did what I could. Rotates
+seem to cost as much as shifts on every machine I could lay my hands
+on, and rotates are much kinder to the top and bottom bits, so I used
+rotates.
+-------------------------------------------------------------------------------
+*/
+#define mix(a,b,c) \
+{ \
+ a -= c; a ^= rot(c, 4); c += b; \
+ b -= a; b ^= rot(a, 6); a += c; \
+ c -= b; c ^= rot(b, 8); b += a; \
+ a -= c; a ^= rot(c,16); c += b; \
+ b -= a; b ^= rot(a,19); a += c; \
+ c -= b; c ^= rot(b, 4); b += a; \
+}
+
+/*
+-------------------------------------------------------------------------------
+final -- final mixing of 3 32-bit values (a,b,c) into c
+
+Pairs of (a,b,c) values differing in only a few bits will usually
+produce values of c that look totally different. This was tested for
+* pairs that differed by one bit, by two bits, in any combination
+ of top bits of (a,b,c), or in any combination of bottom bits of
+ (a,b,c).
+* "differ" is defined as +, -, ^, or ~^. For + and -, I transformed
+ the output delta to a Gray code (a^(a>>1)) so a string of 1's (as
+  is commonly produced by subtraction) looks like a single 1-bit
+ difference.
+* the base values were pseudorandom, all zero but one bit set, or
+ all zero plus a counter that starts at zero.
+
+These constants passed:
+ 14 11 25 16 4 14 24
+ 12 14 25 16 4 14 24
+and these came close:
+ 4 8 15 26 3 22 24
+ 10 8 15 26 3 22 24
+ 11 8 15 26 3 22 24
+-------------------------------------------------------------------------------
+*/
+#define final(a,b,c) \
+{ \
+ c ^= b; c -= rot(b,14); \
+ a ^= c; a -= rot(c,11); \
+ b ^= a; b -= rot(a,25); \
+ c ^= b; c -= rot(b,16); \
+ a ^= c; a -= rot(c,4); \
+ b ^= a; b -= rot(a,14); \
+ c ^= b; c -= rot(b,24); \
+}
+
+/*
+--------------------------------------------------------------------
+ This works on all machines. To be useful, it requires
+ -- that the key be an array of uint32_t's, and
+ -- that the length be the number of uint32_t's in the key
+
+ The function hashword() is identical to hashlittle() on little-endian
+ machines, and identical to hashbig() on big-endian machines,
+ except that the length has to be measured in uint32_ts rather than in
+ bytes. hashlittle() is more complicated than hashword() only because
+ hashlittle() has to dance around fitting the key bytes into registers.
+--------------------------------------------------------------------
+*/
+uint32_t jenkins_hashword(
+const uint32_t *k, /* the key, an array of uint32_t values */
+size_t length, /* the length of the key, in uint32_ts */
+uint32_t initval) /* the previous hash, or an arbitrary value */
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + (((uint32_t)length)<<2) + initval;
+
+ /*------------------------------------------------- handle most of the key */
+ while (length > 3)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*------------------------------------------- handle the last 3 uint32_t's */
+ switch(length) /* all the case statements fall through */
+ {
+ case 3 : c+=k[2];
+ case 2 : b+=k[1];
+ case 1 : a+=k[0];
+ final(a,b,c);
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*------------------------------------------------------ report the result */
+ return c;
+}
+
+/*
+--------------------------------------------------------------------
+hashword2() -- same as hashword(), but take two seeds and return two
+32-bit values. pc and pb must both be nonnull, and *pc and *pb must
+both be initialized with seeds. If you pass in (*pb)==0, the output
+(*pc) will be the same as the return value from hashword().
+--------------------------------------------------------------------
+*/
+void jenkins_hashword2 (
+const uint32_t *k, /* the key, an array of uint32_t values */
+size_t length, /* the length of the key, in uint32_ts */
+uint32_t *pc, /* IN: seed OUT: primary hash value */
+uint32_t *pb) /* IN: more seed OUT: secondary hash value */
+{
+ uint32_t a,b,c;
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)(length<<2)) + *pc;
+ c += *pb;
+
+ /*------------------------------------------------- handle most of the key */
+ while (length > 3)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 3;
+ k += 3;
+ }
+
+ /*------------------------------------------- handle the last 3 uint32_t's */
+ switch(length) /* all the case statements fall through */
+ {
+ case 3 : c+=k[2];
+ case 2 : b+=k[1];
+ case 1 : a+=k[0];
+ final(a,b,c);
+ case 0: /* case 0: nothing left to add */
+ break;
+ }
+ /*------------------------------------------------------ report the result */
+ *pc=c; *pb=b;
+}
+
+/*
+-------------------------------------------------------------------------------
+hashlittle() -- hash a variable-length key into a 32-bit value
+ k : the key (the unaligned variable-length array of bytes)
+ length : the length of the key, counting by bytes
+ initval : can be any 4-byte value
+Returns a 32-bit value. Every bit of the key affects every bit of
+the return value. Two keys differing by one or two bits will have
+totally different hash values.
+
+The best hash table sizes are powers of 2. There is no need to do
+mod a prime (mod is sooo slow!). If you need less than 32 bits,
+use a bitmask. For example, if you need only 10 bits, do
+ h = (h & hashmask(10));
+In which case, the hash table should have hashsize(10) elements.
+
+If you are hashing n strings (uint8_t **)k, do it like this:
+ for (i=0, h=0; i<n; ++i) h = hashlittle( k[i], len[i], h);
+
+By Bob Jenkins, 2006. bob_jenkins@burtleburtle.net. You may use this
+code any way you wish, private, educational, or commercial. It's free.
+
+Use for hash table lookup, or anything where one collision in 2^^32 is
+acceptable. Do NOT use for cryptographic purposes.
+-------------------------------------------------------------------------------
+*/
+
+uint32_t jenkins_hashlittle( const void *key, size_t length, uint32_t initval)
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]&0xffffff" actually reads beyond the end of the string, but
+ * then masks off the part it's not allowed to read. Because the
+ * string is aligned, the masked-off tail is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But valgrind will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+#if !VALGRIND && !HAS_FEATURE_ADDRESS_SANITIZER
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff; break;
+ case 2 : a+=k[0]&0xffff; break;
+ case 1 : a+=k[0]&0xff; break;
+ case 0 : return c; /* zero length strings require no mixing */
+ }
+
+#else /* make valgrind happy */
+ {
+ const uint8_t *k8 = (const uint8_t *) k;
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : return c;
+ }
+ }
+
+#endif /* !valgrind */
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : return c; /* zero length requires no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ case 11: c+=((uint32_t)k[10])<<16;
+ case 10: c+=((uint32_t)k[9])<<8;
+ case 9 : c+=k[8];
+ case 8 : b+=((uint32_t)k[7])<<24;
+ case 7 : b+=((uint32_t)k[6])<<16;
+ case 6 : b+=((uint32_t)k[5])<<8;
+ case 5 : b+=k[4];
+ case 4 : a+=((uint32_t)k[3])<<24;
+ case 3 : a+=((uint32_t)k[2])<<16;
+ case 2 : a+=((uint32_t)k[1])<<8;
+ case 1 : a+=k[0];
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ return c;
+}
+
+/*
+ * hashlittle2: return 2 32-bit hash values
+ *
+ * This is identical to hashlittle(), except it returns two 32-bit hash
+ * values instead of just one. This is good enough for hash table
+ * lookup with 2^^64 buckets, or if you want a second hash if you're not
+ * happy with the first, or if you want a probably-unique 64-bit ID for
+ * the key. *pc is better mixed than *pb, so use *pc first. If you want
+ * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)".
+ */
+void jenkins_hashlittle2(
+ const void *key, /* the key to hash */
+ size_t length, /* length of the key */
+ uint32_t *pc, /* IN: primary initval, OUT: primary hash */
+ uint32_t *pb) /* IN: secondary initval, OUT: secondary hash */
+{
+ uint32_t a,b,c; /* internal state */
+ union { const void *ptr; size_t i; } u; /* needed for Mac Powerbook G4 */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + *pc;
+ c += *pb;
+
+ u.ptr = key;
+ if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]&0xffffff" actually reads beyond the end of the string, but
+ * then masks off the part it's not allowed to read. Because the
+ * string is aligned, the masked-off tail is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But valgrind will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+#if !VALGRIND && !HAS_FEATURE_ADDRESS_SANITIZER
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff; break;
+ case 2 : a+=k[0]&0xffff; break;
+ case 1 : a+=k[0]&0xff; break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+
+#else /* make valgrind happy */
+
+ {
+ const uint8_t *k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<8; /* fall through */
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<8; /* fall through */
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<8; /* fall through */
+ case 1 : a+=k8[0]; break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+ }
+
+#endif /* !valgrind */
+
+ } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
+ const uint16_t *k = (const uint16_t *)key; /* read 16-bit chunks */
+ const uint8_t *k8;
+
+ /*--------------- all but last block: aligned reads and different mixing */
+ while (length > 12)
+ {
+ a += k[0] + (((uint32_t)k[1])<<16);
+ b += k[2] + (((uint32_t)k[3])<<16);
+ c += k[4] + (((uint32_t)k[5])<<16);
+ mix(a,b,c);
+ length -= 12;
+ k += 6;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ k8 = (const uint8_t *)k;
+ switch(length)
+ {
+ case 12: c+=k[4]+(((uint32_t)k[5])<<16);
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 11: c+=((uint32_t)k8[10])<<16; /* fall through */
+ case 10: c+=k[4];
+ b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 9 : c+=k8[8]; /* fall through */
+ case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 7 : b+=((uint32_t)k8[6])<<16; /* fall through */
+ case 6 : b+=k[2];
+ a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 5 : b+=k8[4]; /* fall through */
+ case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
+ break;
+ case 3 : a+=((uint32_t)k8[2])<<16; /* fall through */
+ case 2 : a+=k[0];
+ break;
+ case 1 : a+=k8[0];
+ break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ a += ((uint32_t)k[1])<<8;
+ a += ((uint32_t)k[2])<<16;
+ a += ((uint32_t)k[3])<<24;
+ b += k[4];
+ b += ((uint32_t)k[5])<<8;
+ b += ((uint32_t)k[6])<<16;
+ b += ((uint32_t)k[7])<<24;
+ c += k[8];
+ c += ((uint32_t)k[9])<<8;
+ c += ((uint32_t)k[10])<<16;
+ c += ((uint32_t)k[11])<<24;
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=((uint32_t)k[11])<<24;
+ case 11: c+=((uint32_t)k[10])<<16;
+ case 10: c+=((uint32_t)k[9])<<8;
+ case 9 : c+=k[8];
+ case 8 : b+=((uint32_t)k[7])<<24;
+ case 7 : b+=((uint32_t)k[6])<<16;
+ case 6 : b+=((uint32_t)k[5])<<8;
+ case 5 : b+=k[4];
+ case 4 : a+=((uint32_t)k[3])<<24;
+ case 3 : a+=((uint32_t)k[2])<<16;
+ case 2 : a+=((uint32_t)k[1])<<8;
+ case 1 : a+=k[0];
+ break;
+ case 0 : *pc=c; *pb=b; return; /* zero length strings require no mixing */
+ }
+ }
+
+ final(a,b,c);
+ *pc=c; *pb=b;
+}
+
+/*
+ * hashbig():
+ * This is the same as hashword() on big-endian machines. It is different
+ * from hashlittle() on all machines. hashbig() takes advantage of
+ * big-endian byte ordering.
+ */
+uint32_t jenkins_hashbig( const void *key, size_t length, uint32_t initval)
+{
+ uint32_t a,b,c;
+ union { const void *ptr; size_t i; } u; /* to cast key to (size_t) happily */
+
+ /* Set up the internal state */
+ a = b = c = 0xdeadbeef + ((uint32_t)length) + initval;
+
+ u.ptr = key;
+ if (HASH_BIG_ENDIAN && ((u.i & 0x3) == 0)) {
+ const uint32_t *k = (const uint32_t *)key; /* read 32-bit chunks */
+
+ /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += k[0];
+ b += k[1];
+ c += k[2];
+ mix(a,b,c);
+ length -= 12;
+ k += 3;
+ }
+
+ /*----------------------------- handle the last (probably partial) block */
+ /*
+ * "k[2]<<8" actually reads beyond the end of the string, but
+ * then shifts out the part it's not allowed to read. Because the
+ * string is aligned, the illegal read is in the same word as the
+ * rest of the string. Every machine with memory protection I've seen
+ * does it on word boundaries, so is OK with this. But valgrind will
+ * still catch it and complain. The masking trick does make the hash
+ * noticeably faster for short strings (like English words).
+ */
+#if !VALGRIND && !HAS_FEATURE_ADDRESS_SANITIZER
+
+ switch(length)
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=k[2]&0xffffff00; b+=k[1]; a+=k[0]; break;
+ case 10: c+=k[2]&0xffff0000; b+=k[1]; a+=k[0]; break;
+ case 9 : c+=k[2]&0xff000000; b+=k[1]; a+=k[0]; break;
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=k[1]&0xffffff00; a+=k[0]; break;
+ case 6 : b+=k[1]&0xffff0000; a+=k[0]; break;
+ case 5 : b+=k[1]&0xff000000; a+=k[0]; break;
+ case 4 : a+=k[0]; break;
+ case 3 : a+=k[0]&0xffffff00; break;
+ case 2 : a+=k[0]&0xffff0000; break;
+ case 1 : a+=k[0]&0xff000000; break;
+ case 0 : return c; /* zero length strings require no mixing */
+ }
+
+#else /* make valgrind happy */
+
+ {
+ const uint8_t *k8 = (const uint8_t *)k;
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
+ case 11: c+=((uint32_t)k8[10])<<8; /* fall through */
+ case 10: c+=((uint32_t)k8[9])<<16; /* fall through */
+ case 9 : c+=((uint32_t)k8[8])<<24; /* fall through */
+ case 8 : b+=k[1]; a+=k[0]; break;
+ case 7 : b+=((uint32_t)k8[6])<<8; /* fall through */
+ case 6 : b+=((uint32_t)k8[5])<<16; /* fall through */
+ case 5 : b+=((uint32_t)k8[4])<<24; /* fall through */
+ case 4 : a+=k[0]; break;
+ case 3 : a+=((uint32_t)k8[2])<<8; /* fall through */
+ case 2 : a+=((uint32_t)k8[1])<<16; /* fall through */
+ case 1 : a+=((uint32_t)k8[0])<<24; break;
+ case 0 : return c;
+ }
+ }
+
+#endif /* !VALGRIND */
+
+ } else { /* need to read the key one byte at a time */
+ const uint8_t *k = (const uint8_t *)key;
+
+ /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
+ while (length > 12)
+ {
+ a += ((uint32_t)k[0])<<24;
+ a += ((uint32_t)k[1])<<16;
+ a += ((uint32_t)k[2])<<8;
+ a += ((uint32_t)k[3]);
+ b += ((uint32_t)k[4])<<24;
+ b += ((uint32_t)k[5])<<16;
+ b += ((uint32_t)k[6])<<8;
+ b += ((uint32_t)k[7]);
+ c += ((uint32_t)k[8])<<24;
+ c += ((uint32_t)k[9])<<16;
+ c += ((uint32_t)k[10])<<8;
+ c += ((uint32_t)k[11]);
+ mix(a,b,c);
+ length -= 12;
+ k += 12;
+ }
+
+ /*-------------------------------- last block: affect all 32 bits of (c) */
+ switch(length) /* all the case statements fall through */
+ {
+ case 12: c+=k[11];
+ case 11: c+=((uint32_t)k[10])<<8;
+ case 10: c+=((uint32_t)k[9])<<16;
+ case 9 : c+=((uint32_t)k[8])<<24;
+ case 8 : b+=k[7];
+ case 7 : b+=((uint32_t)k[6])<<8;
+ case 6 : b+=((uint32_t)k[5])<<16;
+ case 5 : b+=((uint32_t)k[4])<<24;
+ case 4 : a+=k[3];
+ case 3 : a+=((uint32_t)k[2])<<8;
+ case 2 : a+=((uint32_t)k[1])<<16;
+ case 1 : a+=((uint32_t)k[0])<<24;
+ break;
+ case 0 : return c;
+ }
+ }
+
+ final(a,b,c);
+ return c;
+}
+
+#ifdef SELF_TEST
+
+/* used for timings */
+void driver1()
+{
+ uint8_t buf[256];
+ uint32_t i;
+ uint32_t h=0;
+ time_t a,z;
+
+ time(&a);
+ for (i=0; i<256; ++i) buf[i] = 'x';
+ for (i=0; i<1; ++i)
+ {
+ h = hashlittle(&buf[0],1,h);
+ }
+ time(&z);
+  if (z-a > 0) printf("time %ld %.8x\n", (long)(z-a), h);
+}
+
+/* check that every input bit changes every output bit half the time */
+#define HASHSTATE 1
+#define HASHLEN 1
+#define MAXPAIR 60
+#define MAXLEN 70
+void driver2()
+{
+ uint8_t qa[MAXLEN+1], qb[MAXLEN+2], *a = &qa[0], *b = &qb[1];
+ uint32_t c[HASHSTATE], d[HASHSTATE], i=0, j=0, k, l, m=0, z;
+ uint32_t e[HASHSTATE],f[HASHSTATE],g[HASHSTATE],h[HASHSTATE];
+ uint32_t x[HASHSTATE],y[HASHSTATE];
+ uint32_t hlen;
+
+ printf("No more than %d trials should ever be needed \n",MAXPAIR/2);
+ for (hlen=0; hlen < MAXLEN; ++hlen)
+ {
+ z=0;
+ for (i=0; i<hlen; ++i) /*----------------------- for each input byte, */
+ {
+ for (j=0; j<8; ++j) /*------------------------ for each input bit, */
+ {
+      for (m=1; m<8; ++m) /*------------ for several possible initvals, */
+ {
+ for (l=0; l<HASHSTATE; ++l)
+ e[l]=f[l]=g[l]=h[l]=x[l]=y[l]=~((uint32_t)0);
+
+ /*---- check that every output bit is affected by that input bit */
+ for (k=0; k<MAXPAIR; k+=2)
+ {
+ uint32_t finished=1;
+ /* keys have one bit different */
+ for (l=0; l<hlen+1; ++l) {a[l] = b[l] = (uint8_t)0;}
+ /* have a and b be two keys differing in only one bit */
+ a[i] ^= (k<<j);
+ a[i] ^= (k>>(8-j));
+ c[0] = hashlittle(a, hlen, m);
+ b[i] ^= ((k+1)<<j);
+ b[i] ^= ((k+1)>>(8-j));
+ d[0] = hashlittle(b, hlen, m);
+ /* check every bit is 1, 0, set, and not set at least once */
+ for (l=0; l<HASHSTATE; ++l)
+ {
+ e[l] &= (c[l]^d[l]);
+ f[l] &= ~(c[l]^d[l]);
+ g[l] &= c[l];
+ h[l] &= ~c[l];
+ x[l] &= d[l];
+ y[l] &= ~d[l];
+ if (e[l]|f[l]|g[l]|h[l]|x[l]|y[l]) finished=0;
+ }
+ if (finished) break;
+ }
+ if (k>z) z=k;
+ if (k==MAXPAIR)
+ {
+ printf("Some bit didn't change: ");
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x ",
+ e[0],f[0],g[0],h[0],x[0],y[0]);
+ printf("i %d j %d m %d len %d\n", i, j, m, hlen);
+ }
+ if (z==MAXPAIR) goto done;
+ }
+ }
+ }
+ done:
+ if (z < MAXPAIR)
+ {
+ printf("Mix success %2d bytes %2d initvals ",i,m);
+ printf("required %d trials\n", z/2);
+ }
+ }
+ printf("\n");
+}
+
+/* Check for reading beyond the end of the buffer and alignment problems */
+void driver3()
+{
+ uint8_t buf[MAXLEN+20], *b;
+ uint32_t len;
+ uint8_t q[] = "This is the time for all good men to come to the aid of their country...";
+ uint32_t h;
+ uint8_t qq[] = "xThis is the time for all good men to come to the aid of their country...";
+ uint32_t i;
+ uint8_t qqq[] = "xxThis is the time for all good men to come to the aid of their country...";
+ uint32_t j;
+ uint8_t qqqq[] = "xxxThis is the time for all good men to come to the aid of their country...";
+ uint32_t ref,x,y;
+ uint8_t *p;
+
+ printf("Endianness. These lines should all be the same (for values filled in):\n");
+ printf("%.8x %.8x %.8x\n",
+ hashword((const uint32_t *)q, (sizeof(q)-1)/4, 13),
+ hashword((const uint32_t *)q, (sizeof(q)-5)/4, 13),
+ hashword((const uint32_t *)q, (sizeof(q)-9)/4, 13));
+ p = q;
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qq[1];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qqq[2];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ p = &qqqq[3];
+ printf("%.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x %.8x\n",
+ hashlittle(p, sizeof(q)-1, 13), hashlittle(p, sizeof(q)-2, 13),
+ hashlittle(p, sizeof(q)-3, 13), hashlittle(p, sizeof(q)-4, 13),
+ hashlittle(p, sizeof(q)-5, 13), hashlittle(p, sizeof(q)-6, 13),
+ hashlittle(p, sizeof(q)-7, 13), hashlittle(p, sizeof(q)-8, 13),
+ hashlittle(p, sizeof(q)-9, 13), hashlittle(p, sizeof(q)-10, 13),
+ hashlittle(p, sizeof(q)-11, 13), hashlittle(p, sizeof(q)-12, 13));
+ printf("\n");
+
+ /* check that hashlittle2 and hashlittle produce the same results */
+ i=47; j=0;
+ hashlittle2(q, sizeof(q), &i, &j);
+ if (hashlittle(q, sizeof(q), 47) != i)
+ printf("hashlittle2 and hashlittle mismatch\n");
+
+ /* check that hashword2 and hashword produce the same results */
+ len = 0xdeadbeef;
+ i=47, j=0;
+ hashword2(&len, 1, &i, &j);
+ if (hashword(&len, 1, 47) != i)
+ printf("hashword2 and hashword mismatch %x %x\n",
+ i, hashword(&len, 1, 47));
+
+ /* check hashlittle doesn't read before or after the ends of the string */
+ for (h=0, b=buf+1; h<8; ++h, ++b)
+ {
+ for (i=0; i<MAXLEN; ++i)
+ {
+ len = i;
+ for (j=0; j<i; ++j) *(b+j)=0;
+
+ /* these should all be equal */
+ ref = hashlittle(b, len, (uint32_t)1);
+ *(b+i)=(uint8_t)~0;
+ *(b-1)=(uint8_t)~0;
+ x = hashlittle(b, len, (uint32_t)1);
+ y = hashlittle(b, len, (uint32_t)1);
+ if ((ref != x) || (ref != y))
+ {
+ printf("alignment error: %.8x %.8x %.8x %d %d\n",ref,x,y,
+ h, i);
+ }
+ }
+ }
+}
+
+/* check for problems with nulls */
+void driver4()
+{
+ uint8_t buf[1];
+ uint32_t h,i,state[HASHSTATE];
+
+ buf[0] = ~0;
+ for (i=0; i<HASHSTATE; ++i) state[i] = 1;
+ printf("These should all be different\n");
+ for (i=0, h=0; i<8; ++i)
+ {
+ h = hashlittle(buf, 0, h);
+ printf("%2ld 0-byte strings, hash is %.8x\n", i, h);
+ }
+}
+
+void driver5()
+{
+ uint32_t b,c;
+ b=0, c=0, hashlittle2("", 0, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* deadbeef deadbeef */
+ b=0xdeadbeef, c=0, hashlittle2("", 0, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* bd5b7dde deadbeef */
+ b=0xdeadbeef, c=0xdeadbeef, hashlittle2("", 0, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* 9c093ccd bd5b7dde */
+ b=0, c=0, hashlittle2("Four score and seven years ago", 30, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* 17770551 ce7226e6 */
+ b=1, c=0, hashlittle2("Four score and seven years ago", 30, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* e3607cae bd371de4 */
+ b=0, c=1, hashlittle2("Four score and seven years ago", 30, &c, &b);
+ printf("hash is %.8lx %.8lx\n", c, b); /* cd628161 6cbea4b3 */
+ c = hashlittle("Four score and seven years ago", 30, 0);
+ printf("hash is %.8lx\n", c); /* 17770551 */
+ c = hashlittle("Four score and seven years ago", 30, 1);
+ printf("hash is %.8lx\n", c); /* cd628161 */
+}
+
+int main()
+{
+ driver1(); /* test that the key is hashed: used for timings */
+ driver2(); /* test that whole key is hashed thoroughly */
+ driver3(); /* test that nothing but the key is hashed */
+ driver4(); /* test hashing multiple buffers (all buffers are null) */
+ driver5(); /* test the hash against known vectors */
+ return 1;
+}
+
+#endif /* SELF_TEST */
diff --git a/src/journal/lookup3.h b/src/journal/lookup3.h
new file mode 100644
index 0000000..787921f
--- /dev/null
+++ b/src/journal/lookup3.h
@@ -0,0 +1,22 @@
+#pragma once
+
+#include <inttypes.h>
+#include <sys/types.h>
+
+#include "macro.h"
+
+uint32_t jenkins_hashword(const uint32_t *k, size_t length, uint32_t initval) _pure_;
+void jenkins_hashword2(const uint32_t *k, size_t length, uint32_t *pc, uint32_t *pb);
+
+uint32_t jenkins_hashlittle(const void *key, size_t length, uint32_t initval) _pure_;
+void jenkins_hashlittle2(const void *key, size_t length, uint32_t *pc, uint32_t *pb);
+
+uint32_t jenkins_hashbig(const void *key, size_t length, uint32_t initval) _pure_;
+
+static inline uint64_t hash64(const void *data, size_t length) {
+ uint32_t a = 0, b = 0;
+
+ jenkins_hashlittle2(data, length, &a, &b);
+
+ return ((uint64_t) a << 32ULL) | (uint64_t) b;
+}
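[Editor's note: a self-contained sketch of how a consumer of this header might use hash64(); the wrapper name and the field string are illustrative, not part of the patch.]

        #include <stdint.h>
        #include <string.h>

        #include "lookup3.h"

        /* Derive a 64-bit key from a journal field name; per the hashlittle2()
         * comment in lookup3.c, the primary hash (*pc) is the better mixed of
         * the two 32-bit values that hash64() combines. */
        static uint64_t field_key(const char *field) {
                return hash64(field, strlen(field));
        }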
diff --git a/src/journal/meson.build b/src/journal/meson.build
new file mode 100644
index 0000000..e03d6dc
--- /dev/null
+++ b/src/journal/meson.build
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: LGPL-2.1+
+
+journal_client_sources = files('''
+ audit-type.c
+ audit-type.h
+ catalog.c
+ catalog.h
+ compress.c
+ compress.h
+ journal-def.h
+ journal-file.c
+ journal-file.h
+ journal-send.c
+ journal-vacuum.c
+ journal-vacuum.h
+ journal-verify.c
+ journal-verify.h
+ lookup3.c
+ lookup3.h
+ mmap-cache.c
+ mmap-cache.h
+ sd-journal.c
+'''.split())
+
+if conf.get('HAVE_GCRYPT') == 1
+ journal_client_sources += files('''
+ journal-authenticate.c
+ journal-authenticate.h
+ fsprg.c
+ fsprg.h
+ '''.split())
+endif
+
+############################################################
+
+audit_type_includes = [config_h,
+ missing_audit_h,
+ 'linux/audit.h']
+if conf.get('HAVE_AUDIT') == 1
+ audit_type_includes += 'libaudit.h'
+endif
+
+generate_audit_type_list = find_program('generate-audit_type-list.sh')
+audit_type_list_txt = custom_target(
+ 'audit_type-list.txt',
+ output : 'audit_type-list.txt',
+ command : [generate_audit_type_list, cpp] + audit_type_includes,
+ capture : true)
+
+audit_type_to_name = custom_target(
+ 'audit_type-to-name.h',
+ input : ['audit_type-to-name.awk', audit_type_list_txt],
+ output : 'audit_type-to-name.h',
+ command : [awk, '-f', '@INPUT0@', '@INPUT1@'],
+ capture : true)
+
+journal_client_sources += [audit_type_to_name]
+
+libjournal_client = static_library(
+ 'journal-client',
+ journal_client_sources,
+ include_directories : includes,
+ c_args : ['-fvisibility=default'])
+
+############################################################
+
+libjournal_core_sources = files('''
+ journald-audit.c
+ journald-audit.h
+ journald-console.c
+ journald-console.h
+ journald-context.c
+ journald-context.h
+ journald-kmsg.c
+ journald-kmsg.h
+ journald-native.c
+ journald-native.h
+ journald-rate-limit.c
+ journald-rate-limit.h
+ journald-server.c
+ journald-server.h
+ journald-stream.c
+ journald-stream.h
+ journald-syslog.c
+ journald-syslog.h
+ journald-wall.c
+ journald-wall.h
+ journal-internal.h
+'''.split())
+
+systemd_journald_sources = files('''
+ journald.c
+ journald-server.h
+'''.split())
+
+journald_gperf_c = custom_target(
+ 'journald-gperf.c',
+ input : 'journald-gperf.gperf',
+ output : 'journald-gperf.c',
+ command : [gperf, '@INPUT@', '--output-file', '@OUTPUT@'])
+
+systemd_cat_sources = files('cat.c')
+
+journalctl_sources = files('journalctl.c')
+
+if conf.get('HAVE_QRENCODE') == 1
+ journalctl_sources += files('journal-qrcode.c',
+ 'journal-qrcode.h')
+endif
+
+install_data('journald.conf',
+ install_dir : pkgsysconfdir)
+
+meson.add_install_script(
+ 'sh', '-c',
+ mkdir_p.format('/var/log/journal'))
+meson.add_install_script(
+ 'sh', '-c',
+ '''chown 0:0 $DESTDIR/var/log/journal &&
+ chmod 755 $DESTDIR/var/log/journal || :''')
+if get_option('adm-group')
+ meson.add_install_script(
+ 'sh', '-c',
+ 'setfacl -nm g:adm:rx,d:g:adm:rx $DESTDIR/var/log/journal || :')
+endif
+if get_option('wheel-group')
+ meson.add_install_script(
+ 'sh', '-c',
+ 'setfacl -nm g:wheel:rx,d:g:wheel:rx $DESTDIR/var/log/journal || :')
+endif
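[Editor's note: the adm-group and wheel-group branches above are driven by boolean build options; the option names come from the get_option() calls, while the command lines below are only an illustrative sketch.]

        meson setup build -Dadm-group=true -Dwheel-group=false
        meson configure build -Dwheel-group=true   # adjust an existing build directory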
diff --git a/src/journal/mmap-cache.c b/src/journal/mmap-cache.c
new file mode 100644
index 0000000..0dc453e
--- /dev/null
+++ b/src/journal/mmap-cache.c
@@ -0,0 +1,668 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+
+#include "alloc-util.h"
+#include "fd-util.h"
+#include "hashmap.h"
+#include "list.h"
+#include "log.h"
+#include "macro.h"
+#include "mmap-cache.h"
+#include "sigbus.h"
+#include "util.h"
+
+typedef struct Window Window;
+typedef struct Context Context;
+
+struct Window {
+ MMapCache *cache;
+
+ bool invalidated:1;
+ bool keep_always:1;
+ bool in_unused:1;
+
+ int prot;
+ void *ptr;
+ uint64_t offset;
+ size_t size;
+
+ MMapFileDescriptor *fd;
+
+ LIST_FIELDS(Window, by_fd);
+ LIST_FIELDS(Window, unused);
+
+ LIST_HEAD(Context, contexts);
+};
+
+struct Context {
+ MMapCache *cache;
+ unsigned id;
+ Window *window;
+
+ LIST_FIELDS(Context, by_window);
+};
+
+struct MMapFileDescriptor {
+ MMapCache *cache;
+ int fd;
+ bool sigbus;
+ LIST_HEAD(Window, windows);
+};
+
+struct MMapCache {
+ unsigned n_ref;
+ unsigned n_windows;
+
+ unsigned n_hit, n_missed;
+
+ Hashmap *fds;
+ Context *contexts[MMAP_CACHE_MAX_CONTEXTS];
+
+ LIST_HEAD(Window, unused);
+ Window *last_unused;
+};
+
+#define WINDOWS_MIN 64
+
+#if ENABLE_DEBUG_MMAP_CACHE
+/* Tiny windows increase mmap activity and the chance of exposing unsafe use. */
+# define WINDOW_SIZE (page_size())
+#else
+# define WINDOW_SIZE (8ULL*1024ULL*1024ULL)
+#endif
+
+MMapCache* mmap_cache_new(void) {
+ MMapCache *m;
+
+ m = new0(MMapCache, 1);
+ if (!m)
+ return NULL;
+
+ m->n_ref = 1;
+ return m;
+}
+
+static void window_unlink(Window *w) {
+ Context *c;
+
+ assert(w);
+
+ if (w->ptr)
+ munmap(w->ptr, w->size);
+
+ if (w->fd)
+ LIST_REMOVE(by_fd, w->fd->windows, w);
+
+ if (w->in_unused) {
+ if (w->cache->last_unused == w)
+ w->cache->last_unused = w->unused_prev;
+
+ LIST_REMOVE(unused, w->cache->unused, w);
+ }
+
+ LIST_FOREACH(by_window, c, w->contexts) {
+ assert(c->window == w);
+ c->window = NULL;
+ }
+}
+
+static void window_invalidate(Window *w) {
+ assert(w);
+
+ if (w->invalidated)
+ return;
+
+ /* Replace the window with anonymous pages. This is useful
+ * when we hit a SIGBUS and want to make sure the file cannot
+ * trigger any further SIGBUS, possibly overrunning the sigbus
+ * queue. */
+
+ assert_se(mmap(w->ptr, w->size, w->prot, MAP_PRIVATE|MAP_ANONYMOUS|MAP_FIXED, -1, 0) == w->ptr);
+ w->invalidated = true;
+}
+
+static void window_free(Window *w) {
+ assert(w);
+
+ window_unlink(w);
+ w->cache->n_windows--;
+ free(w);
+}
+
+_pure_ static bool window_matches(Window *w, int prot, uint64_t offset, size_t size) {
+ assert(w);
+ assert(size > 0);
+
+ return
+ prot == w->prot &&
+ offset >= w->offset &&
+ offset + size <= w->offset + w->size;
+}
+
+_pure_ static bool window_matches_fd(Window *w, MMapFileDescriptor *f, int prot, uint64_t offset, size_t size) {
+ assert(w);
+ assert(f);
+
+ return
+ w->fd &&
+ f->fd == w->fd->fd &&
+ window_matches(w, prot, offset, size);
+}
+
+static Window *window_add(MMapCache *m, MMapFileDescriptor *f, int prot, bool keep_always, uint64_t offset, size_t size, void *ptr) {
+ Window *w;
+
+ assert(m);
+ assert(f);
+
+ if (!m->last_unused || m->n_windows <= WINDOWS_MIN) {
+
+ /* Allocate a new window */
+ w = new0(Window, 1);
+ if (!w)
+ return NULL;
+ m->n_windows++;
+ } else {
+
+ /* Reuse an existing one */
+ w = m->last_unused;
+ window_unlink(w);
+ zero(*w);
+ }
+
+ w->cache = m;
+ w->fd = f;
+ w->prot = prot;
+ w->keep_always = keep_always;
+ w->offset = offset;
+ w->size = size;
+ w->ptr = ptr;
+
+ LIST_PREPEND(by_fd, f->windows, w);
+
+ return w;
+}
+
+static void context_detach_window(Context *c) {
+ Window *w;
+
+ assert(c);
+
+ if (!c->window)
+ return;
+
+ w = TAKE_PTR(c->window);
+ LIST_REMOVE(by_window, w->contexts, c);
+
+ if (!w->contexts && !w->keep_always) {
+ /* Not used anymore? */
+#if ENABLE_DEBUG_MMAP_CACHE
+ /* Unmap unused windows immediately to expose use-after-unmap
+ * by SIGSEGV. */
+ window_free(w);
+#else
+ LIST_PREPEND(unused, c->cache->unused, w);
+ if (!c->cache->last_unused)
+ c->cache->last_unused = w;
+
+ w->in_unused = true;
+#endif
+ }
+}
+
+static void context_attach_window(Context *c, Window *w) {
+ assert(c);
+ assert(w);
+
+ if (c->window == w)
+ return;
+
+ context_detach_window(c);
+
+ if (w->in_unused) {
+ /* Used again? */
+ LIST_REMOVE(unused, c->cache->unused, w);
+ if (c->cache->last_unused == w)
+ c->cache->last_unused = w->unused_prev;
+
+ w->in_unused = false;
+ }
+
+ c->window = w;
+ LIST_PREPEND(by_window, w->contexts, c);
+}
+
+static Context *context_add(MMapCache *m, unsigned id) {
+ Context *c;
+
+ assert(m);
+
+ c = m->contexts[id];
+ if (c)
+ return c;
+
+ c = new0(Context, 1);
+ if (!c)
+ return NULL;
+
+ c->cache = m;
+ c->id = id;
+
+ assert(!m->contexts[id]);
+ m->contexts[id] = c;
+
+ return c;
+}
+
+static void context_free(Context *c) {
+ assert(c);
+
+ context_detach_window(c);
+
+ if (c->cache) {
+ assert(c->cache->contexts[c->id] == c);
+ c->cache->contexts[c->id] = NULL;
+ }
+
+ free(c);
+}
+
+static MMapCache *mmap_cache_free(MMapCache *m) {
+ int i;
+
+ assert(m);
+
+ for (i = 0; i < MMAP_CACHE_MAX_CONTEXTS; i++)
+ if (m->contexts[i])
+ context_free(m->contexts[i]);
+
+ hashmap_free(m->fds);
+
+ while (m->unused)
+ window_free(m->unused);
+
+ return mfree(m);
+}
+
+DEFINE_TRIVIAL_REF_UNREF_FUNC(MMapCache, mmap_cache, mmap_cache_free);
+
+static int make_room(MMapCache *m) {
+ assert(m);
+
+ if (!m->last_unused)
+ return 0;
+
+ window_free(m->last_unused);
+ return 1;
+}
+
+static int try_context(
+ MMapCache *m,
+ MMapFileDescriptor *f,
+ int prot,
+ unsigned context,
+ bool keep_always,
+ uint64_t offset,
+ size_t size,
+ void **ret,
+ size_t *ret_size) {
+
+ Context *c;
+
+ assert(m);
+ assert(m->n_ref > 0);
+ assert(f);
+ assert(size > 0);
+ assert(ret);
+
+ c = m->contexts[context];
+ if (!c)
+ return 0;
+
+ assert(c->id == context);
+
+ if (!c->window)
+ return 0;
+
+ if (!window_matches_fd(c->window, f, prot, offset, size)) {
+
+ /* Drop the reference to the window, since it's unnecessary now */
+ context_detach_window(c);
+ return 0;
+ }
+
+ if (c->window->fd->sigbus)
+ return -EIO;
+
+ c->window->keep_always = c->window->keep_always || keep_always;
+
+ *ret = (uint8_t*) c->window->ptr + (offset - c->window->offset);
+ if (ret_size)
+ *ret_size = c->window->size - (offset - c->window->offset);
+
+ return 1;
+}
+
+static int find_mmap(
+ MMapCache *m,
+ MMapFileDescriptor *f,
+ int prot,
+ unsigned context,
+ bool keep_always,
+ uint64_t offset,
+ size_t size,
+ void **ret,
+ size_t *ret_size) {
+
+ Window *w;
+ Context *c;
+
+ assert(m);
+ assert(m->n_ref > 0);
+ assert(f);
+ assert(size > 0);
+
+ if (f->sigbus)
+ return -EIO;
+
+ LIST_FOREACH(by_fd, w, f->windows)
+ if (window_matches(w, prot, offset, size))
+ break;
+
+ if (!w)
+ return 0;
+
+ c = context_add(m, context);
+ if (!c)
+ return -ENOMEM;
+
+ context_attach_window(c, w);
+ w->keep_always = w->keep_always || keep_always;
+
+ *ret = (uint8_t*) w->ptr + (offset - w->offset);
+ if (ret_size)
+ *ret_size = w->size - (offset - w->offset);
+
+ return 1;
+}
+
+static int mmap_try_harder(MMapCache *m, void *addr, MMapFileDescriptor *f, int prot, int flags, uint64_t offset, size_t size, void **res) {
+ void *ptr;
+
+ assert(m);
+ assert(f);
+ assert(res);
+
+ for (;;) {
+ int r;
+
+ ptr = mmap(addr, size, prot, flags, f->fd, offset);
+ if (ptr != MAP_FAILED)
+ break;
+ if (errno != ENOMEM)
+ return negative_errno();
+
+ r = make_room(m);
+ if (r < 0)
+ return r;
+ if (r == 0)
+ return -ENOMEM;
+ }
+
+ *res = ptr;
+ return 0;
+}
+
+static int add_mmap(
+ MMapCache *m,
+ MMapFileDescriptor *f,
+ int prot,
+ unsigned context,
+ bool keep_always,
+ uint64_t offset,
+ size_t size,
+ struct stat *st,
+ void **ret,
+ size_t *ret_size) {
+
+ uint64_t woffset, wsize;
+ Context *c;
+ Window *w;
+ void *d;
+ int r;
+
+ assert(m);
+ assert(m->n_ref > 0);
+ assert(f);
+ assert(size > 0);
+ assert(ret);
+
+ woffset = offset & ~((uint64_t) page_size() - 1ULL);
+ wsize = size + (offset - woffset);
+ wsize = PAGE_ALIGN(wsize);
+
+ if (wsize < WINDOW_SIZE) {
+ uint64_t delta;
+
+ delta = PAGE_ALIGN((WINDOW_SIZE - wsize) / 2);
+
+ if (delta > offset)
+ woffset = 0;
+ else
+ woffset -= delta;
+
+ wsize = WINDOW_SIZE;
+ }
+
+ if (st) {
+                /* Memory maps that are larger than the files
+ underneath have undefined behavior. Hence, clamp
+ things to the file size if we know it */
+
+ if (woffset >= (uint64_t) st->st_size)
+ return -EADDRNOTAVAIL;
+
+ if (woffset + wsize > (uint64_t) st->st_size)
+ wsize = PAGE_ALIGN(st->st_size - woffset);
+ }
+
+ r = mmap_try_harder(m, NULL, f, prot, MAP_SHARED, woffset, wsize, &d);
+ if (r < 0)
+ return r;
+
+ c = context_add(m, context);
+ if (!c)
+ goto outofmem;
+
+ w = window_add(m, f, prot, keep_always, woffset, wsize, d);
+ if (!w)
+ goto outofmem;
+
+ context_attach_window(c, w);
+
+ *ret = (uint8_t*) w->ptr + (offset - w->offset);
+ if (ret_size)
+ *ret_size = w->size - (offset - w->offset);
+
+ return 1;
+
+outofmem:
+ (void) munmap(d, wsize);
+ return -ENOMEM;
+}
+
+int mmap_cache_get(
+ MMapCache *m,
+ MMapFileDescriptor *f,
+ int prot,
+ unsigned context,
+ bool keep_always,
+ uint64_t offset,
+ size_t size,
+ struct stat *st,
+ void **ret,
+ size_t *ret_size) {
+
+ int r;
+
+ assert(m);
+ assert(m->n_ref > 0);
+ assert(f);
+ assert(size > 0);
+ assert(ret);
+ assert(context < MMAP_CACHE_MAX_CONTEXTS);
+
+ /* Check whether the current context is the right one already */
+ r = try_context(m, f, prot, context, keep_always, offset, size, ret, ret_size);
+ if (r != 0) {
+ m->n_hit++;
+ return r;
+ }
+
+ /* Search for a matching mmap */
+ r = find_mmap(m, f, prot, context, keep_always, offset, size, ret, ret_size);
+ if (r != 0) {
+ m->n_hit++;
+ return r;
+ }
+
+ m->n_missed++;
+
+ /* Create a new mmap */
+ return add_mmap(m, f, prot, context, keep_always, offset, size, st, ret, ret_size);
+}
+
+unsigned mmap_cache_get_hit(MMapCache *m) {
+ assert(m);
+
+ return m->n_hit;
+}
+
+unsigned mmap_cache_get_missed(MMapCache *m) {
+ assert(m);
+
+ return m->n_missed;
+}
+
+static void mmap_cache_process_sigbus(MMapCache *m) {
+ bool found = false;
+ MMapFileDescriptor *f;
+ Iterator i;
+ int r;
+
+ assert(m);
+
+ /* Iterate through all triggered pages and mark their files as
+ * invalidated */
+ for (;;) {
+ bool ours;
+ void *addr;
+
+ r = sigbus_pop(&addr);
+ if (_likely_(r == 0))
+ break;
+ if (r < 0) {
+ log_error_errno(r, "SIGBUS handling failed: %m");
+ abort();
+ }
+
+ ours = false;
+ HASHMAP_FOREACH(f, m->fds, i) {
+ Window *w;
+
+ LIST_FOREACH(by_fd, w, f->windows) {
+ if ((uint8_t*) addr >= (uint8_t*) w->ptr &&
+ (uint8_t*) addr < (uint8_t*) w->ptr + w->size) {
+ found = ours = f->sigbus = true;
+ break;
+ }
+ }
+
+ if (ours)
+ break;
+ }
+
+ /* Didn't find a matching window, give up */
+ if (!ours) {
+ log_error("Unknown SIGBUS page, aborting.");
+ abort();
+ }
+ }
+
+ /* The list of triggered pages is now empty. Now, let's remap
+ * all windows of the triggered file to anonymous maps, so
+ * that no page of the file in question is triggered again, so
+ * that we can be sure not to hit the queue size limit. */
+ if (_likely_(!found))
+ return;
+
+ HASHMAP_FOREACH(f, m->fds, i) {
+ Window *w;
+
+ if (!f->sigbus)
+ continue;
+
+ LIST_FOREACH(by_fd, w, f->windows)
+ window_invalidate(w);
+ }
+}
+
+bool mmap_cache_got_sigbus(MMapCache *m, MMapFileDescriptor *f) {
+ assert(m);
+ assert(f);
+
+ mmap_cache_process_sigbus(m);
+
+ return f->sigbus;
+}
+
+MMapFileDescriptor* mmap_cache_add_fd(MMapCache *m, int fd) {
+ MMapFileDescriptor *f;
+ int r;
+
+ assert(m);
+ assert(fd >= 0);
+
+ f = hashmap_get(m->fds, FD_TO_PTR(fd));
+ if (f)
+ return f;
+
+ r = hashmap_ensure_allocated(&m->fds, NULL);
+ if (r < 0)
+ return NULL;
+
+ f = new0(MMapFileDescriptor, 1);
+ if (!f)
+ return NULL;
+
+ f->cache = m;
+ f->fd = fd;
+
+ r = hashmap_put(m->fds, FD_TO_PTR(fd), f);
+ if (r < 0)
+ return mfree(f);
+
+ return f;
+}
+
+void mmap_cache_free_fd(MMapCache *m, MMapFileDescriptor *f) {
+ assert(m);
+ assert(f);
+
+ /* Make sure that any queued SIGBUS are first dispatched, so
+ * that we don't end up with a SIGBUS entry we cannot relate
+ * to any existing memory map */
+
+ mmap_cache_process_sigbus(m);
+
+ while (f->windows)
+ window_free(f->windows);
+
+ if (f->cache)
+ assert_se(hashmap_remove(f->cache->fds, FD_TO_PTR(f->fd)));
+
+ free(f);
+}
diff --git a/src/journal/mmap-cache.h b/src/journal/mmap-cache.h
new file mode 100644
index 0000000..bf70d32
--- /dev/null
+++ b/src/journal/mmap-cache.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+#pragma once
+
+#include <stdbool.h>
+#include <sys/stat.h>
+
+/* One context per object type, plus one for the header, plus one "additional" one */
+#define MMAP_CACHE_MAX_CONTEXTS 9
+
+typedef struct MMapCache MMapCache;
+typedef struct MMapFileDescriptor MMapFileDescriptor;
+
+MMapCache* mmap_cache_new(void);
+MMapCache* mmap_cache_ref(MMapCache *m);
+MMapCache* mmap_cache_unref(MMapCache *m);
+
+int mmap_cache_get(
+ MMapCache *m,
+ MMapFileDescriptor *f,
+ int prot,
+ unsigned context,
+ bool keep_always,
+ uint64_t offset,
+ size_t size,
+ struct stat *st,
+ void **ret,
+ size_t *ret_size);
+MMapFileDescriptor * mmap_cache_add_fd(MMapCache *m, int fd);
+void mmap_cache_free_fd(MMapCache *m, MMapFileDescriptor *f);
+
+unsigned mmap_cache_get_hit(MMapCache *m);
+unsigned mmap_cache_get_missed(MMapCache *m);
+
+bool mmap_cache_got_sigbus(MMapCache *m, MMapFileDescriptor *f);
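[Editor's note: a compact usage sketch of the API above; the helper name and parameters are illustrative. Per mmap-cache.c, mmap_cache_get() returns a positive value on success and a negative errno-style code on failure, and the returned pointer stays valid only while the cache keeps the window mapped.]

        #include <errno.h>
        #include <stdint.h>
        #include <sys/mman.h>

        #include "mmap-cache.h"

        /* Map a slice of an already-open file read-only through the cache.
         * The cache must stay referenced for as long as *ret is in use. */
        static int map_slice(MMapCache *cache, int fd, uint64_t offset, size_t size, void **ret) {
                MMapFileDescriptor *f;

                f = mmap_cache_add_fd(cache, fd);   /* reuses an existing registration, if any */
                if (!f)
                        return -ENOMEM;

                /* context 0, no keep_always, file size unknown (st == NULL) */
                return mmap_cache_get(cache, f, PROT_READ, 0, false, offset, size, NULL, ret, NULL);
        }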
diff --git a/src/journal/sd-journal.c b/src/journal/sd-journal.c
new file mode 100644
index 0000000..0f99628
--- /dev/null
+++ b/src/journal/sd-journal.c
@@ -0,0 +1,3137 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <linux/magic.h>
+#include <poll.h>
+#include <stddef.h>
+#include <sys/inotify.h>
+#include <sys/vfs.h>
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "catalog.h"
+#include "compress.h"
+#include "dirent-util.h"
+#include "env-file.h"
+#include "escape.h"
+#include "fd-util.h"
+#include "fileio.h"
+#include "format-util.h"
+#include "fs-util.h"
+#include "hashmap.h"
+#include "hostname-util.h"
+#include "id128-util.h"
+#include "io-util.h"
+#include "journal-def.h"
+#include "journal-file.h"
+#include "journal-internal.h"
+#include "list.h"
+#include "lookup3.h"
+#include "missing.h"
+#include "path-util.h"
+#include "process-util.h"
+#include "replace-var.h"
+#include "stat-util.h"
+#include "stdio-util.h"
+#include "string-util.h"
+#include "strv.h"
+
+#define JOURNAL_FILES_MAX 7168
+
+#define JOURNAL_FILES_RECHECK_USEC (2 * USEC_PER_SEC)
+
+#define REPLACE_VAR_MAX 256
+
+#define DEFAULT_DATA_THRESHOLD (64*1024)
+
+static void remove_file_real(sd_journal *j, JournalFile *f);
+
+static bool journal_pid_changed(sd_journal *j) {
+ assert(j);
+
+ /* We don't support people creating a journal object and
+ * keeping it around over a fork(). Let's complain. */
+
+ return j->original_pid != getpid_cached();
+}
+
+static int journal_put_error(sd_journal *j, int r, const char *path) {
+ char *copy;
+ int k;
+
+ /* Memorize an error we encountered, and store which
+ * file/directory it was generated from. Note that we store
+ * only *one* path per error code, as the error code is the
+         * key into the hashmap, and the path is the value. This means
+         * we keep track of every error kind we encountered, but only of
+         * one location per kind. This has the benefit that the hashmap
+         * cannot grow beyond bounds.
+ *
+ * We return an error here only if we didn't manage to
+ * memorize the real error. */
+
+ if (r >= 0)
+ return r;
+
+ k = hashmap_ensure_allocated(&j->errors, NULL);
+ if (k < 0)
+ return k;
+
+ if (path) {
+ copy = strdup(path);
+ if (!copy)
+ return -ENOMEM;
+ } else
+ copy = NULL;
+
+ k = hashmap_put(j->errors, INT_TO_PTR(r), copy);
+ if (k < 0) {
+ free(copy);
+
+ if (k == -EEXIST)
+ return 0;
+
+ return k;
+ }
+
+ return 0;
+}
+
+static void detach_location(sd_journal *j) {
+ Iterator i;
+ JournalFile *f;
+
+ assert(j);
+
+ j->current_file = NULL;
+ j->current_field = 0;
+
+ ORDERED_HASHMAP_FOREACH(f, j->files, i)
+ journal_file_reset_location(f);
+}
+
+static void reset_location(sd_journal *j) {
+ assert(j);
+
+ detach_location(j);
+ zero(j->current_location);
+}
+
+static void init_location(Location *l, LocationType type, JournalFile *f, Object *o) {
+ assert(l);
+ assert(IN_SET(type, LOCATION_DISCRETE, LOCATION_SEEK));
+ assert(f);
+ assert(o->object.type == OBJECT_ENTRY);
+
+ l->type = type;
+ l->seqnum = le64toh(o->entry.seqnum);
+ l->seqnum_id = f->header->seqnum_id;
+ l->realtime = le64toh(o->entry.realtime);
+ l->monotonic = le64toh(o->entry.monotonic);
+ l->boot_id = o->entry.boot_id;
+ l->xor_hash = le64toh(o->entry.xor_hash);
+
+ l->seqnum_set = l->realtime_set = l->monotonic_set = l->xor_hash_set = true;
+}
+
+static void set_location(sd_journal *j, JournalFile *f, Object *o) {
+ assert(j);
+ assert(f);
+ assert(o);
+
+ init_location(&j->current_location, LOCATION_DISCRETE, f, o);
+
+ j->current_file = f;
+ j->current_field = 0;
+
+ /* Let f know its candidate entry was picked. */
+ assert(f->location_type == LOCATION_SEEK);
+ f->location_type = LOCATION_DISCRETE;
+}
+
+static bool match_is_valid(const void *data, size_t size) {
+ const char *b, *p;
+
+ assert(data);
+
+ if (size < 2)
+ return false;
+
+ if (startswith(data, "__"))
+ return false;
+
+ b = data;
+ for (p = b; p < b + size; p++) {
+
+ if (*p == '=')
+ return p > b;
+
+ if (*p == '_')
+ continue;
+
+ if (*p >= 'A' && *p <= 'Z')
+ continue;
+
+ if (*p >= '0' && *p <= '9')
+ continue;
+
+ return false;
+ }
+
+ return false;
+}
+
+static bool same_field(const void *_a, size_t s, const void *_b, size_t t) {
+ const uint8_t *a = _a, *b = _b;
+ size_t j;
+
+ for (j = 0; j < s && j < t; j++) {
+
+ if (a[j] != b[j])
+ return false;
+
+ if (a[j] == '=')
+ return true;
+ }
+
+ assert_not_reached("\"=\" not found");
+}
+
+static Match *match_new(Match *p, MatchType t) {
+ Match *m;
+
+ m = new0(Match, 1);
+ if (!m)
+ return NULL;
+
+ m->type = t;
+
+ if (p) {
+ m->parent = p;
+ LIST_PREPEND(matches, p->matches, m);
+ }
+
+ return m;
+}
+
+static void match_free(Match *m) {
+ assert(m);
+
+ while (m->matches)
+ match_free(m->matches);
+
+ if (m->parent)
+ LIST_REMOVE(matches, m->parent->matches, m);
+
+ free(m->data);
+ free(m);
+}
+
+static void match_free_if_empty(Match *m) {
+ if (!m || m->matches)
+ return;
+
+ match_free(m);
+}
+
+_public_ int sd_journal_add_match(sd_journal *j, const void *data, size_t size) {
+ Match *l3, *l4, *add_here = NULL, *m;
+ le64_t le_hash;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(data, -EINVAL);
+
+ if (size == 0)
+ size = strlen(data);
+
+ assert_return(match_is_valid(data, size), -EINVAL);
+
+ /* level 0: AND term
+ * level 1: OR terms
+ * level 2: AND terms
+ * level 3: OR terms
+ * level 4: concrete matches */
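+
+ /* Illustrative sketch (not from upstream docs, unit names made up): two matches
+ * for the same field end up ORed, while matches for different fields are ANDed.
+ *
+ * sd_journal_add_match(j, "_SYSTEMD_UNIT=foo.service", 0);
+ * sd_journal_add_match(j, "_SYSTEMD_UNIT=bar.service", 0);
+ * sd_journal_add_match(j, "PRIORITY=3", 0);
+ *
+ * hence selects entries matching
+ * (_SYSTEMD_UNIT=foo.service OR _SYSTEMD_UNIT=bar.service) AND PRIORITY=3. */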
+
+ if (!j->level0) {
+ j->level0 = match_new(NULL, MATCH_AND_TERM);
+ if (!j->level0)
+ return -ENOMEM;
+ }
+
+ if (!j->level1) {
+ j->level1 = match_new(j->level0, MATCH_OR_TERM);
+ if (!j->level1)
+ return -ENOMEM;
+ }
+
+ if (!j->level2) {
+ j->level2 = match_new(j->level1, MATCH_AND_TERM);
+ if (!j->level2)
+ return -ENOMEM;
+ }
+
+ assert(j->level0->type == MATCH_AND_TERM);
+ assert(j->level1->type == MATCH_OR_TERM);
+ assert(j->level2->type == MATCH_AND_TERM);
+
+ le_hash = htole64(hash64(data, size));
+
+ LIST_FOREACH(matches, l3, j->level2->matches) {
+ assert(l3->type == MATCH_OR_TERM);
+
+ LIST_FOREACH(matches, l4, l3->matches) {
+ assert(l4->type == MATCH_DISCRETE);
+
+ /* Exactly the same match already? Then ignore
+ * this addition */
+ if (l4->le_hash == le_hash &&
+ l4->size == size &&
+ memcmp(l4->data, data, size) == 0)
+ return 0;
+
+ /* Same field? Then let's add this to this OR term */
+ if (same_field(data, size, l4->data, l4->size)) {
+ add_here = l3;
+ break;
+ }
+ }
+
+ if (add_here)
+ break;
+ }
+
+ if (!add_here) {
+ add_here = match_new(j->level2, MATCH_OR_TERM);
+ if (!add_here)
+ goto fail;
+ }
+
+ m = match_new(add_here, MATCH_DISCRETE);
+ if (!m)
+ goto fail;
+
+ m->le_hash = le_hash;
+ m->size = size;
+ m->data = memdup(data, size);
+ if (!m->data)
+ goto fail;
+
+ detach_location(j);
+
+ return 0;
+
+fail:
+ match_free_if_empty(add_here);
+ match_free_if_empty(j->level2);
+ match_free_if_empty(j->level1);
+ match_free_if_empty(j->level0);
+
+ return -ENOMEM;
+}
+
+_public_ int sd_journal_add_conjunction(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ if (!j->level0)
+ return 0;
+
+ if (!j->level1)
+ return 0;
+
+ if (!j->level1->matches)
+ return 0;
+
+ j->level1 = NULL;
+ j->level2 = NULL;
+
+ return 0;
+}
+
+_public_ int sd_journal_add_disjunction(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ if (!j->level0)
+ return 0;
+
+ if (!j->level1)
+ return 0;
+
+ if (!j->level2)
+ return 0;
+
+ if (!j->level2->matches)
+ return 0;
+
+ j->level2 = NULL;
+ return 0;
+}
+
+static char *match_make_string(Match *m) {
+ char *p = NULL, *r;
+ Match *i;
+ bool enclose = false;
+
+ if (!m)
+ return strdup("none");
+
+ if (m->type == MATCH_DISCRETE)
+ return cescape_length(m->data, m->size);
+
+ LIST_FOREACH(matches, i, m->matches) {
+ char *t, *k;
+
+ t = match_make_string(i);
+ if (!t)
+ return mfree(p);
+
+ if (p) {
+ k = strjoin(p, m->type == MATCH_OR_TERM ? " OR " : " AND ", t);
+ free(p);
+ free(t);
+
+ if (!k)
+ return NULL;
+
+ p = k;
+
+ enclose = true;
+ } else
+ p = t;
+ }
+
+ if (enclose) {
+ r = strjoin("(", p, ")");
+ free(p);
+ return r;
+ }
+
+ return p;
+}
+
+char *journal_make_match_string(sd_journal *j) {
+ assert(j);
+
+ return match_make_string(j->level0);
+}
+
+_public_ void sd_journal_flush_matches(sd_journal *j) {
+ if (!j)
+ return;
+
+ if (j->level0)
+ match_free(j->level0);
+
+ j->level0 = j->level1 = j->level2 = NULL;
+
+ detach_location(j);
+}
+
+_pure_ static int compare_with_location(JournalFile *f, Location *l) {
+ int r;
+
+ assert(f);
+ assert(l);
+ assert(f->location_type == LOCATION_SEEK);
+ assert(IN_SET(l->type, LOCATION_DISCRETE, LOCATION_SEEK));
+
+ if (l->monotonic_set &&
+ sd_id128_equal(f->current_boot_id, l->boot_id) &&
+ l->realtime_set &&
+ f->current_realtime == l->realtime &&
+ l->xor_hash_set &&
+ f->current_xor_hash == l->xor_hash)
+ return 0;
+
+ if (l->seqnum_set &&
+ sd_id128_equal(f->header->seqnum_id, l->seqnum_id)) {
+
+ r = CMP(f->current_seqnum, l->seqnum);
+ if (r != 0)
+ return r;
+ }
+
+ if (l->monotonic_set &&
+ sd_id128_equal(f->current_boot_id, l->boot_id)) {
+
+ r = CMP(f->current_monotonic, l->monotonic);
+ if (r != 0)
+ return r;
+ }
+
+ if (l->realtime_set) {
+
+ r = CMP(f->current_realtime, l->realtime);
+ if (r != 0)
+ return r;
+ }
+
+ if (l->xor_hash_set) {
+
+ r = CMP(f->current_xor_hash, l->xor_hash);
+ if (r != 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int next_for_match(
+ sd_journal *j,
+ Match *m,
+ JournalFile *f,
+ uint64_t after_offset,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+
+ int r;
+ uint64_t np = 0;
+ Object *n;
+
+ assert(j);
+ assert(m);
+ assert(f);
+
+ if (m->type == MATCH_DISCRETE) {
+ uint64_t dp;
+
+ r = journal_file_find_data_object_with_hash(f, m->data, m->size, le64toh(m->le_hash), NULL, &dp);
+ if (r <= 0)
+ return r;
+
+ return journal_file_move_to_entry_by_offset_for_data(f, dp, after_offset, direction, ret, offset);
+
+ } else if (m->type == MATCH_OR_TERM) {
+ Match *i;
+
+ /* Find the earliest match beyond after_offset */
+
+ LIST_FOREACH(matches, i, m->matches) {
+ uint64_t cp;
+
+ r = next_for_match(j, i, f, after_offset, direction, NULL, &cp);
+ if (r < 0)
+ return r;
+ else if (r > 0) {
+ if (np == 0 || (direction == DIRECTION_DOWN ? cp < np : cp > np))
+ np = cp;
+ }
+ }
+
+ if (np == 0)
+ return 0;
+
+ } else if (m->type == MATCH_AND_TERM) {
+ Match *i, *last_moved;
+
+ /* Always jump to the next matching entry and repeat
+ * this until we find an offset that matches for all
+ * matches. */
+
+ if (!m->matches)
+ return 0;
+
+ r = next_for_match(j, m->matches, f, after_offset, direction, NULL, &np);
+ if (r <= 0)
+ return r;
+
+ assert(direction == DIRECTION_DOWN ? np >= after_offset : np <= after_offset);
+ last_moved = m->matches;
+
+ LIST_LOOP_BUT_ONE(matches, i, m->matches, last_moved) {
+ uint64_t cp;
+
+ r = next_for_match(j, i, f, np, direction, NULL, &cp);
+ if (r <= 0)
+ return r;
+
+ assert(direction == DIRECTION_DOWN ? cp >= np : cp <= np);
+ if (direction == DIRECTION_DOWN ? cp > np : cp < np) {
+ np = cp;
+ last_moved = i;
+ }
+ }
+ }
+
+ assert(np > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, np, &n);
+ if (r < 0)
+ return r;
+
+ if (ret)
+ *ret = n;
+ if (offset)
+ *offset = np;
+
+ return 1;
+}
+
+static int find_location_for_match(
+ sd_journal *j,
+ Match *m,
+ JournalFile *f,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+
+ int r;
+
+ assert(j);
+ assert(m);
+ assert(f);
+
+ if (m->type == MATCH_DISCRETE) {
+ uint64_t dp;
+
+ r = journal_file_find_data_object_with_hash(f, m->data, m->size, le64toh(m->le_hash), NULL, &dp);
+ if (r <= 0)
+ return r;
+
+ /* FIXME: missing: find by monotonic */
+
+ if (j->current_location.type == LOCATION_HEAD)
+ return journal_file_next_entry_for_data(f, NULL, 0, dp, DIRECTION_DOWN, ret, offset);
+ if (j->current_location.type == LOCATION_TAIL)
+ return journal_file_next_entry_for_data(f, NULL, 0, dp, DIRECTION_UP, ret, offset);
+ if (j->current_location.seqnum_set && sd_id128_equal(j->current_location.seqnum_id, f->header->seqnum_id))
+ return journal_file_move_to_entry_by_seqnum_for_data(f, dp, j->current_location.seqnum, direction, ret, offset);
+ if (j->current_location.monotonic_set) {
+ r = journal_file_move_to_entry_by_monotonic_for_data(f, dp, j->current_location.boot_id, j->current_location.monotonic, direction, ret, offset);
+ if (r != -ENOENT)
+ return r;
+ }
+ if (j->current_location.realtime_set)
+ return journal_file_move_to_entry_by_realtime_for_data(f, dp, j->current_location.realtime, direction, ret, offset);
+
+ return journal_file_next_entry_for_data(f, NULL, 0, dp, direction, ret, offset);
+
+ } else if (m->type == MATCH_OR_TERM) {
+ uint64_t np = 0;
+ Object *n;
+ Match *i;
+
+ /* Find the earliest match */
+
+ LIST_FOREACH(matches, i, m->matches) {
+ uint64_t cp;
+
+ r = find_location_for_match(j, i, f, direction, NULL, &cp);
+ if (r < 0)
+ return r;
+ else if (r > 0) {
+ if (np == 0 || (direction == DIRECTION_DOWN ? np > cp : np < cp))
+ np = cp;
+ }
+ }
+
+ if (np == 0)
+ return 0;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, np, &n);
+ if (r < 0)
+ return r;
+
+ if (ret)
+ *ret = n;
+ if (offset)
+ *offset = np;
+
+ return 1;
+
+ } else {
+ Match *i;
+ uint64_t np = 0;
+
+ assert(m->type == MATCH_AND_TERM);
+
+ /* First jump to the last match, and then find the
+ * next one where all matches match */
+
+ if (!m->matches)
+ return 0;
+
+ LIST_FOREACH(matches, i, m->matches) {
+ uint64_t cp;
+
+ r = find_location_for_match(j, i, f, direction, NULL, &cp);
+ if (r <= 0)
+ return r;
+
+ if (np == 0 || (direction == DIRECTION_DOWN ? cp > np : cp < np))
+ np = cp;
+ }
+
+ return next_for_match(j, m, f, np, direction, ret, offset);
+ }
+}
+
+static int find_location_with_matches(
+ sd_journal *j,
+ JournalFile *f,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+
+ int r;
+
+ assert(j);
+ assert(f);
+ assert(ret);
+ assert(offset);
+
+ if (!j->level0) {
+ /* No matches is simple */
+
+ if (j->current_location.type == LOCATION_HEAD)
+ return journal_file_next_entry(f, 0, DIRECTION_DOWN, ret, offset);
+ if (j->current_location.type == LOCATION_TAIL)
+ return journal_file_next_entry(f, 0, DIRECTION_UP, ret, offset);
+ if (j->current_location.seqnum_set && sd_id128_equal(j->current_location.seqnum_id, f->header->seqnum_id))
+ return journal_file_move_to_entry_by_seqnum(f, j->current_location.seqnum, direction, ret, offset);
+ if (j->current_location.monotonic_set) {
+ r = journal_file_move_to_entry_by_monotonic(f, j->current_location.boot_id, j->current_location.monotonic, direction, ret, offset);
+ if (r != -ENOENT)
+ return r;
+ }
+ if (j->current_location.realtime_set)
+ return journal_file_move_to_entry_by_realtime(f, j->current_location.realtime, direction, ret, offset);
+
+ return journal_file_next_entry(f, 0, direction, ret, offset);
+ } else
+ return find_location_for_match(j, j->level0, f, direction, ret, offset);
+}
+
+static int next_with_matches(
+ sd_journal *j,
+ JournalFile *f,
+ direction_t direction,
+ Object **ret,
+ uint64_t *offset) {
+
+ assert(j);
+ assert(f);
+ assert(ret);
+ assert(offset);
+
+ /* No matches is easy. We simply advance the file
+ * pointer by one. */
+ if (!j->level0)
+ return journal_file_next_entry(f, f->current_offset, direction, ret, offset);
+
+ /* If we have a match then we look for the next matching entry
+ * with an offset at least one step larger */
+ return next_for_match(j, j->level0, f,
+ direction == DIRECTION_DOWN ? f->current_offset + 1
+ : f->current_offset - 1,
+ direction, ret, offset);
+}
+
+static int next_beyond_location(sd_journal *j, JournalFile *f, direction_t direction) {
+ Object *c;
+ uint64_t cp, n_entries;
+ int r;
+
+ assert(j);
+ assert(f);
+
+ n_entries = le64toh(f->header->n_entries);
+
+ /* If we hit EOF before, we don't need to look into this file again
+ * unless direction changed or new entries appeared. */
+ if (f->last_direction == direction && f->location_type == LOCATION_TAIL &&
+ n_entries == f->last_n_entries)
+ return 0;
+
+ f->last_n_entries = n_entries;
+
+ if (f->last_direction == direction && f->current_offset > 0) {
+ /* LOCATION_SEEK here means we did the work in a previous
+ * iteration and the current location already points to a
+ * candidate entry. */
+ if (f->location_type != LOCATION_SEEK) {
+ r = next_with_matches(j, f, direction, &c, &cp);
+ if (r <= 0)
+ return r;
+
+ journal_file_save_location(f, c, cp);
+ }
+ } else {
+ f->last_direction = direction;
+
+ r = find_location_with_matches(j, f, direction, &c, &cp);
+ if (r <= 0)
+ return r;
+
+ journal_file_save_location(f, c, cp);
+ }
+
+ /* OK, we found the spot, now let's advance until an entry
+ * that is actually different from what we were previously
+ * looking at. This is necessary to handle entries which exist
+ * in two (or more) journal files, and which shall all be
+ * suppressed but one. */
+
+ for (;;) {
+ bool found;
+
+ if (j->current_location.type == LOCATION_DISCRETE) {
+ int k;
+
+ k = compare_with_location(f, &j->current_location);
+
+ found = direction == DIRECTION_DOWN ? k > 0 : k < 0;
+ } else
+ found = true;
+
+ if (found)
+ return 1;
+
+ r = next_with_matches(j, f, direction, &c, &cp);
+ if (r <= 0)
+ return r;
+
+ journal_file_save_location(f, c, cp);
+ }
+}
+
+static int real_journal_next(sd_journal *j, direction_t direction) {
+ JournalFile *new_file = NULL;
+ unsigned i, n_files;
+ const void **files;
+ Object *o;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ r = iterated_cache_get(j->files_cache, NULL, &files, &n_files);
+ if (r < 0)
+ return r;
+
+ for (i = 0; i < n_files; i++) {
+ JournalFile *f = (JournalFile *)files[i];
+ bool found;
+
+ r = next_beyond_location(j, f, direction);
+ if (r < 0) {
+ log_debug_errno(r, "Can't iterate through %s, ignoring: %m", f->path);
+ remove_file_real(j, f);
+ continue;
+ } else if (r == 0) {
+ f->location_type = LOCATION_TAIL;
+ continue;
+ }
+
+ if (!new_file)
+ found = true;
+ else {
+ int k;
+
+ k = journal_file_compare_locations(f, new_file);
+
+ found = direction == DIRECTION_DOWN ? k < 0 : k > 0;
+ }
+
+ if (found)
+ new_file = f;
+ }
+
+ if (!new_file)
+ return 0;
+
+ r = journal_file_move_to_object(new_file, OBJECT_ENTRY, new_file->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ set_location(j, new_file, o);
+
+ return 1;
+}
+
+_public_ int sd_journal_next(sd_journal *j) {
+ return real_journal_next(j, DIRECTION_DOWN);
+}
+
+_public_ int sd_journal_previous(sd_journal *j) {
+ return real_journal_next(j, DIRECTION_UP);
+}
+
+static int real_journal_next_skip(sd_journal *j, direction_t direction, uint64_t skip) {
+ int c = 0, r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ if (skip == 0) {
+ /* If this is not a discrete skip, then at least
+ * resolve the current location */
+ if (j->current_location.type != LOCATION_DISCRETE) {
+ r = real_journal_next(j, direction);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+ }
+
+ do {
+ r = real_journal_next(j, direction);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ return c;
+
+ skip--;
+ c++;
+ } while (skip > 0);
+
+ return c;
+}
+
+_public_ int sd_journal_next_skip(sd_journal *j, uint64_t skip) {
+ return real_journal_next_skip(j, DIRECTION_DOWN, skip);
+}
+
+_public_ int sd_journal_previous_skip(sd_journal *j, uint64_t skip) {
+ return real_journal_next_skip(j, DIRECTION_UP, skip);
+}
+
+_public_ int sd_journal_get_cursor(sd_journal *j, char **cursor) {
+ Object *o;
+ int r;
+ char bid[33], sid[33];
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(cursor, -EINVAL);
+
+ if (!j->current_file || j->current_file->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(j->current_file, OBJECT_ENTRY, j->current_file->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ sd_id128_to_string(j->current_file->header->seqnum_id, sid);
+ sd_id128_to_string(o->entry.boot_id, bid);
+
+ if (asprintf(cursor,
+ "s=%s;i=%"PRIx64";b=%s;m=%"PRIx64";t=%"PRIx64";x=%"PRIx64,
+ sid, le64toh(o->entry.seqnum),
+ bid, le64toh(o->entry.monotonic),
+ le64toh(o->entry.realtime),
+ le64toh(o->entry.xor_hash)) < 0)
+ return -ENOMEM;
+
+ return 0;
+}
+
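+/* The cursor produced above is a ';'-separated list of key=value items, i.e.
+ * schematically (with placeholder values):
+ *
+ * s=<seqnum-id>;i=<seqnum>;b=<boot-id>;m=<monotonic>;t=<realtime>;x=<xor-hash>
+ *
+ * where the two IDs are 128-bit IDs in sd_id128_to_string() format and the
+ * remaining fields are hexadecimal. sd_journal_seek_cursor() below parses
+ * exactly these keys again. */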
+_public_ int sd_journal_seek_cursor(sd_journal *j, const char *cursor) {
+ const char *word, *state;
+ size_t l;
+ unsigned long long seqnum, monotonic, realtime, xor_hash;
+ bool
+ seqnum_id_set = false,
+ seqnum_set = false,
+ boot_id_set = false,
+ monotonic_set = false,
+ realtime_set = false,
+ xor_hash_set = false;
+ sd_id128_t seqnum_id, boot_id;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(!isempty(cursor), -EINVAL);
+
+ FOREACH_WORD_SEPARATOR(word, l, cursor, ";", state) {
+ char *item;
+ int k = 0;
+
+ if (l < 2 || word[1] != '=')
+ return -EINVAL;
+
+ item = strndup(word, l);
+ if (!item)
+ return -ENOMEM;
+
+ switch (word[0]) {
+
+ case 's':
+ seqnum_id_set = true;
+ k = sd_id128_from_string(item+2, &seqnum_id);
+ break;
+
+ case 'i':
+ seqnum_set = true;
+ if (sscanf(item+2, "%llx", &seqnum) != 1)
+ k = -EINVAL;
+ break;
+
+ case 'b':
+ boot_id_set = true;
+ k = sd_id128_from_string(item+2, &boot_id);
+ break;
+
+ case 'm':
+ monotonic_set = true;
+ if (sscanf(item+2, "%llx", &monotonic) != 1)
+ k = -EINVAL;
+ break;
+
+ case 't':
+ realtime_set = true;
+ if (sscanf(item+2, "%llx", &realtime) != 1)
+ k = -EINVAL;
+ break;
+
+ case 'x':
+ xor_hash_set = true;
+ if (sscanf(item+2, "%llx", &xor_hash) != 1)
+ k = -EINVAL;
+ break;
+ }
+
+ free(item);
+
+ if (k < 0)
+ return k;
+ }
+
+ if ((!seqnum_set || !seqnum_id_set) &&
+ (!monotonic_set || !boot_id_set) &&
+ !realtime_set)
+ return -EINVAL;
+
+ reset_location(j);
+
+ j->current_location.type = LOCATION_SEEK;
+
+ if (realtime_set) {
+ j->current_location.realtime = (uint64_t) realtime;
+ j->current_location.realtime_set = true;
+ }
+
+ if (seqnum_set && seqnum_id_set) {
+ j->current_location.seqnum = (uint64_t) seqnum;
+ j->current_location.seqnum_id = seqnum_id;
+ j->current_location.seqnum_set = true;
+ }
+
+ if (monotonic_set && boot_id_set) {
+ j->current_location.monotonic = (uint64_t) monotonic;
+ j->current_location.boot_id = boot_id;
+ j->current_location.monotonic_set = true;
+ }
+
+ if (xor_hash_set) {
+ j->current_location.xor_hash = (uint64_t) xor_hash;
+ j->current_location.xor_hash_set = true;
+ }
+
+ return 0;
+}
+
+_public_ int sd_journal_test_cursor(sd_journal *j, const char *cursor) {
+ int r;
+ Object *o;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(!isempty(cursor), -EINVAL);
+
+ if (!j->current_file || j->current_file->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(j->current_file, OBJECT_ENTRY, j->current_file->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ for (;;) {
+ _cleanup_free_ char *item = NULL;
+ unsigned long long ll;
+ sd_id128_t id;
+ int k = 0;
+
+ r = extract_first_word(&cursor, &item, ";", EXTRACT_DONT_COALESCE_SEPARATORS);
+ if (r < 0)
+ return r;
+
+ if (r == 0)
+ break;
+
+ if (strlen(item) < 2 || item[1] != '=')
+ return -EINVAL;
+
+ switch (item[0]) {
+
+ case 's':
+ k = sd_id128_from_string(item+2, &id);
+ if (k < 0)
+ return k;
+ if (!sd_id128_equal(id, j->current_file->header->seqnum_id))
+ return 0;
+ break;
+
+ case 'i':
+ if (sscanf(item+2, "%llx", &ll) != 1)
+ return -EINVAL;
+ if (ll != le64toh(o->entry.seqnum))
+ return 0;
+ break;
+
+ case 'b':
+ k = sd_id128_from_string(item+2, &id);
+ if (k < 0)
+ return k;
+ if (!sd_id128_equal(id, o->entry.boot_id))
+ return 0;
+ break;
+
+ case 'm':
+ if (sscanf(item+2, "%llx", &ll) != 1)
+ return -EINVAL;
+ if (ll != le64toh(o->entry.monotonic))
+ return 0;
+ break;
+
+ case 't':
+ if (sscanf(item+2, "%llx", &ll) != 1)
+ return -EINVAL;
+ if (ll != le64toh(o->entry.realtime))
+ return 0;
+ break;
+
+ case 'x':
+ if (sscanf(item+2, "%llx", &ll) != 1)
+ return -EINVAL;
+ if (ll != le64toh(o->entry.xor_hash))
+ return 0;
+ break;
+ }
+ }
+
+ return 1;
+}
+
+_public_ int sd_journal_seek_monotonic_usec(sd_journal *j, sd_id128_t boot_id, uint64_t usec) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ reset_location(j);
+ j->current_location.type = LOCATION_SEEK;
+ j->current_location.boot_id = boot_id;
+ j->current_location.monotonic = usec;
+ j->current_location.monotonic_set = true;
+
+ return 0;
+}
+
+_public_ int sd_journal_seek_realtime_usec(sd_journal *j, uint64_t usec) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ reset_location(j);
+ j->current_location.type = LOCATION_SEEK;
+ j->current_location.realtime = usec;
+ j->current_location.realtime_set = true;
+
+ return 0;
+}
+
+_public_ int sd_journal_seek_head(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ reset_location(j);
+ j->current_location.type = LOCATION_HEAD;
+
+ return 0;
+}
+
+_public_ int sd_journal_seek_tail(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ reset_location(j);
+ j->current_location.type = LOCATION_TAIL;
+
+ return 0;
+}
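+
+/* Usage note (illustrative sketch): seeking only records an abstract location;
+ * it is resolved to a concrete entry by the next iteration call. E.g. to read
+ * the most recent entry:
+ *
+ * sd_journal_seek_tail(j);
+ * sd_journal_previous(j);
+ *
+ * after which sd_journal_get_data() and friends operate on that entry. */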
+
+static void check_network(sd_journal *j, int fd) {
+ assert(j);
+
+ if (j->on_network)
+ return;
+
+ j->on_network = fd_is_network_fs(fd);
+}
+
+static bool file_has_type_prefix(const char *prefix, const char *filename) {
+ const char *full, *tilded, *atted;
+
+ full = strjoina(prefix, ".journal");
+ tilded = strjoina(full, "~");
+ atted = strjoina(prefix, "@");
+
+ return STR_IN_SET(filename, full, tilded) ||
+ startswith(filename, atted);
+}
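+
+/* For example (illustrative), with prefix "system" this accepts "system.journal",
+ * "system.journal~" (typically a file renamed after being found corrupted) and
+ * any file starting with "system@", i.e. archived/rotated journal files. */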
+
+static bool file_type_wanted(int flags, const char *filename) {
+ assert(filename);
+
+ if (!endswith(filename, ".journal") && !endswith(filename, ".journal~"))
+ return false;
+
+ /* no flags set → every type is OK */
+ if (!(flags & (SD_JOURNAL_SYSTEM | SD_JOURNAL_CURRENT_USER)))
+ return true;
+
+ if (flags & SD_JOURNAL_SYSTEM && file_has_type_prefix("system", filename))
+ return true;
+
+ if (flags & SD_JOURNAL_CURRENT_USER) {
+ char prefix[5 + DECIMAL_STR_MAX(uid_t) + 1];
+
+ xsprintf(prefix, "user-"UID_FMT, getuid());
+
+ if (file_has_type_prefix(prefix, filename))
+ return true;
+ }
+
+ return false;
+}
+
+static bool path_has_prefix(sd_journal *j, const char *path, const char *prefix) {
+ assert(j);
+ assert(path);
+ assert(prefix);
+
+ if (j->toplevel_fd >= 0)
+ return false;
+
+ return path_startswith(path, prefix);
+}
+
+static void track_file_disposition(sd_journal *j, JournalFile *f) {
+ assert(j);
+ assert(f);
+
+ if (!j->has_runtime_files && path_has_prefix(j, f->path, "/run"))
+ j->has_runtime_files = true;
+ else if (!j->has_persistent_files && path_has_prefix(j, f->path, "/var"))
+ j->has_persistent_files = true;
+}
+
+static const char *skip_slash(const char *p) {
+
+ if (!p)
+ return NULL;
+
+ while (*p == '/')
+ p++;
+
+ return p;
+}
+
+static int add_any_file(
+ sd_journal *j,
+ int fd,
+ const char *path) {
+
+ bool close_fd = false;
+ JournalFile *f;
+ struct stat st;
+ int r, k;
+
+ assert(j);
+ assert(fd >= 0 || path);
+
+ if (fd < 0) {
+ if (j->toplevel_fd >= 0)
+ /* If there's a top-level fd defined make the path relative, explicitly, since otherwise
+ * openat() ignores the first argument. */
+
+ fd = openat(j->toplevel_fd, skip_slash(path), O_RDONLY|O_CLOEXEC|O_NONBLOCK);
+ else
+ fd = open(path, O_RDONLY|O_CLOEXEC|O_NONBLOCK);
+ if (fd < 0) {
+ r = log_debug_errno(errno, "Failed to open journal file %s: %m", path);
+ goto finish;
+ }
+
+ close_fd = true;
+
+ r = fd_nonblock(fd, false);
+ if (r < 0) {
+ r = log_debug_errno(errno, "Failed to turn off O_NONBLOCK for %s: %m", path);
+ goto finish;
+ }
+ }
+
+ if (fstat(fd, &st) < 0) {
+ r = log_debug_errno(errno, "Failed to fstat file '%s': %m", path);
+ goto finish;
+ }
+
+ r = stat_verify_regular(&st);
+ if (r < 0) {
+ log_debug_errno(r, "Refusing to open '%s', as it is not a regular file.", path);
+ goto finish;
+ }
+
+ f = ordered_hashmap_get(j->files, path);
+ if (f) {
+ if (f->last_stat.st_dev == st.st_dev &&
+ f->last_stat.st_ino == st.st_ino) {
+
+ /* We already track this file, under the same path and with the same device/inode numbers, it's
+ * hence really the same. Mark this file as seen in this generation. This is used to GC old
+ * files in process_q_overflow() to detect journal files that are still there and discern them
+ * from those which are gone. */
+
+ f->last_seen_generation = j->generation;
+ r = 0;
+ goto finish;
+ }
+
+ /* So we tracked a file under this name, but it has a different inode/device. In that case, it got
+ * replaced (probably due to rotation), hence let's drop it from our list. */
+ remove_file_real(j, f);
+ f = NULL;
+ }
+
+ if (ordered_hashmap_size(j->files) >= JOURNAL_FILES_MAX) {
+ log_debug("Too many open journal files, not adding %s.", path);
+ r = -ETOOMANYREFS;
+ goto finish;
+ }
+
+ r = journal_file_open(fd, path, O_RDONLY, 0, false, 0, false, NULL, j->mmap, NULL, NULL, &f);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to open journal file %s: %m", path);
+ goto finish;
+ }
+
+ /* journal_file_dump(f); */
+
+ r = ordered_hashmap_put(j->files, f->path, f);
+ if (r < 0) {
+ f->close_fd = false; /* make sure journal_file_close() doesn't close the caller's fd (or our own). We'll let the caller do that, or ourselves */
+ (void) journal_file_close(f);
+ goto finish;
+ }
+
+ close_fd = false; /* the fd is now owned by the JournalFile object */
+
+ f->last_seen_generation = j->generation;
+
+ track_file_disposition(j, f);
+ check_network(j, f->fd);
+
+ j->current_invalidate_counter++;
+
+ log_debug("File %s added.", f->path);
+
+ r = 0;
+
+finish:
+ if (close_fd)
+ safe_close(fd);
+
+ if (r < 0) {
+ k = journal_put_error(j, r, path);
+ if (k < 0)
+ return k;
+ }
+
+ return r;
+}
+
+static int add_file_by_name(
+ sd_journal *j,
+ const char *prefix,
+ const char *filename) {
+
+ const char *path;
+
+ assert(j);
+ assert(prefix);
+ assert(filename);
+
+ if (j->no_new_files)
+ return 0;
+
+ if (!file_type_wanted(j->flags, filename))
+ return 0;
+
+ path = strjoina(prefix, "/", filename);
+ return add_any_file(j, -1, path);
+}
+
+static void remove_file_by_name(
+ sd_journal *j,
+ const char *prefix,
+ const char *filename) {
+
+ const char *path;
+ JournalFile *f;
+
+ assert(j);
+ assert(prefix);
+ assert(filename);
+
+ path = strjoina(prefix, "/", filename);
+ f = ordered_hashmap_get(j->files, path);
+ if (!f)
+ return;
+
+ remove_file_real(j, f);
+}
+
+static void remove_file_real(sd_journal *j, JournalFile *f) {
+ assert(j);
+ assert(f);
+
+ (void) ordered_hashmap_remove(j->files, f->path);
+
+ log_debug("File %s removed.", f->path);
+
+ if (j->current_file == f) {
+ j->current_file = NULL;
+ j->current_field = 0;
+ }
+
+ if (j->unique_file == f) {
+ /* Jump to the next unique_file or NULL if that one was last */
+ j->unique_file = ordered_hashmap_next(j->files, j->unique_file->path);
+ j->unique_offset = 0;
+ if (!j->unique_file)
+ j->unique_file_lost = true;
+ }
+
+ if (j->fields_file == f) {
+ j->fields_file = ordered_hashmap_next(j->files, j->fields_file->path);
+ j->fields_offset = 0;
+ if (!j->fields_file)
+ j->fields_file_lost = true;
+ }
+
+ (void) journal_file_close(f);
+
+ j->current_invalidate_counter++;
+}
+
+static int dirname_is_machine_id(const char *fn) {
+ sd_id128_t id, machine;
+ int r;
+
+ r = sd_id128_get_machine(&machine);
+ if (r < 0)
+ return r;
+
+ r = sd_id128_from_string(fn, &id);
+ if (r < 0)
+ return r;
+
+ return sd_id128_equal(id, machine);
+}
+
+static bool dirent_is_journal_file(const struct dirent *de) {
+ assert(de);
+
+ if (!IN_SET(de->d_type, DT_REG, DT_LNK, DT_UNKNOWN))
+ return false;
+
+ return endswith(de->d_name, ".journal") ||
+ endswith(de->d_name, ".journal~");
+}
+
+static bool dirent_is_id128_subdir(const struct dirent *de) {
+ assert(de);
+
+ if (!IN_SET(de->d_type, DT_DIR, DT_LNK, DT_UNKNOWN))
+ return false;
+
+ return id128_is_valid(de->d_name);
+}
+
+static int directory_open(sd_journal *j, const char *path, DIR **ret) {
+ DIR *d;
+
+ assert(j);
+ assert(path);
+ assert(ret);
+
+ if (j->toplevel_fd < 0)
+ d = opendir(path);
+ else
+ /* Open the specified directory relative to the toplevel fd. Enforce that the path specified is
+ * relative, by dropping the initial slash */
+ d = xopendirat(j->toplevel_fd, skip_slash(path), 0);
+ if (!d)
+ return -errno;
+
+ *ret = d;
+ return 0;
+}
+
+static int add_directory(sd_journal *j, const char *prefix, const char *dirname);
+
+static void directory_enumerate(sd_journal *j, Directory *m, DIR *d) {
+ struct dirent *de;
+
+ assert(j);
+ assert(m);
+ assert(d);
+
+ FOREACH_DIRENT_ALL(de, d, goto fail) {
+
+ if (dirent_is_journal_file(de))
+ (void) add_file_by_name(j, m->path, de->d_name);
+
+ if (m->is_root && dirent_is_id128_subdir(de))
+ (void) add_directory(j, m->path, de->d_name);
+ }
+
+ return;
+
+fail:
+ log_debug_errno(errno, "Failed to enumerate directory %s, ignoring: %m", m->path);
+}
+
+static void directory_watch(sd_journal *j, Directory *m, int fd, uint32_t mask) {
+ int r;
+
+ assert(j);
+ assert(m);
+ assert(fd >= 0);
+
+ /* Watch this directory if that's enabled and if it is not being watched yet. */
+
+ if (m->wd > 0) /* Already have a watch? */
+ return;
+ if (j->inotify_fd < 0) /* Not watching at all? */
+ return;
+
+ m->wd = inotify_add_watch_fd(j->inotify_fd, fd, mask);
+ if (m->wd < 0) {
+ log_debug_errno(errno, "Failed to watch journal directory '%s', ignoring: %m", m->path);
+ return;
+ }
+
+ r = hashmap_put(j->directories_by_wd, INT_TO_PTR(m->wd), m);
+ if (r == -EEXIST)
+ log_debug_errno(r, "Directory '%s' already being watched under a different path, ignoring: %m", m->path);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to add watch for journal directory '%s' to hashmap, ignoring: %m", m->path);
+ (void) inotify_rm_watch(j->inotify_fd, m->wd);
+ m->wd = -1;
+ }
+}
+
+static int add_directory(sd_journal *j, const char *prefix, const char *dirname) {
+ _cleanup_free_ char *path = NULL;
+ _cleanup_closedir_ DIR *d = NULL;
+ Directory *m;
+ int r, k;
+
+ assert(j);
+ assert(prefix);
+
+ /* Adds a journal file directory to watch. If the directory is already tracked, this updates the inotify watch
+ * and re-enumerates the directory contents. */
+
+ if (dirname)
+ path = strjoin(prefix, "/", dirname);
+ else
+ path = strdup(prefix);
+ if (!path) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ log_debug("Considering directory '%s'.", path);
+
+ /* We consider everything local that is in a directory for the local machine ID, or that is stored in /run */
+ if ((j->flags & SD_JOURNAL_LOCAL_ONLY) &&
+ !((dirname && dirname_is_machine_id(dirname) > 0) || path_has_prefix(j, path, "/run")))
+ return 0;
+
+ r = directory_open(j, path, &d);
+ if (r < 0) {
+ log_debug_errno(r, "Failed to open directory '%s': %m", path);
+ goto fail;
+ }
+
+ m = hashmap_get(j->directories_by_path, path);
+ if (!m) {
+ m = new0(Directory, 1);
+ if (!m) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ m->is_root = false;
+ m->path = path;
+
+ if (hashmap_put(j->directories_by_path, m->path, m) < 0) {
+ free(m);
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ path = NULL; /* avoid freeing in cleanup */
+ j->current_invalidate_counter++;
+
+ log_debug("Directory %s added.", m->path);
+
+ } else if (m->is_root)
+ return 0; /* Don't 'downgrade' from root directory */
+
+ m->last_seen_generation = j->generation;
+
+ directory_watch(j, m, dirfd(d),
+ IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB|IN_DELETE|
+ IN_DELETE_SELF|IN_MOVE_SELF|IN_UNMOUNT|IN_MOVED_FROM|
+ IN_ONLYDIR);
+
+ if (!j->no_new_files)
+ directory_enumerate(j, m, d);
+
+ check_network(j, dirfd(d));
+
+ return 0;
+
+fail:
+ k = journal_put_error(j, r, path ?: prefix);
+ if (k < 0)
+ return k;
+
+ return r;
+}
+
+static int add_root_directory(sd_journal *j, const char *p, bool missing_ok) {
+
+ _cleanup_closedir_ DIR *d = NULL;
+ Directory *m;
+ int r, k;
+
+ assert(j);
+
+ /* Adds a root directory to our set of directories to use. If the root directory is already in the set, we
+ * update the inotify logic and re-enumerate the directory entries. Hence this may be called both to initially
+ * populate the set and to update it later. */
+
+ if (p) {
+ /* If there's a path specified, use it. */
+
+ log_debug("Considering root directory '%s'.", p);
+
+ if ((j->flags & SD_JOURNAL_RUNTIME_ONLY) &&
+ !path_has_prefix(j, p, "/run"))
+ return -EINVAL;
+
+ if (j->prefix)
+ p = strjoina(j->prefix, p);
+
+ r = directory_open(j, p, &d);
+ if (r == -ENOENT && missing_ok)
+ return 0;
+ if (r < 0) {
+ log_debug_errno(r, "Failed to open root directory %s: %m", p);
+ goto fail;
+ }
+ } else {
+ int dfd;
+
+ /* If there's no path specified, then we use the top-level fd itself. We duplicate the fd here, since
+ * fdopendir() will take possession of the fd, and close it, which we don't want. */
+
+ p = "."; /* store this as "." in the directories hashmap */
+
+ dfd = fcntl(j->toplevel_fd, F_DUPFD_CLOEXEC, 3);
+ if (dfd < 0) {
+ r = -errno;
+ goto fail;
+ }
+
+ d = fdopendir(dfd);
+ if (!d) {
+ r = -errno;
+ safe_close(dfd);
+ goto fail;
+ }
+
+ rewinddir(d);
+ }
+
+ m = hashmap_get(j->directories_by_path, p);
+ if (!m) {
+ m = new0(Directory, 1);
+ if (!m) {
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ m->is_root = true;
+
+ m->path = strdup(p);
+ if (!m->path) {
+ free(m);
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ if (hashmap_put(j->directories_by_path, m->path, m) < 0) {
+ free(m->path);
+ free(m);
+ r = -ENOMEM;
+ goto fail;
+ }
+
+ j->current_invalidate_counter++;
+
+ log_debug("Root directory %s added.", m->path);
+
+ } else if (!m->is_root)
+ return 0;
+
+ directory_watch(j, m, dirfd(d),
+ IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB|IN_DELETE|
+ IN_ONLYDIR);
+
+ if (!j->no_new_files)
+ directory_enumerate(j, m, d);
+
+ check_network(j, dirfd(d));
+
+ return 0;
+
+fail:
+ k = journal_put_error(j, r, p);
+ if (k < 0)
+ return k;
+
+ return r;
+}
+
+static void remove_directory(sd_journal *j, Directory *d) {
+ assert(j);
+
+ if (d->wd > 0) {
+ hashmap_remove(j->directories_by_wd, INT_TO_PTR(d->wd));
+
+ if (j->inotify_fd >= 0)
+ inotify_rm_watch(j->inotify_fd, d->wd);
+ }
+
+ hashmap_remove(j->directories_by_path, d->path);
+
+ if (d->is_root)
+ log_debug("Root directory %s removed.", d->path);
+ else
+ log_debug("Directory %s removed.", d->path);
+
+ free(d->path);
+ free(d);
+}
+
+static int add_search_paths(sd_journal *j) {
+
+ static const char search_paths[] =
+ "/run/log/journal\0"
+ "/var/log/journal\0";
+ const char *p;
+
+ assert(j);
+
+ /* We ignore most errors here, since the idea is to only open
+ * what's actually accessible, and ignore the rest. */
+
+ NULSTR_FOREACH(p, search_paths)
+ (void) add_root_directory(j, p, true);
+
+ if (!(j->flags & SD_JOURNAL_LOCAL_ONLY))
+ (void) add_root_directory(j, "/var/log/journal/remote", true);
+
+ return 0;
+}
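+
+/* Illustrative layout: below these search paths journal files typically live in
+ * per-machine subdirectories, e.g. /var/log/journal/<machine-id>/system.journal
+ * and user-<uid>.journal plus their "@"-archived variants; directory_enumerate()
+ * descends into those <machine-id> subdirectories for root directories. */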
+
+static int add_current_paths(sd_journal *j) {
+ Iterator i;
+ JournalFile *f;
+
+ assert(j);
+ assert(j->no_new_files);
+
+ /* Simply adds the directories of all files we currently have open. We don't expect errors here, so we
+ * treat them as fatal. */
+
+ ORDERED_HASHMAP_FOREACH(f, j->files, i) {
+ _cleanup_free_ char *dir;
+ int r;
+
+ dir = dirname_malloc(f->path);
+ if (!dir)
+ return -ENOMEM;
+
+ r = add_directory(j, dir, NULL);
+ if (r < 0)
+ return r;
+ }
+
+ return 0;
+}
+
+static int allocate_inotify(sd_journal *j) {
+ assert(j);
+
+ if (j->inotify_fd < 0) {
+ j->inotify_fd = inotify_init1(IN_NONBLOCK|IN_CLOEXEC);
+ if (j->inotify_fd < 0)
+ return -errno;
+ }
+
+ return hashmap_ensure_allocated(&j->directories_by_wd, NULL);
+}
+
+static sd_journal *journal_new(int flags, const char *path) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+
+ j = new0(sd_journal, 1);
+ if (!j)
+ return NULL;
+
+ j->original_pid = getpid_cached();
+ j->toplevel_fd = -1;
+ j->inotify_fd = -1;
+ j->flags = flags;
+ j->data_threshold = DEFAULT_DATA_THRESHOLD;
+
+ if (path) {
+ char *t;
+
+ t = strdup(path);
+ if (!t)
+ return NULL;
+
+ if (flags & SD_JOURNAL_OS_ROOT)
+ j->prefix = t;
+ else
+ j->path = t;
+ }
+
+ j->files = ordered_hashmap_new(&path_hash_ops);
+ if (!j->files)
+ return NULL;
+
+ j->files_cache = ordered_hashmap_iterated_cache_new(j->files);
+ j->directories_by_path = hashmap_new(&path_hash_ops);
+ j->mmap = mmap_cache_new();
+ if (!j->files_cache || !j->directories_by_path || !j->mmap)
+ return NULL;
+
+ return TAKE_PTR(j);
+}
+
+#define OPEN_ALLOWED_FLAGS \
+ (SD_JOURNAL_LOCAL_ONLY | \
+ SD_JOURNAL_RUNTIME_ONLY | \
+ SD_JOURNAL_SYSTEM | SD_JOURNAL_CURRENT_USER)
+
+_public_ int sd_journal_open(sd_journal **ret, int flags) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return((flags & ~OPEN_ALLOWED_FLAGS) == 0, -EINVAL);
+
+ j = journal_new(flags, NULL);
+ if (!j)
+ return -ENOMEM;
+
+ r = add_search_paths(j);
+ if (r < 0)
+ return r;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
+
+#define OPEN_CONTAINER_ALLOWED_FLAGS \
+ (SD_JOURNAL_LOCAL_ONLY | SD_JOURNAL_SYSTEM)
+
+_public_ int sd_journal_open_container(sd_journal **ret, const char *machine, int flags) {
+ _cleanup_free_ char *root = NULL, *class = NULL;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ char *p;
+ int r;
+
+ /* This is pretty much deprecated, people should use machined's OpenMachineRootDirectory() call instead in
+ * combination with sd_journal_open_directory_fd(). */
+
+ assert_return(machine, -EINVAL);
+ assert_return(ret, -EINVAL);
+ assert_return((flags & ~OPEN_CONTAINER_ALLOWED_FLAGS) == 0, -EINVAL);
+ assert_return(machine_name_is_valid(machine), -EINVAL);
+
+ p = strjoina("/run/systemd/machines/", machine);
+ r = parse_env_file(NULL, p,
+ "ROOT", &root,
+ "CLASS", &class);
+ if (r == -ENOENT)
+ return -EHOSTDOWN;
+ if (r < 0)
+ return r;
+ if (!root)
+ return -ENODATA;
+
+ if (!streq_ptr(class, "container"))
+ return -EIO;
+
+ j = journal_new(flags, root);
+ if (!j)
+ return -ENOMEM;
+
+ r = add_search_paths(j);
+ if (r < 0)
+ return r;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
+
+#define OPEN_DIRECTORY_ALLOWED_FLAGS \
+ (SD_JOURNAL_OS_ROOT | \
+ SD_JOURNAL_SYSTEM | SD_JOURNAL_CURRENT_USER )
+
+_public_ int sd_journal_open_directory(sd_journal **ret, const char *path, int flags) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return(path, -EINVAL);
+ assert_return((flags & ~OPEN_DIRECTORY_ALLOWED_FLAGS) == 0, -EINVAL);
+
+ j = journal_new(flags, path);
+ if (!j)
+ return -ENOMEM;
+
+ if (flags & SD_JOURNAL_OS_ROOT)
+ r = add_search_paths(j);
+ else
+ r = add_root_directory(j, path, false);
+ if (r < 0)
+ return r;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
+
+_public_ int sd_journal_open_files(sd_journal **ret, const char **paths, int flags) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ const char **path;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return(flags == 0, -EINVAL);
+
+ j = journal_new(flags, NULL);
+ if (!j)
+ return -ENOMEM;
+
+ STRV_FOREACH(path, paths) {
+ r = add_any_file(j, -1, *path);
+ if (r < 0)
+ return r;
+ }
+
+ j->no_new_files = true;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
+
+#define OPEN_DIRECTORY_FD_ALLOWED_FLAGS \
+ (SD_JOURNAL_OS_ROOT | \
+ SD_JOURNAL_SYSTEM | SD_JOURNAL_CURRENT_USER )
+
+_public_ int sd_journal_open_directory_fd(sd_journal **ret, int fd, int flags) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ struct stat st;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return(fd >= 0, -EBADF);
+ assert_return((flags & ~OPEN_DIRECTORY_FD_ALLOWED_FLAGS) == 0, -EINVAL);
+
+ if (fstat(fd, &st) < 0)
+ return -errno;
+
+ if (!S_ISDIR(st.st_mode))
+ return -EBADFD;
+
+ j = journal_new(flags, NULL);
+ if (!j)
+ return -ENOMEM;
+
+ j->toplevel_fd = fd;
+
+ if (flags & SD_JOURNAL_OS_ROOT)
+ r = add_search_paths(j);
+ else
+ r = add_root_directory(j, NULL, false);
+ if (r < 0)
+ return r;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+}
+
+_public_ int sd_journal_open_files_fd(sd_journal **ret, int fds[], unsigned n_fds, int flags) {
+ Iterator iterator;
+ JournalFile *f;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ unsigned i;
+ int r;
+
+ assert_return(ret, -EINVAL);
+ assert_return(n_fds > 0, -EBADF);
+ assert_return(flags == 0, -EINVAL);
+
+ j = journal_new(flags, NULL);
+ if (!j)
+ return -ENOMEM;
+
+ for (i = 0; i < n_fds; i++) {
+ struct stat st;
+
+ if (fds[i] < 0) {
+ r = -EBADF;
+ goto fail;
+ }
+
+ if (fstat(fds[i], &st) < 0) {
+ r = -errno;
+ goto fail;
+ }
+
+ r = stat_verify_regular(&st);
+ if (r < 0)
+ goto fail;
+
+ r = add_any_file(j, fds[i], NULL);
+ if (r < 0)
+ goto fail;
+ }
+
+ j->no_new_files = true;
+ j->no_inotify = true;
+
+ *ret = TAKE_PTR(j);
+ return 0;
+
+fail:
+ /* If we fail, make sure we don't take possession of the fds we already made use of successfully, so that
+ * they remain open for the caller. */
+ ORDERED_HASHMAP_FOREACH(f, j->files, iterator)
+ f->close_fd = false;
+
+ return r;
+}
+
+_public_ void sd_journal_close(sd_journal *j) {
+ Directory *d;
+
+ if (!j)
+ return;
+
+ sd_journal_flush_matches(j);
+
+ ordered_hashmap_free_with_destructor(j->files, journal_file_close);
+ iterated_cache_free(j->files_cache);
+
+ while ((d = hashmap_first(j->directories_by_path)))
+ remove_directory(j, d);
+
+ while ((d = hashmap_first(j->directories_by_wd)))
+ remove_directory(j, d);
+
+ hashmap_free(j->directories_by_path);
+ hashmap_free(j->directories_by_wd);
+
+ safe_close(j->inotify_fd);
+
+ if (j->mmap) {
+ log_debug("mmap cache statistics: %u hit, %u miss", mmap_cache_get_hit(j->mmap), mmap_cache_get_missed(j->mmap));
+ mmap_cache_unref(j->mmap);
+ }
+
+ hashmap_free_free(j->errors);
+
+ free(j->path);
+ free(j->prefix);
+ free(j->unique_field);
+ free(j->fields_buffer);
+ free(j);
+}
+
+_public_ int sd_journal_get_realtime_usec(sd_journal *j, uint64_t *ret) {
+ Object *o;
+ JournalFile *f;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(ret, -EINVAL);
+
+ f = j->current_file;
+ if (!f)
+ return -EADDRNOTAVAIL;
+
+ if (f->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ *ret = le64toh(o->entry.realtime);
+ return 0;
+}
+
+_public_ int sd_journal_get_monotonic_usec(sd_journal *j, uint64_t *ret, sd_id128_t *ret_boot_id) {
+ Object *o;
+ JournalFile *f;
+ int r;
+ sd_id128_t id;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ f = j->current_file;
+ if (!f)
+ return -EADDRNOTAVAIL;
+
+ if (f->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ if (ret_boot_id)
+ *ret_boot_id = o->entry.boot_id;
+ else {
+ r = sd_id128_get_boot(&id);
+ if (r < 0)
+ return r;
+
+ if (!sd_id128_equal(id, o->entry.boot_id))
+ return -ESTALE;
+ }
+
+ if (ret)
+ *ret = le64toh(o->entry.monotonic);
+
+ return 0;
+}
+
+static bool field_is_valid(const char *field) {
+ const char *p;
+
+ assert(field);
+
+ if (isempty(field))
+ return false;
+
+ if (startswith(field, "__"))
+ return false;
+
+ for (p = field; *p; p++) {
+
+ if (*p == '_')
+ continue;
+
+ if (*p >= 'A' && *p <= 'Z')
+ continue;
+
+ if (*p >= '0' && *p <= '9')
+ continue;
+
+ return false;
+ }
+
+ return true;
+}
+
+_public_ int sd_journal_get_data(sd_journal *j, const char *field, const void **data, size_t *size) {
+ JournalFile *f;
+ uint64_t i, n;
+ size_t field_length;
+ int r;
+ Object *o;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(field, -EINVAL);
+ assert_return(data, -EINVAL);
+ assert_return(size, -EINVAL);
+ assert_return(field_is_valid(field), -EINVAL);
+
+ f = j->current_file;
+ if (!f)
+ return -EADDRNOTAVAIL;
+
+ if (f->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ field_length = strlen(field);
+
+ n = journal_file_entry_n_items(o);
+ for (i = 0; i < n; i++) {
+ uint64_t p, l;
+ le64_t le_hash;
+ size_t t;
+ int compression;
+
+ p = le64toh(o->entry.items[i].object_offset);
+ le_hash = o->entry.items[i].hash;
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ if (le_hash != o->data.hash)
+ return -EBADMSG;
+
+ l = le64toh(o->object.size) - offsetof(Object, data.payload);
+
+ compression = o->object.flags & OBJECT_COMPRESSION_MASK;
+ if (compression) {
+#if HAVE_XZ || HAVE_LZ4
+ r = decompress_startswith(compression,
+ o->data.payload, l,
+ &f->compress_buffer, &f->compress_buffer_size,
+ field, field_length, '=');
+ if (r < 0)
+ log_debug_errno(r, "Cannot decompress %s object of length %"PRIu64" at offset "OFSfmt": %m",
+ object_compressed_to_string(compression), l, p);
+ else if (r > 0) {
+
+ size_t rsize;
+
+ r = decompress_blob(compression,
+ o->data.payload, l,
+ &f->compress_buffer, &f->compress_buffer_size, &rsize,
+ j->data_threshold);
+ if (r < 0)
+ return r;
+
+ *data = f->compress_buffer;
+ *size = (size_t) rsize;
+
+ return 0;
+ }
+#else
+ return -EPROTONOSUPPORT;
+#endif
+ } else if (l >= field_length+1 &&
+ memcmp(o->data.payload, field, field_length) == 0 &&
+ o->data.payload[field_length] == '=') {
+
+ t = (size_t) l;
+
+ if ((uint64_t) t != l)
+ return -E2BIG;
+
+ *data = o->data.payload;
+ *size = t;
+
+ return 0;
+ }
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+ }
+
+ return -ENOENT;
+}
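+
+/* Usage sketch (illustrative): once the read pointer has been positioned, e.g.
+ * with sd_journal_next(), a single field can be fetched like this:
+ *
+ * const void *d;
+ * size_t l;
+ * if (sd_journal_get_data(j, "MESSAGE", &d, &l) >= 0)
+ * printf("%.*s\n", (int) l, (const char*) d);
+ *
+ * The returned datum has the form "FIELD=value", is not NUL-terminated, and is
+ * generally only valid until the entry position changes. */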
+
+static int return_data(sd_journal *j, JournalFile *f, Object *o, const void **data, size_t *size) {
+ size_t t;
+ uint64_t l;
+ int compression;
+
+ l = le64toh(o->object.size) - offsetof(Object, data.payload);
+ t = (size_t) l;
+
+ /* We can't read objects larger than 4G on a 32bit machine */
+ if ((uint64_t) t != l)
+ return -E2BIG;
+
+ compression = o->object.flags & OBJECT_COMPRESSION_MASK;
+ if (compression) {
+#if HAVE_XZ || HAVE_LZ4
+ size_t rsize;
+ int r;
+
+ r = decompress_blob(compression,
+ o->data.payload, l, &f->compress_buffer,
+ &f->compress_buffer_size, &rsize, j->data_threshold);
+ if (r < 0)
+ return r;
+
+ *data = f->compress_buffer;
+ *size = (size_t) rsize;
+#else
+ return -EPROTONOSUPPORT;
+#endif
+ } else {
+ *data = o->data.payload;
+ *size = t;
+ }
+
+ return 0;
+}
+
+_public_ int sd_journal_enumerate_data(sd_journal *j, const void **data, size_t *size) {
+ JournalFile *f;
+ uint64_t p, n;
+ le64_t le_hash;
+ int r;
+ Object *o;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(data, -EINVAL);
+ assert_return(size, -EINVAL);
+
+ f = j->current_file;
+ if (!f)
+ return -EADDRNOTAVAIL;
+
+ if (f->current_offset <= 0)
+ return -EADDRNOTAVAIL;
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ if (r < 0)
+ return r;
+
+ n = journal_file_entry_n_items(o);
+ if (j->current_field >= n)
+ return 0;
+
+ p = le64toh(o->entry.items[j->current_field].object_offset);
+ le_hash = o->entry.items[j->current_field].hash;
+ r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
+ if (r < 0)
+ return r;
+
+ if (le_hash != o->data.hash)
+ return -EBADMSG;
+
+ r = return_data(j, f, o, data, size);
+ if (r < 0)
+ return r;
+
+ j->current_field++;
+
+ return 1;
+}
+
+_public_ void sd_journal_restart_data(sd_journal *j) {
+ if (!j)
+ return;
+
+ j->current_field = 0;
+}
+
+static int reiterate_all_paths(sd_journal *j) {
+ assert(j);
+
+ if (j->no_new_files)
+ return add_current_paths(j);
+
+ if (j->flags & SD_JOURNAL_OS_ROOT)
+ return add_search_paths(j);
+
+ if (j->toplevel_fd >= 0)
+ return add_root_directory(j, NULL, false);
+
+ if (j->path)
+ return add_root_directory(j, j->path, true);
+
+ return add_search_paths(j);
+}
+
+_public_ int sd_journal_get_fd(sd_journal *j) {
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ if (j->no_inotify)
+ return -EMEDIUMTYPE;
+
+ if (j->inotify_fd >= 0)
+ return j->inotify_fd;
+
+ r = allocate_inotify(j);
+ if (r < 0)
+ return r;
+
+ log_debug("Reiterating files to get inotify watches established.");
+
+ /* Iterate through all dirs again, to add them to the inotify */
+ r = reiterate_all_paths(j);
+ if (r < 0)
+ return r;
+
+ return j->inotify_fd;
+}
+
+_public_ int sd_journal_get_events(sd_journal *j) {
+ int fd;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ fd = sd_journal_get_fd(j);
+ if (fd < 0)
+ return fd;
+
+ return POLLIN;
+}
+
+_public_ int sd_journal_get_timeout(sd_journal *j, uint64_t *timeout_usec) {
+ int fd;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(timeout_usec, -EINVAL);
+
+ fd = sd_journal_get_fd(j);
+ if (fd < 0)
+ return fd;
+
+ if (!j->on_network) {
+ *timeout_usec = (uint64_t) -1;
+ return 0;
+ }
+
+ /* If we are on the network we need to regularly check for
+ * changes manually */
+
+ *timeout_usec = j->last_process_usec + JOURNAL_FILES_RECHECK_USEC;
+ return 1;
+}
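+
+/* Integration sketch (illustrative, using this file's time helpers for brevity)
+ * for callers driving their own event loop, combining the three calls above:
+ *
+ * struct pollfd p = { .fd = sd_journal_get_fd(j), .events = sd_journal_get_events(j) };
+ * uint64_t t;
+ * int msec = -1;
+ * if (sd_journal_get_timeout(j, &t) > 0)
+ * msec = t > now(CLOCK_MONOTONIC) ? (int) ((t - now(CLOCK_MONOTONIC)) / USEC_PER_MSEC) : 0;
+ * (void) poll(&p, 1, msec);
+ * (void) sd_journal_process(j);
+ *
+ * which is essentially what sd_journal_wait() further down implements. */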
+
+static void process_q_overflow(sd_journal *j) {
+ JournalFile *f;
+ Directory *m;
+ Iterator i;
+
+ assert(j);
+
+ /* When the inotify queue overruns we need to enumerate and re-validate all journal files to bring our list
+ * back in sync with what's on disk. For this we pick a new generation counter value. It'll be assigned to all
+ * journal files we encounter. All journal files and all directories that don't carry it after re-enumeration
+ * are subject to unloading. */
+
+ log_debug("Inotify queue overrun, reiterating everything.");
+
+ j->generation++;
+ (void) reiterate_all_paths(j);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files, i) {
+
+ if (f->last_seen_generation == j->generation)
+ continue;
+
+ log_debug("File '%s' hasn't been seen in this enumeration, removing.", f->path);
+ remove_file_real(j, f);
+ }
+
+ HASHMAP_FOREACH(m, j->directories_by_path, i) {
+
+ if (m->last_seen_generation == j->generation)
+ continue;
+
+ if (m->is_root) /* Never GC root directories */
+ continue;
+
+ log_debug("Directory '%s' hasn't been seen in this enumeration, removing.", f->path);
+ remove_directory(j, m);
+ }
+
+ log_debug("Reiteration complete.");
+}
+
+static void process_inotify_event(sd_journal *j, struct inotify_event *e) {
+ Directory *d;
+
+ assert(j);
+ assert(e);
+
+ if (e->mask & IN_Q_OVERFLOW) {
+ process_q_overflow(j);
+ return;
+ }
+
+ /* Is this a subdirectory we watch? */
+ d = hashmap_get(j->directories_by_wd, INT_TO_PTR(e->wd));
+ if (d) {
+ if (!(e->mask & IN_ISDIR) && e->len > 0 &&
+ (endswith(e->name, ".journal") ||
+ endswith(e->name, ".journal~"))) {
+
+ /* Event for a journal file */
+
+ if (e->mask & (IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB))
+ (void) add_file_by_name(j, d->path, e->name);
+ else if (e->mask & (IN_DELETE|IN_MOVED_FROM|IN_UNMOUNT))
+ remove_file_by_name(j, d->path, e->name);
+
+ } else if (!d->is_root && e->len == 0) {
+
+ /* Event for a subdirectory */
+
+ if (e->mask & (IN_DELETE_SELF|IN_MOVE_SELF|IN_UNMOUNT))
+ remove_directory(j, d);
+
+ } else if (d->is_root && (e->mask & IN_ISDIR) && e->len > 0 && id128_is_valid(e->name)) {
+
+ /* Event for root directory */
+
+ if (e->mask & (IN_CREATE|IN_MOVED_TO|IN_MODIFY|IN_ATTRIB))
+ (void) add_directory(j, d->path, e->name);
+ }
+
+ return;
+ }
+
+ if (e->mask & IN_IGNORED)
+ return;
+
+ log_debug("Unexpected inotify event.");
+}
+
+static int determine_change(sd_journal *j) {
+ bool b;
+
+ assert(j);
+
+ b = j->current_invalidate_counter != j->last_invalidate_counter;
+ j->last_invalidate_counter = j->current_invalidate_counter;
+
+ return b ? SD_JOURNAL_INVALIDATE : SD_JOURNAL_APPEND;
+}
+
+_public_ int sd_journal_process(sd_journal *j) {
+ bool got_something = false;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ if (j->inotify_fd < 0) /* We have no inotify fd yet? Then there's noting to process. */
+ return 0;
+
+ j->last_process_usec = now(CLOCK_MONOTONIC);
+ j->last_invalidate_counter = j->current_invalidate_counter;
+
+ for (;;) {
+ union inotify_event_buffer buffer;
+ struct inotify_event *e;
+ ssize_t l;
+
+ l = read(j->inotify_fd, &buffer, sizeof(buffer));
+ if (l < 0) {
+ if (IN_SET(errno, EAGAIN, EINTR))
+ return got_something ? determine_change(j) : SD_JOURNAL_NOP;
+
+ return -errno;
+ }
+
+ got_something = true;
+
+ FOREACH_INOTIFY_EVENT(e, buffer, l)
+ process_inotify_event(j, e);
+ }
+}
+
+_public_ int sd_journal_wait(sd_journal *j, uint64_t timeout_usec) {
+ int r;
+ uint64_t t;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ if (j->inotify_fd < 0) {
+
+ /* This is the first invocation, hence create the
+ * inotify watch */
+ r = sd_journal_get_fd(j);
+ if (r < 0)
+ return r;
+
+ /* The journal might have changed since the context
+ * object was created and we weren't watching before,
+ * hence don't wait for anything, and return
+ * immediately. */
+ return determine_change(j);
+ }
+
+ r = sd_journal_get_timeout(j, &t);
+ if (r < 0)
+ return r;
+
+ if (t != (uint64_t) -1) {
+ usec_t n;
+
+ n = now(CLOCK_MONOTONIC);
+ t = t > n ? t - n : 0;
+
+ if (timeout_usec == (uint64_t) -1 || timeout_usec > t)
+ timeout_usec = t;
+ }
+
+ do {
+ r = fd_wait_for_event(j->inotify_fd, POLLIN, timeout_usec);
+ } while (r == -EINTR);
+
+ if (r < 0)
+ return r;
+
+ return sd_journal_process(j);
+}
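+
+/* Typical blocking consumer loop (illustrative sketch):
+ *
+ * for (;;) {
+ * while (sd_journal_next(j) > 0) {
+ * ... process the current entry ...
+ * }
+ * (void) sd_journal_wait(j, (uint64_t) -1);
+ * }
+ */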
+
+_public_ int sd_journal_get_cutoff_realtime_usec(sd_journal *j, uint64_t *from, uint64_t *to) {
+ Iterator i;
+ JournalFile *f;
+ bool first = true;
+ uint64_t fmin = 0, tmax = 0;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(from || to, -EINVAL);
+ assert_return(from != to, -EINVAL);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files, i) {
+ usec_t fr, t;
+
+ r = journal_file_get_cutoff_realtime_usec(f, &fr, &t);
+ if (r == -ENOENT)
+ continue;
+ if (r < 0)
+ return r;
+ if (r == 0)
+ continue;
+
+ if (first) {
+ fmin = fr;
+ tmax = t;
+ first = false;
+ } else {
+ fmin = MIN(fr, fmin);
+ tmax = MAX(t, tmax);
+ }
+ }
+
+ if (from)
+ *from = fmin;
+ if (to)
+ *to = tmax;
+
+ return first ? 0 : 1;
+}
+
+_public_ int sd_journal_get_cutoff_monotonic_usec(sd_journal *j, sd_id128_t boot_id, uint64_t *from, uint64_t *to) {
+ Iterator i;
+ JournalFile *f;
+ bool found = false;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(from || to, -EINVAL);
+ assert_return(from != to, -EINVAL);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files, i) {
+ usec_t fr, t;
+
+ r = journal_file_get_cutoff_monotonic_usec(f, boot_id, &fr, &t);
+ if (r == -ENOENT)
+ continue;
+ if (r < 0)
+ return r;
+ if (r == 0)
+ continue;
+
+ if (found) {
+ if (from)
+ *from = MIN(fr, *from);
+ if (to)
+ *to = MAX(t, *to);
+ } else {
+ if (from)
+ *from = fr;
+ if (to)
+ *to = t;
+ found = true;
+ }
+ }
+
+ return found;
+}
+
+void journal_print_header(sd_journal *j) {
+ Iterator i;
+ JournalFile *f;
+ bool newline = false;
+
+ assert(j);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files, i) {
+ if (newline)
+ putchar('\n');
+ else
+ newline = true;
+
+ journal_file_print_header(f);
+ }
+}
+
+_public_ int sd_journal_get_usage(sd_journal *j, uint64_t *bytes) {
+ Iterator i;
+ JournalFile *f;
+ uint64_t sum = 0;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(bytes, -EINVAL);
+
+ ORDERED_HASHMAP_FOREACH(f, j->files, i) {
+ struct stat st;
+
+ if (fstat(f->fd, &st) < 0)
+ return -errno;
+
+ sum += (uint64_t) st.st_blocks * 512ULL;
+ }
+
+ *bytes = sum;
+ return 0;
+}
+
+_public_ int sd_journal_query_unique(sd_journal *j, const char *field) {
+ char *f;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(!isempty(field), -EINVAL);
+ assert_return(field_is_valid(field), -EINVAL);
+
+ f = strdup(field);
+ if (!f)
+ return -ENOMEM;
+
+ free(j->unique_field);
+ j->unique_field = f;
+ j->unique_file = NULL;
+ j->unique_offset = 0;
+ j->unique_file_lost = false;
+
+ return 0;
+}
+
+_public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_t *l) {
+ size_t k;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(data, -EINVAL);
+ assert_return(l, -EINVAL);
+ assert_return(j->unique_field, -EINVAL);
+
+ k = strlen(j->unique_field);
+
+ if (!j->unique_file) {
+ if (j->unique_file_lost)
+ return 0;
+
+ j->unique_file = ordered_hashmap_first(j->files);
+ if (!j->unique_file)
+ return 0;
+
+ j->unique_offset = 0;
+ }
+
+ for (;;) {
+ JournalFile *of;
+ Iterator i;
+ Object *o;
+ const void *odata;
+ size_t ol;
+ bool found;
+ int r;
+
+ /* Proceed to next data object in the field's linked list */
+ if (j->unique_offset == 0) {
+ r = journal_file_find_field_object(j->unique_file, j->unique_field, k, &o, NULL);
+ if (r < 0)
+ return r;
+
+ j->unique_offset = r > 0 ? le64toh(o->field.head_data_offset) : 0;
+ } else {
+ r = journal_file_move_to_object(j->unique_file, OBJECT_DATA, j->unique_offset, &o);
+ if (r < 0)
+ return r;
+
+ j->unique_offset = le64toh(o->data.next_field_offset);
+ }
+
+ /* We reached the end of the list? Then start again, with the next file */
+ if (j->unique_offset == 0) {
+ j->unique_file = ordered_hashmap_next(j->files, j->unique_file->path);
+ if (!j->unique_file)
+ return 0;
+
+ continue;
+ }
+
+ /* We do not use OBJECT_DATA context here, but OBJECT_UNUSED
+ * instead, so that we can look at this data object at the same
+ * time as one in another file */
+ r = journal_file_move_to_object(j->unique_file, OBJECT_UNUSED, j->unique_offset, &o);
+ if (r < 0)
+ return r;
+
+ /* Let's do the type check by hand, since we used 0 context above. */
+ if (o->object.type != OBJECT_DATA)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s:offset " OFSfmt ": object has type %d, expected %d",
+ j->unique_file->path,
+ j->unique_offset,
+ o->object.type, OBJECT_DATA);
+
+ r = return_data(j, j->unique_file, o, &odata, &ol);
+ if (r < 0)
+ return r;
+
+ /* Check if we have at least the field name and "=". */
+ if (ol <= k)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s:offset " OFSfmt ": object has size %zu, expected at least %zu",
+ j->unique_file->path,
+ j->unique_offset, ol, k + 1);
+
+ if (memcmp(odata, j->unique_field, k) || ((const char*) odata)[k] != '=')
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s:offset " OFSfmt ": object does not start with \"%s=\"",
+ j->unique_file->path,
+ j->unique_offset,
+ j->unique_field);
+
+ /* OK, now let's see if we already returned this data
+ * object by checking if it exists in the earlier
+ * traversed files. */
+ found = false;
+ ORDERED_HASHMAP_FOREACH(of, j->files, i) {
+ if (of == j->unique_file)
+ break;
+
+ /* Skip this file if it didn't have any fields indexed */
+ if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) && le64toh(of->header->n_fields) <= 0)
+ continue;
+
+ r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ r = return_data(j, j->unique_file, o, data, l);
+ if (r < 0)
+ return r;
+
+ return 1;
+ }
+}
+
+_public_ void sd_journal_restart_unique(sd_journal *j) {
+ if (!j)
+ return;
+
+ j->unique_file = NULL;
+ j->unique_offset = 0;
+ j->unique_file_lost = false;
+}
+
+_public_ int sd_journal_enumerate_fields(sd_journal *j, const char **field) {
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(field, -EINVAL);
+
+ if (!j->fields_file) {
+ if (j->fields_file_lost)
+ return 0;
+
+ j->fields_file = ordered_hashmap_first(j->files);
+ if (!j->fields_file)
+ return 0;
+
+ j->fields_hash_table_index = 0;
+ j->fields_offset = 0;
+ }
+
+ for (;;) {
+ JournalFile *f, *of;
+ Iterator i;
+ uint64_t m;
+ Object *o;
+ size_t sz;
+ bool found;
+
+ f = j->fields_file;
+
+ if (j->fields_offset == 0) {
+ bool eof = false;
+
+ /* We are not yet positioned at any field. Let's pick the first one */
+ r = journal_file_map_field_hash_table(f);
+ if (r < 0)
+ return r;
+
+ m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
+ for (;;) {
+ if (j->fields_hash_table_index >= m) {
+ /* Reached the end of the hash table, go to the next file. */
+ eof = true;
+ break;
+ }
+
+ j->fields_offset = le64toh(f->field_hash_table[j->fields_hash_table_index].head_hash_offset);
+
+ if (j->fields_offset != 0)
+ break;
+
+ /* Empty hash table bucket, go to next one */
+ j->fields_hash_table_index++;
+ }
+
+ if (eof) {
+ /* Proceed with next file */
+ j->fields_file = ordered_hashmap_next(j->files, f->path);
+ if (!j->fields_file) {
+ *field = NULL;
+ return 0;
+ }
+
+ j->fields_offset = 0;
+ j->fields_hash_table_index = 0;
+ continue;
+ }
+
+ } else {
+ /* We are already positioned at a field, so let's figure out the next field from it */
+
+ r = journal_file_move_to_object(f, OBJECT_FIELD, j->fields_offset, &o);
+ if (r < 0)
+ return r;
+
+ j->fields_offset = le64toh(o->field.next_hash_offset);
+ if (j->fields_offset == 0) {
+ /* Reached the end of the hash table chain */
+ j->fields_hash_table_index++;
+ continue;
+ }
+ }
+
+ /* We use OBJECT_UNUSED here, so that the iterator below doesn't remove our mmap window */
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, j->fields_offset, &o);
+ if (r < 0)
+ return r;
+
+ /* Because we used OBJECT_UNUSED above, we need to do our type check manually */
+ if (o->object.type != OBJECT_FIELD)
+ return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
+ "%s:offset " OFSfmt ": object has type %i, expected %i",
+ f->path, j->fields_offset,
+ o->object.type, OBJECT_FIELD);
+
+ sz = le64toh(o->object.size) - offsetof(Object, field.payload);
+
+ /* Let's see if we already returned this field name before. */
+ found = false;
+ ORDERED_HASHMAP_FOREACH(of, j->files, i) {
+ if (of == f)
+ break;
+
+ /* Skip this file if it didn't have any fields indexed */
+ if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) && le64toh(of->header->n_fields) <= 0)
+ continue;
+
+ r = journal_file_find_field_object_with_hash(of, o->field.payload, sz, le64toh(o->field.hash), NULL, NULL);
+ if (r < 0)
+ return r;
+ if (r > 0) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found)
+ continue;
+
+ /* Check if this is really a valid string containing no NUL byte */
+ if (memchr(o->field.payload, 0, sz))
+ return -EBADMSG;
+
+ if (sz > j->data_threshold)
+ sz = j->data_threshold;
+
+ if (!GREEDY_REALLOC(j->fields_buffer, j->fields_buffer_allocated, sz + 1))
+ return -ENOMEM;
+
+ memcpy(j->fields_buffer, o->field.payload, sz);
+ j->fields_buffer[sz] = 0;
+
+ if (!field_is_valid(j->fields_buffer))
+ return -EBADMSG;
+
+ *field = j->fields_buffer;
+ return 1;
+ }
+}
+
+_public_ void sd_journal_restart_fields(sd_journal *j) {
+ if (!j)
+ return;
+
+ j->fields_file = NULL;
+ j->fields_hash_table_index = 0;
+ j->fields_offset = 0;
+ j->fields_file_lost = false;
+}
+
+_public_ int sd_journal_reliable_fd(sd_journal *j) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ return !j->on_network;
+}
+
+static char *lookup_field(const char *field, void *userdata) {
+ sd_journal *j = userdata;
+ const void *data;
+ size_t size, d;
+ int r;
+
+ assert(field);
+ assert(j);
+
+ r = sd_journal_get_data(j, field, &data, &size);
+ if (r < 0 ||
+ size > REPLACE_VAR_MAX)
+ return strdup(field);
+
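+ /* The entry data has the form "FIELD=value"; skip the field name plus '=' and return just the value. */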
+ d = strlen(field) + 1;
+
+ return strndup((const char*) data + d, size - d);
+}
+
+_public_ int sd_journal_get_catalog(sd_journal *j, char **ret) {
+ const void *data;
+ size_t size;
+ sd_id128_t id;
+ _cleanup_free_ char *text = NULL, *cid = NULL;
+ char *t;
+ int r;
+
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(ret, -EINVAL);
+
+ r = sd_journal_get_data(j, "MESSAGE_ID", &data, &size);
+ if (r < 0)
+ return r;
+
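+ /* Skip the "MESSAGE_ID=" prefix (11 bytes) and keep only the ID string. */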
+ cid = strndup((const char*) data + 11, size - 11);
+ if (!cid)
+ return -ENOMEM;
+
+ r = sd_id128_from_string(cid, &id);
+ if (r < 0)
+ return r;
+
+ r = catalog_get(CATALOG_DATABASE, id, &text);
+ if (r < 0)
+ return r;
+
+ t = replace_var(text, lookup_field, j);
+ if (!t)
+ return -ENOMEM;
+
+ *ret = t;
+ return 0;
+}
+
+_public_ int sd_journal_get_catalog_for_message_id(sd_id128_t id, char **ret) {
+ assert_return(ret, -EINVAL);
+
+ return catalog_get(CATALOG_DATABASE, id, ret);
+}
+
+_public_ int sd_journal_set_data_threshold(sd_journal *j, size_t sz) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+
+ j->data_threshold = sz;
+ return 0;
+}
+
+_public_ int sd_journal_get_data_threshold(sd_journal *j, size_t *sz) {
+ assert_return(j, -EINVAL);
+ assert_return(!journal_pid_changed(j), -ECHILD);
+ assert_return(sz, -EINVAL);
+
+ *sz = j->data_threshold;
+ return 0;
+}
+
+_public_ int sd_journal_has_runtime_files(sd_journal *j) {
+ assert_return(j, -EINVAL);
+
+ return j->has_runtime_files;
+}
+
+_public_ int sd_journal_has_persistent_files(sd_journal *j) {
+ assert_return(j, -EINVAL);
+
+ return j->has_persistent_files;
+}
diff --git a/src/journal/test-audit-type.c b/src/journal/test-audit-type.c
new file mode 100644
index 0000000..20e654b
--- /dev/null
+++ b/src/journal/test-audit-type.c
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <stdio.h>
+#include <linux/audit.h>
+
+#include "audit-type.h"
+
+static void print_audit_label(int i) {
+ const char *name;
+
+ name = audit_type_name_alloca(i);
+ /* This is a separate function only because of alloca */
+ printf("%i → %s → %s\n", i, audit_type_to_string(i), name);
+}
+
+static void test_audit_type(void) {
+ int i;
+
+ for (i = 0; i <= AUDIT_KERNEL; i++)
+ print_audit_label(i);
+}
+
+int main(int argc, char **argv) {
+ test_audit_type();
+}
diff --git a/src/journal/test-catalog.c b/src/journal/test-catalog.c
new file mode 100644
index 0000000..192bb0c
--- /dev/null
+++ b/src/journal/test-catalog.c
@@ -0,0 +1,240 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <locale.h>
+#include <unistd.h>
+
+#include "sd-messages.h"
+
+#include "alloc-util.h"
+#include "catalog.h"
+#include "fd-util.h"
+#include "fs-util.h"
+#include "log.h"
+#include "macro.h"
+#include "path-util.h"
+#include "string-util.h"
+#include "strv.h"
+#include "tests.h"
+#include "tmpfile-util.h"
+#include "util.h"
+
+static char** catalog_dirs = NULL;
+static const char *no_catalog_dirs[] = {
+ "/bin/hopefully/with/no/catalog",
+ NULL
+};
+
+static Hashmap* test_import(const char* contents, ssize_t size, int code) {
+ _cleanup_(unlink_tempfilep) char name[] = "/tmp/test-catalog.XXXXXX";
+ _cleanup_close_ int fd;
+ Hashmap *h;
+
+ if (size < 0)
+ size = strlen(contents);
+
+ assert_se(h = hashmap_new(&catalog_hash_ops));
+
+ fd = mkostemp_safe(name);
+ assert_se(fd >= 0);
+ assert_se(write(fd, contents, size) == size);
+
+ assert_se(catalog_import_file(h, name) == code);
+
+ return h;
+}
+
+static void test_catalog_import_invalid(void) {
+ _cleanup_hashmap_free_free_free_ Hashmap *h = NULL;
+
+ h = test_import("xxx", -1, -EINVAL);
+ assert_se(hashmap_isempty(h));
+}
+
+static void test_catalog_import_badid(void) {
+ _cleanup_hashmap_free_free_free_ Hashmap *h = NULL;
+ const char *input =
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededede\n" \
+"Subject: message\n" \
+"\n" \
+"payload\n";
+ h = test_import(input, -1, -EINVAL);
+}
+
+static void test_catalog_import_one(void) {
+ _cleanup_hashmap_free_free_free_ Hashmap *h = NULL;
+ char *payload;
+ Iterator j;
+
+ const char *input =
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: message\n" \
+"\n" \
+"payload\n";
+ const char *expect =
+"Subject: message\n" \
+"\n" \
+"payload\n";
+
+ h = test_import(input, -1, 0);
+ assert_se(hashmap_size(h) == 1);
+
+ HASHMAP_FOREACH(payload, h, j) {
+ printf("expect: %s\n", expect);
+ printf("actual: %s\n", payload);
+ assert_se(streq(expect, payload));
+ }
+}
+
+static void test_catalog_import_merge(void) {
+ _cleanup_hashmap_free_free_free_ Hashmap *h = NULL;
+ char *payload;
+ Iterator j;
+
+ const char *input =
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: message\n" \
+"Defined-By: me\n" \
+"\n" \
+"payload\n" \
+"\n" \
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: override subject\n" \
+"X-Header: hello\n" \
+"\n" \
+"override payload\n";
+
+ const char *combined =
+"Subject: override subject\n" \
+"X-Header: hello\n" \
+"Subject: message\n" \
+"Defined-By: me\n" \
+"\n" \
+"override payload\n";
+
+ h = test_import(input, -1, 0);
+ assert_se(hashmap_size(h) == 1);
+
+ HASHMAP_FOREACH(payload, h, j) {
+ assert_se(streq(combined, payload));
+ }
+}
+
+static void test_catalog_import_merge_no_body(void) {
+ _cleanup_hashmap_free_free_free_ Hashmap *h = NULL;
+ char *payload;
+ Iterator j;
+
+ const char *input =
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: message\n" \
+"Defined-By: me\n" \
+"\n" \
+"payload\n" \
+"\n" \
+"-- 0027229ca0644181a76c4e92458afaff dededededededededededededededed\n" \
+"Subject: override subject\n" \
+"X-Header: hello\n" \
+"\n";
+
+ const char *combined =
+"Subject: override subject\n" \
+"X-Header: hello\n" \
+"Subject: message\n" \
+"Defined-By: me\n" \
+"\n" \
+"payload\n";
+
+ h = test_import(input, -1, 0);
+ assert_se(hashmap_size(h) == 1);
+
+ HASHMAP_FOREACH(payload, h, j) {
+ assert_se(streq(combined, payload));
+ }
+}
+
+static void test_catalog_update(const char *database) {
+ int r;
+
+ /* Test what happens if there are no files. */
+ r = catalog_update(database, NULL, NULL);
+ assert_se(r == 0);
+
+ /* Test what happens if there are no files in the directory. */
+ r = catalog_update(database, NULL, no_catalog_dirs);
+ assert_se(r == 0);
+
+ /* Make sure that we at least have some files loaded or the
+ * catalog_list below will fail. */
+ r = catalog_update(database, NULL, (const char * const *) catalog_dirs);
+ assert_se(r == 0);
+}
+
+static void test_catalog_file_lang(void) {
+ _cleanup_free_ char *lang = NULL, *lang2 = NULL, *lang3 = NULL, *lang4 = NULL;
+
+ assert_se(catalog_file_lang("systemd.de_DE.catalog", &lang) == 1);
+ assert_se(streq(lang, "de_DE"));
+
+ assert_se(catalog_file_lang("systemd..catalog", &lang2) == 0);
+ assert_se(lang2 == NULL);
+
+ assert_se(catalog_file_lang("systemd.fr.catalog", &lang2) == 1);
+ assert_se(streq(lang2, "fr"));
+
+ assert_se(catalog_file_lang("systemd.fr.catalog.gz", &lang3) == 0);
+ assert_se(lang3 == NULL);
+
+ assert_se(catalog_file_lang("systemd.01234567890123456789012345678901.catalog", &lang3) == 0);
+ assert_se(lang3 == NULL);
+
+ assert_se(catalog_file_lang("systemd.0123456789012345678901234567890.catalog", &lang3) == 1);
+ assert_se(streq(lang3, "0123456789012345678901234567890"));
+
+ assert_se(catalog_file_lang("/x/y/systemd.catalog", &lang4) == 0);
+ assert_se(lang4 == NULL);
+
+ assert_se(catalog_file_lang("/x/y/systemd.ru_RU.catalog", &lang4) == 1);
+ assert_se(streq(lang4, "ru_RU"));
+}
+
+int main(int argc, char *argv[]) {
+ _cleanup_(unlink_tempfilep) char database[] = "/tmp/test-catalog.XXXXXX";
+ _cleanup_free_ char *text = NULL;
+ int r;
+
+ setlocale(LC_ALL, "de_DE.UTF-8");
+
+ test_setup_logging(LOG_DEBUG);
+
+ /* If test-catalog is located in the build directory, then use the catalogs from there.
+ * If it is not, e.g. when installed by the systemd-tests package, then use the installed catalogs. */
+ catalog_dirs = STRV_MAKE(get_catalog_dir());
+
+ assert_se(access(catalog_dirs[0], F_OK) >= 0);
+ log_notice("Using catalog directory '%s'", catalog_dirs[0]);
+
+ test_catalog_file_lang();
+
+ test_catalog_import_invalid();
+ test_catalog_import_badid();
+ test_catalog_import_one();
+ test_catalog_import_merge();
+ test_catalog_import_merge_no_body();
+
+ assert_se(mkostemp_safe(database) >= 0);
+
+ test_catalog_update(database);
+
+ r = catalog_list(stdout, database, true);
+ assert_se(r >= 0);
+
+ r = catalog_list(stdout, database, false);
+ assert_se(r >= 0);
+
+ assert_se(catalog_get(database, SD_MESSAGE_COREDUMP, &text) >= 0);
+ printf(">>>%s<<<\n", text);
+
+ return 0;
+}
diff --git a/src/journal/test-compress-benchmark.c b/src/journal/test-compress-benchmark.c
new file mode 100644
index 0000000..7f13b61
--- /dev/null
+++ b/src/journal/test-compress-benchmark.c
@@ -0,0 +1,175 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include "alloc-util.h"
+#include "compress.h"
+#include "env-util.h"
+#include "macro.h"
+#include "parse-util.h"
+#include "process-util.h"
+#include "random-util.h"
+#include "string-util.h"
+#include "tests.h"
+#include "util.h"
+
+typedef int (compress_t)(const void *src, uint64_t src_size, void *dst,
+ size_t dst_alloc_size, size_t *dst_size);
+typedef int (decompress_t)(const void *src, uint64_t src_size,
+ void **dst, size_t *dst_alloc_size, size_t* dst_size, size_t dst_max);
+
+#if HAVE_XZ || HAVE_LZ4
+
+static usec_t arg_duration;
+static size_t arg_start;
+
+#define MAX_SIZE (1024*1024LU)
+#define PRIME 1048571 /* A prime just below one megabyte, chosen so that PRIME % 4 == 3 */
+
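+/* Map x to a pseudo-random but deterministic permutation of [0, PRIME) using the quadratic-residue trick. */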
+static size_t _permute(size_t x) {
+ size_t residue;
+
+ if (x >= PRIME)
+ return x;
+
+ residue = x*x % PRIME;
+ if (x <= PRIME / 2)
+ return residue;
+ else
+ return PRIME - residue;
+}
+
+static size_t permute(size_t x) {
+ return _permute((_permute(x) + arg_start) % MAX_SIZE ^ 0xFF345);
+}
+
+static char* make_buf(size_t count, const char *type) {
+ char *buf;
+ size_t i;
+
+ buf = malloc(count);
+ assert_se(buf);
+
+ if (streq(type, "zeros"))
+ memzero(buf, count);
+ else if (streq(type, "simple"))
+ for (i = 0; i < count; i++)
+ buf[i] = 'a' + i % ('z' - 'a' + 1);
+ else if (streq(type, "random")) {
+ size_t step = count / 10;
+
+ random_bytes(buf, step);
+ memzero(buf + 1*step, step);
+ random_bytes(buf + 2*step, step);
+ memzero(buf + 3*step, step);
+ random_bytes(buf + 4*step, step);
+ memzero(buf + 5*step, step);
+ random_bytes(buf + 6*step, step);
+ memzero(buf + 7*step, step);
+ random_bytes(buf + 8*step, step);
+ memzero(buf + 9*step, step);
+ } else
+ assert_not_reached("here");
+
+ return buf;
+}
+
+static void test_compress_decompress(const char* label, const char* type,
+ compress_t compress, decompress_t decompress) {
+ usec_t n, n2 = 0;
+ float dt;
+
+ _cleanup_free_ char *text, *buf;
+ _cleanup_free_ void *buf2 = NULL;
+ size_t buf2_allocated = 0;
+ size_t skipped = 0, compressed = 0, total = 0;
+
+ text = make_buf(MAX_SIZE, type);
+ buf = calloc(MAX_SIZE + 1, 1);
+ assert_se(text && buf);
+
+ n = now(CLOCK_MONOTONIC);
+
+ for (size_t i = 0; i <= MAX_SIZE; i++) {
+ size_t j = 0, k = 0, size;
+ int r;
+
+ size = permute(i);
+ if (size == 0)
+ continue;
+
+ log_debug("%s %zu %zu", type, i, size);
+
+ memzero(buf, MIN(size + 1000, MAX_SIZE));
+
+ r = compress(text, size, buf, size, &j);
+ /* Compression is expected to succeed, except for very small inputs or incompressible random data */
+ assert_se(r == 0 || (size < 2048 && r == -ENOBUFS) || streq(type, "random"));
+
+ /* check for overwrites */
+ assert_se(buf[size] == 0);
+ if (r != 0) {
+ skipped += size;
+ continue;
+ }
+
+ assert_se(j > 0);
+ if (j >= size)
+ log_error("%s \"compressed\" %zu -> %zu", label, size, j);
+
+ r = decompress(buf, j, &buf2, &buf2_allocated, &k, 0);
+ assert_se(r == 0);
+ assert_se(buf2_allocated >= k);
+ assert_se(k == size);
+
+ assert_se(memcmp(text, buf2, size) == 0);
+
+ total += size;
+ compressed += j;
+
+ n2 = now(CLOCK_MONOTONIC);
+ if (n2 - n > arg_duration)
+ break;
+ }
+
+ dt = (n2-n) / 1e6;
+
+ log_info("%s/%s: compressed & decompressed %zu bytes in %.2fs (%.2fMiB/s), "
+ "mean compression %.2f%%, skipped %zu bytes",
+ label, type, total, dt,
+ total / 1024. / 1024 / dt,
+ 100 - compressed * 100. / total,
+ skipped);
+}
+#endif
+
+int main(int argc, char *argv[]) {
+#if HAVE_XZ || HAVE_LZ4
+ test_setup_logging(LOG_INFO);
+
+ if (argc >= 2) {
+ unsigned x;
+
+ assert_se(safe_atou(argv[1], &x) >= 0);
+ arg_duration = x * USEC_PER_SEC;
+ } else
+ arg_duration = slow_tests_enabled() ?
+ 2 * USEC_PER_SEC : USEC_PER_SEC / 50;
+
+ if (argc == 3)
+ (void) safe_atozu(argv[2], &arg_start);
+ else
+ arg_start = getpid_cached();
+
+ const char *i;
+ NULSTR_FOREACH(i, "zeros\0simple\0random\0") {
+#if HAVE_XZ
+ test_compress_decompress("XZ", i, compress_blob_xz, decompress_blob_xz);
+#endif
+#if HAVE_LZ4
+ test_compress_decompress("LZ4", i, compress_blob_lz4, decompress_blob_lz4);
+#endif
+ }
+ return 0;
+#else
+ return log_tests_skipped("No compression feature is enabled");
+#endif
+}
diff --git a/src/journal/test-compress.c b/src/journal/test-compress.c
new file mode 100644
index 0000000..1b050b7
--- /dev/null
+++ b/src/journal/test-compress.c
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#if HAVE_LZ4
+#include <lz4.h>
+#endif
+
+#include "alloc-util.h"
+#include "compress.h"
+#include "fd-util.h"
+#include "fs-util.h"
+#include "macro.h"
+#include "path-util.h"
+#include "random-util.h"
+#include "tests.h"
+#include "tmpfile-util.h"
+#include "util.h"
+
+#if HAVE_XZ
+# define XZ_OK 0
+#else
+# define XZ_OK -EPROTONOSUPPORT
+#endif
+
+#if HAVE_LZ4
+# define LZ4_OK 0
+#else
+# define LZ4_OK -EPROTONOSUPPORT
+#endif
+
+typedef int (compress_blob_t)(const void *src, uint64_t src_size,
+ void *dst, size_t dst_alloc_size, size_t *dst_size);
+typedef int (decompress_blob_t)(const void *src, uint64_t src_size,
+ void **dst, size_t *dst_alloc_size,
+ size_t* dst_size, size_t dst_max);
+typedef int (decompress_sw_t)(const void *src, uint64_t src_size,
+ void **buffer, size_t *buffer_size,
+ const void *prefix, size_t prefix_len,
+ uint8_t extra);
+
+typedef int (compress_stream_t)(int fdf, int fdt, uint64_t max_bytes);
+typedef int (decompress_stream_t)(int fdf, int fdt, uint64_t max_size);
+
+#if HAVE_XZ || HAVE_LZ4
+static void test_compress_decompress(int compression,
+ compress_blob_t compress,
+ decompress_blob_t decompress,
+ const char *data,
+ size_t data_len,
+ bool may_fail) {
+ char compressed[512];
+ size_t csize, usize = 0;
+ _cleanup_free_ char *decompressed = NULL;
+ int r;
+
+ log_info("/* testing %s %s blob compression/decompression */",
+ object_compressed_to_string(compression), data);
+
+ r = compress(data, data_len, compressed, sizeof(compressed), &csize);
+ if (r == -ENOBUFS) {
+ log_info_errno(r, "compression failed: %m");
+ assert_se(may_fail);
+ } else {
+ assert_se(r == 0);
+ r = decompress(compressed, csize,
+ (void **) &decompressed, &usize, &csize, 0);
+ assert_se(r == 0);
+ assert_se(decompressed);
+ assert_se(memcmp(decompressed, data, data_len) == 0);
+ }
+
+ r = decompress("garbage", 7,
+ (void **) &decompressed, &usize, &csize, 0);
+ assert_se(r < 0);
+
+ /* make sure to have the minimal lz4 compressed size */
+ r = decompress("00000000\1g", 9,
+ (void **) &decompressed, &usize, &csize, 0);
+ assert_se(r < 0);
+
+ r = decompress("\100000000g", 9,
+ (void **) &decompressed, &usize, &csize, 0);
+ assert_se(r < 0);
+
+ memzero(decompressed, usize);
+}
+
+static void test_decompress_startswith(int compression,
+ compress_blob_t compress,
+ decompress_sw_t decompress_sw,
+ const char *data,
+ size_t data_len,
+ bool may_fail) {
+
+ char *compressed;
+ _cleanup_free_ char *compressed1 = NULL, *compressed2 = NULL, *decompressed = NULL;
+ size_t csize, usize = 0, len;
+ int r;
+
+ log_info("/* testing decompress_startswith with %s on %.20s text */",
+ object_compressed_to_string(compression), data);
+
+#define BUFSIZE_1 512
+#define BUFSIZE_2 20000
+
+ compressed = compressed1 = malloc(BUFSIZE_1);
+ assert_se(compressed1);
+ r = compress(data, data_len, compressed, BUFSIZE_1, &csize);
+ if (r == -ENOBUFS) {
+ log_info_errno(r, "compression failed: %m");
+ assert_se(may_fail);
+
+ compressed = compressed2 = malloc(BUFSIZE_2);
+ assert_se(compressed2);
+ r = compress(data, data_len, compressed, BUFSIZE_2, &csize);
+ assert_se(r == 0);
+ }
+ assert_se(r == 0);
+
+ len = strlen(data);
+
+ r = decompress_sw(compressed, csize, (void **) &decompressed, &usize, data, len, '\0');
+ assert_se(r > 0);
+ r = decompress_sw(compressed, csize, (void **) &decompressed, &usize, data, len, 'w');
+ assert_se(r == 0);
+ r = decompress_sw(compressed, csize, (void **) &decompressed, &usize, "barbarbar", 9, ' ');
+ assert_se(r == 0);
+ r = decompress_sw(compressed, csize, (void **) &decompressed, &usize, data, len - 1, data[len-1]);
+ assert_se(r > 0);
+ r = decompress_sw(compressed, csize, (void **) &decompressed, &usize, data, len - 1, 'w');
+ assert_se(r == 0);
+ r = decompress_sw(compressed, csize, (void **) &decompressed, &usize, data, len, '\0');
+ assert_se(r > 0);
+}
+
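+ /* st_blocks is in 512-byte units; this measures the space actually allocated on disk rather than the apparent file size. */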
+static void test_decompress_startswith_short(int compression,
+ compress_blob_t compress,
+ decompress_sw_t decompress_sw) {
+
+#define TEXT "HUGE=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
+
+ char buf[1024];
+ size_t i, csize;
+ int r;
+
+ log_info("/* %s with %s */", __func__, object_compressed_to_string(compression));
+
+ r = compress(TEXT, sizeof TEXT, buf, sizeof buf, &csize);
+ assert_se(r == 0);
+
+ for (i = 1; i < strlen(TEXT); i++) {
+ size_t alloc_size = i;
+ _cleanup_free_ void *buf2 = NULL;
+
+ assert_se(buf2 = malloc(i));
+
+ assert_se(decompress_sw(buf, csize, &buf2, &alloc_size, TEXT, i, TEXT[i]) == 1);
+ assert_se(decompress_sw(buf, csize, &buf2, &alloc_size, TEXT, i, 'y') == 0);
+ }
+}
+
+static void test_compress_stream(int compression,
+ const char* cat,
+ compress_stream_t compress,
+ decompress_stream_t decompress,
+ const char *srcfile) {
+
+ _cleanup_close_ int src = -1, dst = -1, dst2 = -1;
+ _cleanup_(unlink_tempfilep) char
+ pattern[] = "/tmp/systemd-test.compressed.XXXXXX",
+ pattern2[] = "/tmp/systemd-test.compressed.XXXXXX";
+ int r;
+ _cleanup_free_ char *cmd = NULL, *cmd2 = NULL;
+ struct stat st = {};
+
+ r = find_binary(cat, NULL);
+ if (r < 0) {
+ log_error_errno(r, "Skipping %s, could not find %s binary: %m", __func__, cat);
+ return;
+ }
+
+ log_debug("/* testing %s compression */",
+ object_compressed_to_string(compression));
+
+ log_debug("/* create source from %s */", srcfile);
+
+ assert_se((src = open(srcfile, O_RDONLY|O_CLOEXEC)) >= 0);
+
+ log_debug("/* test compression */");
+
+ assert_se((dst = mkostemp_safe(pattern)) >= 0);
+
+ assert_se(compress(src, dst, -1) == 0);
+
+ if (cat) {
+ assert_se(asprintf(&cmd, "%s %s | diff %s -", cat, pattern, srcfile) > 0);
+ assert_se(system(cmd) == 0);
+ }
+
+ log_debug("/* test decompression */");
+
+ assert_se((dst2 = mkostemp_safe(pattern2)) >= 0);
+
+ assert_se(stat(srcfile, &st) == 0);
+
+ assert_se(lseek(dst, 0, SEEK_SET) == 0);
+ r = decompress(dst, dst2, st.st_size);
+ assert_se(r == 0);
+
+ assert_se(asprintf(&cmd2, "diff %s %s", srcfile, pattern2) > 0);
+ assert_se(system(cmd2) == 0);
+
+ log_debug("/* test faulty decompression */");
+
+ assert_se(lseek(dst, 1, SEEK_SET) == 1);
+ r = decompress(dst, dst2, st.st_size);
+ assert_se(IN_SET(r, 0, -EBADMSG));
+
+ assert_se(lseek(dst, 0, SEEK_SET) == 0);
+ assert_se(lseek(dst2, 0, SEEK_SET) == 0);
+ r = decompress(dst, dst2, st.st_size - 1);
+ assert_se(r == -EFBIG);
+}
+#endif
+
+#if HAVE_LZ4
+static void test_lz4_decompress_partial(void) {
+ char buf[20000], buf2[100];
+ size_t buf_size = sizeof(buf), compressed;
+ int r;
+ _cleanup_free_ char *huge = NULL;
+
+#define HUGE_SIZE (4096*1024)
+ assert_se(huge = malloc(HUGE_SIZE));
+ memset(huge, 'x', HUGE_SIZE);
+ memcpy(huge, "HUGE=", 5);
+
+ r = LZ4_compress_default(huge, buf, HUGE_SIZE, buf_size);
+ assert_se(r >= 0);
+ compressed = r;
+ log_info("Compressed %i → %zu", HUGE_SIZE, compressed);
+
+ r = LZ4_decompress_safe(buf, huge, r, HUGE_SIZE);
+ assert_se(r >= 0);
+ log_info("Decompressed → %i", r);
+
+ r = LZ4_decompress_safe_partial(buf, huge,
+ compressed,
+ 12, HUGE_SIZE);
+ assert_se(r >= 0);
+ log_info("Decompressed partial %i/%i → %i", 12, HUGE_SIZE, r);
+
+ for (size_t size = 1; size < sizeof(buf2); size++) {
+ /* This failed in older lz4s but works in newer ones. */
+ r = LZ4_decompress_safe_partial(buf, buf2, compressed, size, size);
+ log_info("Decompressed partial %zu/%zu → %i (%s)", size, size, r,
+ r < 0 ? "bad" : "good");
+ if (r >= 0 && LZ4_versionNumber() >= 10803)
+ /* lz4 <= 1.8.2 should fail that test, let's only check for newer ones */
+ assert_se(memcmp(buf2, huge, r) == 0);
+ }
+}
+#endif
+
+int main(int argc, char *argv[]) {
+#if HAVE_XZ || HAVE_LZ4
+ const char text[] =
+ "text\0foofoofoofoo AAAA aaaaaaaaa ghost busters barbarbar FFF"
+ "foofoofoofoo AAAA aaaaaaaaa ghost busters barbarbar FFF";
+
+ /* The file to test compression on can be specified as the first argument */
+ const char *srcfile = argc > 1 ? argv[1] : argv[0];
+
+ char data[512] = "random\0";
+
+ char huge[4096*1024];
+ memset(huge, 'x', sizeof(huge));
+ memcpy(huge, "HUGE=", 5);
+ char_array_0(huge);
+
+ test_setup_logging(LOG_DEBUG);
+
+ random_bytes(data + 7, sizeof(data) - 7);
+
+#if HAVE_XZ
+ test_compress_decompress(OBJECT_COMPRESSED_XZ, compress_blob_xz, decompress_blob_xz,
+ text, sizeof(text), false);
+ test_compress_decompress(OBJECT_COMPRESSED_XZ, compress_blob_xz, decompress_blob_xz,
+ data, sizeof(data), true);
+
+ test_decompress_startswith(OBJECT_COMPRESSED_XZ,
+ compress_blob_xz, decompress_startswith_xz,
+ text, sizeof(text), false);
+ test_decompress_startswith(OBJECT_COMPRESSED_XZ,
+ compress_blob_xz, decompress_startswith_xz,
+ data, sizeof(data), true);
+ test_decompress_startswith(OBJECT_COMPRESSED_XZ,
+ compress_blob_xz, decompress_startswith_xz,
+ huge, sizeof(huge), true);
+
+ test_compress_stream(OBJECT_COMPRESSED_XZ, "xzcat",
+ compress_stream_xz, decompress_stream_xz, srcfile);
+
+ test_decompress_startswith_short(OBJECT_COMPRESSED_XZ, compress_blob_xz, decompress_startswith_xz);
+
+#else
+ log_info("/* XZ test skipped */");
+#endif
+
+#if HAVE_LZ4
+ test_compress_decompress(OBJECT_COMPRESSED_LZ4, compress_blob_lz4, decompress_blob_lz4,
+ text, sizeof(text), false);
+ test_compress_decompress(OBJECT_COMPRESSED_LZ4, compress_blob_lz4, decompress_blob_lz4,
+ data, sizeof(data), true);
+
+ test_decompress_startswith(OBJECT_COMPRESSED_LZ4,
+ compress_blob_lz4, decompress_startswith_lz4,
+ text, sizeof(text), false);
+ test_decompress_startswith(OBJECT_COMPRESSED_LZ4,
+ compress_blob_lz4, decompress_startswith_lz4,
+ data, sizeof(data), true);
+ test_decompress_startswith(OBJECT_COMPRESSED_LZ4,
+ compress_blob_lz4, decompress_startswith_lz4,
+ huge, sizeof(huge), true);
+
+ test_compress_stream(OBJECT_COMPRESSED_LZ4, "lz4cat",
+ compress_stream_lz4, decompress_stream_lz4, srcfile);
+
+ test_lz4_decompress_partial();
+
+ test_decompress_startswith_short(OBJECT_COMPRESSED_LZ4, compress_blob_lz4, decompress_startswith_lz4);
+
+#else
+ log_info("/* LZ4 test skipped */");
+#endif
+
+ return 0;
+#else
+ log_info("/* XZ and LZ4 tests skipped */");
+ return EXIT_TEST_SKIP;
+#endif
+}
diff --git a/src/journal/test-journal-config.c b/src/journal/test-journal-config.c
new file mode 100644
index 0000000..1482490
--- /dev/null
+++ b/src/journal/test-journal-config.c
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <stdbool.h>
+
+#include "journald-server.h"
+
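+/* Each check parses "str" into a JournalCompressOptions pre-initialized to {true, 111}; 111 is a sentinel that lets us detect when the parser leaves the threshold untouched. */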
+#define _COMPRESS_PARSE_CHECK(str, enab, thresh, varname) \
+ do { \
+ JournalCompressOptions varname = {true, 111}; \
+ config_parse_compress("", "", 0, "", 0, "", 0, str, \
+ &varname, NULL); \
+ assert_se((enab) == varname.enabled); \
+ if (varname.enabled) \
+ assert_se((thresh) == varname.threshold_bytes); \
+ } while (0)
+
+#define COMPRESS_PARSE_CHECK(str, enabled, threshold) \
+ _COMPRESS_PARSE_CHECK(str, enabled, threshold, conf##__COUNTER__)
+
+static void test_config_compress(void) {
+ COMPRESS_PARSE_CHECK("yes", true, 111);
+ COMPRESS_PARSE_CHECK("no", false, 111);
+ COMPRESS_PARSE_CHECK("y", true, 111);
+ COMPRESS_PARSE_CHECK("n", false, 111);
+ COMPRESS_PARSE_CHECK("true", true, 111);
+ COMPRESS_PARSE_CHECK("false", false, 111);
+ COMPRESS_PARSE_CHECK("t", true, 111);
+ COMPRESS_PARSE_CHECK("f", false, 111);
+ COMPRESS_PARSE_CHECK("on", true, 111);
+ COMPRESS_PARSE_CHECK("off", false, 111);
+
+ /* Weird size/bool overlapping case. We preserve backward compatibility instead of assuming these are byte
+ * counts. */
+ COMPRESS_PARSE_CHECK("1", true, 111);
+ COMPRESS_PARSE_CHECK("0", false, 111);
+
+ /* IEC sizing */
+ COMPRESS_PARSE_CHECK("1B", true, 1);
+ COMPRESS_PARSE_CHECK("1K", true, 1024);
+ COMPRESS_PARSE_CHECK("1M", true, 1024 * 1024);
+ COMPRESS_PARSE_CHECK("1G", true, 1024 * 1024 * 1024);
+
+ /* Invalid Case */
+ COMPRESS_PARSE_CHECK("-1", true, 111);
+ COMPRESS_PARSE_CHECK("blah blah", true, 111);
+ COMPRESS_PARSE_CHECK("", true, (uint64_t)-1);
+}
+
+int main(int argc, char *argv[]) {
+ test_config_compress();
+
+ return 0;
+}
diff --git a/src/journal/test-journal-enum.c b/src/journal/test-journal-enum.c
new file mode 100644
index 0000000..8e83992
--- /dev/null
+++ b/src/journal/test-journal-enum.c
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <stdio.h>
+
+#include "sd-journal.h"
+
+#include "journal-internal.h"
+#include "log.h"
+#include "macro.h"
+#include "tests.h"
+
+int main(int argc, char *argv[]) {
+ unsigned n = 0;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY) >= 0);
+
+ assert_se(sd_journal_add_match(j, "_TRANSPORT=syslog", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "_UID=0", 0) >= 0);
+
+ SD_JOURNAL_FOREACH_BACKWARDS(j) {
+ const void *d;
+ size_t l;
+
+ assert_se(sd_journal_get_data(j, "MESSAGE", &d, &l) >= 0);
+
+ printf("%.*s\n", (int) l, (char*) d);
+
+ n++;
+ if (n >= 10)
+ break;
+ }
+
+ return 0;
+}
diff --git a/src/journal/test-journal-flush.c b/src/journal/test-journal-flush.c
new file mode 100644
index 0000000..81dbc22
--- /dev/null
+++ b/src/journal/test-journal-flush.c
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "journal-file.h"
+#include "journal-internal.h"
+#include "macro.h"
+#include "string-util.h"
+
+int main(int argc, char *argv[]) {
+ _cleanup_free_ char *fn = NULL;
+ char dn[] = "/var/tmp/test-journal-flush.XXXXXX";
+ JournalFile *new_journal = NULL;
+ sd_journal *j = NULL;
+ unsigned n = 0;
+ int r;
+
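+ /* Copy up to ~10000 entries from the system journal into a freshly created journal file via journal_file_copy_entry(). */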
+ assert_se(mkdtemp(dn));
+ fn = strappend(dn, "/test.journal");
+
+ r = journal_file_open(-1, fn, O_CREAT|O_RDWR, 0644, false, 0, false, NULL, NULL, NULL, NULL, &new_journal);
+ assert_se(r >= 0);
+
+ r = sd_journal_open(&j, 0);
+ assert_se(r >= 0);
+
+ sd_journal_set_data_threshold(j, 0);
+
+ SD_JOURNAL_FOREACH(j) {
+ Object *o;
+ JournalFile *f;
+
+ f = j->current_file;
+ assert_se(f && f->current_offset > 0);
+
+ r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
+ assert_se(r >= 0);
+
+ r = journal_file_copy_entry(f, new_journal, o, f->current_offset);
+ assert_se(r >= 0);
+
+ n++;
+ if (n > 10000)
+ break;
+ }
+
+ sd_journal_close(j);
+
+ (void) journal_file_close(new_journal);
+
+ unlink(fn);
+ assert_se(rmdir(dn) == 0);
+
+ return 0;
+}
diff --git a/src/journal/test-journal-init.c b/src/journal/test-journal-init.c
new file mode 100644
index 0000000..860baca
--- /dev/null
+++ b/src/journal/test-journal-init.c
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include "sd-journal.h"
+
+#include "log.h"
+#include "parse-util.h"
+#include "rm-rf.h"
+#include "tests.h"
+#include "util.h"
+
+int main(int argc, char *argv[]) {
+ sd_journal *j;
+ int r, i, I = 100;
+ char t[] = "/tmp/journal-stream-XXXXXX";
+
+ test_setup_logging(LOG_DEBUG);
+
+ if (argc >= 2) {
+ r = safe_atoi(argv[1], &I);
+ if (r < 0)
+ log_info("Could not parse loop count argument. Using default.");
+ }
+
+ log_info("Running %d loops", I);
+
+ assert_se(mkdtemp(t));
+
+ for (i = 0; i < I; i++) {
+ r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);
+ assert_se(r == 0);
+
+ sd_journal_close(j);
+
+ r = sd_journal_open_directory(&j, t, 0);
+ assert_se(r == 0);
+
+ sd_journal_close(j);
+
+ j = NULL;
+ r = sd_journal_open_directory(&j, t, SD_JOURNAL_LOCAL_ONLY);
+ assert_se(r == -EINVAL);
+ assert_se(j == NULL);
+ }
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+
+ return 0;
+}
diff --git a/src/journal/test-journal-interleaving.c b/src/journal/test-journal-interleaving.c
new file mode 100644
index 0000000..cf0561d
--- /dev/null
+++ b/src/journal/test-journal-interleaving.c
@@ -0,0 +1,288 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "io-util.h"
+#include "journal-file.h"
+#include "journal-vacuum.h"
+#include "log.h"
+#include "parse-util.h"
+#include "rm-rf.h"
+#include "tests.h"
+#include "util.h"
+
+/* This program tests skipping around in a multi-file journal. */
+
+static bool arg_keep = false;
+
+_noreturn_ static void log_assert_errno(const char *text, int error, const char *file, int line, const char *func) {
+ log_internal(LOG_CRIT, error, file, line, func,
+ "'%s' failed at %s:%u (%s): %m", text, file, line, func);
+ abort();
+}
+
+#define assert_ret(expr) \
+ do { \
+ int _r_ = (expr); \
+ if (_unlikely_(_r_ < 0)) \
+ log_assert_errno(#expr, -_r_, __FILE__, __LINE__, __PRETTY_FUNCTION__); \
+ } while (false)
+
+static JournalFile *test_open(const char *name) {
+ JournalFile *f;
+ assert_ret(journal_file_open(-1, name, O_RDWR|O_CREAT, 0644, true, (uint64_t) -1, false, NULL, NULL, NULL, NULL, &f));
+ return f;
+}
+
+static void test_close(JournalFile *f) {
+ (void) journal_file_close (f);
+}
+
+static void append_number(JournalFile *f, int n, uint64_t *seqnum) {
+ char *p;
+ dual_timestamp ts;
+ static dual_timestamp previous_ts = {};
+ struct iovec iovec[1];
+
+ dual_timestamp_get(&ts);
+
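+ /* Force strictly increasing timestamps so that successive entries are ordered deterministically across files. */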
+ if (ts.monotonic <= previous_ts.monotonic)
+ ts.monotonic = previous_ts.monotonic + 1;
+
+ if (ts.realtime <= previous_ts.realtime)
+ ts.realtime = previous_ts.realtime + 1;
+
+ previous_ts = ts;
+
+ assert_se(asprintf(&p, "NUMBER=%d", n) >= 0);
+ iovec[0] = IOVEC_MAKE_STRING(p);
+ assert_ret(journal_file_append_entry(f, &ts, NULL, iovec, 1, seqnum, NULL, NULL));
+ free(p);
+}
+
+static void test_check_number (sd_journal *j, int n) {
+ const void *d;
+ _cleanup_free_ char *k;
+ size_t l;
+ int x;
+
+ assert_ret(sd_journal_get_data(j, "NUMBER", &d, &l));
+ assert_se(k = strndup(d, l));
+ printf("%s\n", k);
+
+ assert_se(safe_atoi(k + 7, &x) >= 0);
+ assert_se(n == x);
+}
+
+static void test_check_numbers_down (sd_journal *j, int count) {
+ int i;
+
+ for (i = 1; i <= count; i++) {
+ int r;
+ test_check_number(j, i);
+ assert_ret(r = sd_journal_next(j));
+ if (i == count)
+ assert_se(r == 0);
+ else
+ assert_se(r == 1);
+ }
+
+}
+
+static void test_check_numbers_up (sd_journal *j, int count) {
+ for (int i = count; i >= 1; i--) {
+ int r;
+ test_check_number(j, i);
+ assert_ret(r = sd_journal_previous(j));
+ if (i == 1)
+ assert_se(r == 0);
+ else
+ assert_se(r == 1);
+ }
+
+}
+
+static void setup_sequential(void) {
+ JournalFile *one, *two;
+ one = test_open("one.journal");
+ two = test_open("two.journal");
+ append_number(one, 1, NULL);
+ append_number(one, 2, NULL);
+ append_number(two, 3, NULL);
+ append_number(two, 4, NULL);
+ test_close(one);
+ test_close(two);
+}
+
+static void setup_interleaved(void) {
+ JournalFile *one, *two;
+ one = test_open("one.journal");
+ two = test_open("two.journal");
+ append_number(one, 1, NULL);
+ append_number(two, 2, NULL);
+ append_number(one, 3, NULL);
+ append_number(two, 4, NULL);
+ test_close(one);
+ test_close(two);
+}
+
+static void test_skip(void (*setup)(void)) {
+ char t[] = "/tmp/journal-skip-XXXXXX";
+ sd_journal *j;
+ int r;
+
+ assert_se(mkdtemp(t));
+ assert_se(chdir(t) >= 0);
+
+ setup();
+
+ /* Seek to head, iterate down.
+ */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_head(j));
+ assert_ret(sd_journal_next(j));
+ test_check_numbers_down(j, 4);
+ sd_journal_close(j);
+
+ /* Seek to tail, iterate up.
+ */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_tail(j));
+ assert_ret(sd_journal_previous(j));
+ test_check_numbers_up(j, 4);
+ sd_journal_close(j);
+
+ /* Seek to tail, skip to head, iterate down.
+ */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_tail(j));
+ assert_ret(r = sd_journal_previous_skip(j, 4));
+ assert_se(r == 4);
+ test_check_numbers_down(j, 4);
+ sd_journal_close(j);
+
+ /* Seek to head, skip to tail, iterate up.
+ */
+ assert_ret(sd_journal_open_directory(&j, t, 0));
+ assert_ret(sd_journal_seek_head(j));
+ assert_ret(r = sd_journal_next_skip(j, 4));
+ assert_se(r == 4);
+ test_check_numbers_up(j, 4);
+ sd_journal_close(j);
+
+ log_info("Done...");
+
+ if (arg_keep)
+ log_info("Not removing %s", t);
+ else {
+ journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+ }
+
+ puts("------------------------------------------------------------");
+}
+
+static void test_sequence_numbers(void) {
+
+ char t[] = "/tmp/journal-seq-XXXXXX";
+ JournalFile *one, *two;
+ uint64_t seqnum = 0;
+ sd_id128_t seqnum_id;
+
+ assert_se(mkdtemp(t));
+ assert_se(chdir(t) >= 0);
+
+ assert_se(journal_file_open(-1, "one.journal", O_RDWR|O_CREAT, 0644,
+ true, (uint64_t) -1, false, NULL, NULL, NULL, NULL, &one) == 0);
+
+ append_number(one, 1, &seqnum);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 1);
+ append_number(one, 2, &seqnum);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 2);
+
+ assert_se(one->header->state == STATE_ONLINE);
+ assert_se(!sd_id128_equal(one->header->file_id, one->header->machine_id));
+ assert_se(!sd_id128_equal(one->header->file_id, one->header->boot_id));
+ assert_se(sd_id128_equal(one->header->file_id, one->header->seqnum_id));
+
+ memcpy(&seqnum_id, &one->header->seqnum_id, sizeof(sd_id128_t));
+
+ assert_se(journal_file_open(-1, "two.journal", O_RDWR|O_CREAT, 0644,
+ true, (uint64_t) -1, false, NULL, NULL, NULL, one, &two) == 0);
+
+ assert_se(two->header->state == STATE_ONLINE);
+ assert_se(!sd_id128_equal(two->header->file_id, one->header->file_id));
+ assert_se(sd_id128_equal(two->header->machine_id, one->header->machine_id));
+ assert_se(sd_id128_equal(two->header->boot_id, one->header->boot_id));
+ assert_se(sd_id128_equal(two->header->seqnum_id, one->header->seqnum_id));
+
+ append_number(two, 3, &seqnum);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 3);
+ append_number(two, 4, &seqnum);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 4);
+
+ test_close(two);
+
+ append_number(one, 5, &seqnum);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 5);
+
+ append_number(one, 6, &seqnum);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 6);
+
+ test_close(one);
+
+ /* restart server */
+ seqnum = 0;
+
+ assert_se(journal_file_open(-1, "two.journal", O_RDWR, 0,
+ true, (uint64_t) -1, false, NULL, NULL, NULL, NULL, &two) == 0);
+
+ assert_se(sd_id128_equal(two->header->seqnum_id, seqnum_id));
+
+ append_number(two, 7, &seqnum);
+ printf("seqnum=%"PRIu64"\n", seqnum);
+ assert_se(seqnum == 5);
+
+ /* So..., here we have the same seqnum in two files with the
+ * same seqnum_id. */
+
+ test_close(two);
+
+ log_info("Done...");
+
+ if (arg_keep)
+ log_info("Not removing %s", t);
+ else {
+ journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+ }
+}
+
+int main(int argc, char *argv[]) {
+ test_setup_logging(LOG_DEBUG);
+
+ /* journal_file_open requires a valid machine id */
+ if (access("/etc/machine-id", F_OK) != 0)
+ return log_tests_skipped("/etc/machine-id not found");
+
+ arg_keep = argc > 1;
+
+ test_skip(setup_sequential);
+ test_skip(setup_interleaved);
+
+ test_sequence_numbers();
+
+ return 0;
+}
diff --git a/src/journal/test-journal-match.c b/src/journal/test-journal-match.c
new file mode 100644
index 0000000..b175279
--- /dev/null
+++ b/src/journal/test-journal-match.c
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <stdio.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "journal-internal.h"
+#include "log.h"
+#include "string-util.h"
+#include "tests.h"
+#include "util.h"
+
+int main(int argc, char *argv[]) {
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ _cleanup_free_ char *t;
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(sd_journal_open(&j, 0) >= 0);
+
+ assert_se(sd_journal_add_match(j, "foobar", 0) < 0);
+ assert_se(sd_journal_add_match(j, "foobar=waldo", 0) < 0);
+ assert_se(sd_journal_add_match(j, "", 0) < 0);
+ assert_se(sd_journal_add_match(j, "=", 0) < 0);
+ assert_se(sd_journal_add_match(j, "=xxxxx", 0) < 0);
+ assert_se(sd_journal_add_match(j, (uint8_t[4]){'A', '=', '\1', '\2'}, 4) >= 0);
+ assert_se(sd_journal_add_match(j, (uint8_t[5]){'B', '=', 'C', '\0', 'D'}, 5) >= 0);
+ assert_se(sd_journal_add_match(j, "HALLO=WALDO", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "QUUX=mmmm", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "QUUX=xxxxx", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "HALLO=", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "QUUX=xxxxx", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "QUUX=yyyyy", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "PIFF=paff", 0) >= 0);
+
+ assert_se(sd_journal_add_disjunction(j) >= 0);
+
+ assert_se(sd_journal_add_match(j, "ONE=one", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "ONE=two", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "TWO=two", 0) >= 0);
+
+ assert_se(sd_journal_add_conjunction(j) >= 0);
+
+ assert_se(sd_journal_add_match(j, "L4_1=yes", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "L4_1=ok", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "L4_2=yes", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "L4_2=ok", 0) >= 0);
+
+ assert_se(sd_journal_add_disjunction(j) >= 0);
+
+ assert_se(sd_journal_add_match(j, "L3=yes", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "L3=ok", 0) >= 0);
+
+ assert_se(t = journal_make_match_string(j));
+
+ printf("resulting match expression is: %s\n", t);
+
+ assert_se(streq(t, "(((L3=ok OR L3=yes) OR ((L4_2=ok OR L4_2=yes) AND (L4_1=ok OR L4_1=yes))) AND ((TWO=two AND (ONE=two OR ONE=one)) OR (PIFF=paff AND (QUUX=yyyyy OR QUUX=xxxxx OR QUUX=mmmm) AND (HALLO= OR HALLO=WALDO) AND B=C\\000D AND A=\\001\\002)))"));
+
+ return 0;
+}
diff --git a/src/journal/test-journal-send.c b/src/journal/test-journal-send.c
new file mode 100644
index 0000000..484308e
--- /dev/null
+++ b/src/journal/test-journal-send.c
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "macro.h"
+
+int main(int argc, char *argv[]) {
+ char huge[4096*1024];
+
+ /* utf-8 and non-utf-8, message-less and message-ful iovecs */
+ struct iovec graph1[] = {
+ {(char*) "GRAPH=graph", STRLEN("GRAPH=graph")}
+ };
+ struct iovec graph2[] = {
+ {(char*) "GRAPH=graph\n", STRLEN("GRAPH=graph\n")}
+ };
+ struct iovec message1[] = {
+ {(char*) "MESSAGE=graph", STRLEN("MESSAGE=graph")}
+ };
+ struct iovec message2[] = {
+ {(char*) "MESSAGE=graph\n", STRLEN("MESSAGE=graph\n")}
+ };
+
+ assert_se(sd_journal_print(LOG_INFO, "piepapo") == 0);
+
+ assert_se(sd_journal_send("MESSAGE=foobar",
+ "VALUE=%i", 7,
+ NULL) == 0);
+
+ errno = ENOENT;
+ assert_se(sd_journal_perror("Foobar") == 0);
+
+ assert_se(sd_journal_perror("") == 0);
+
+ memset(huge, 'x', sizeof(huge));
+ memcpy(huge, "HUGE=", 5);
+ char_array_0(huge);
+
+ assert_se(sd_journal_send("MESSAGE=Huge field attached",
+ huge,
+ NULL) == 0);
+
+ assert_se(sd_journal_send("MESSAGE=uiui",
+ "VALUE=A",
+ "VALUE=B",
+ "VALUE=C",
+ "SINGLETON=1",
+ "OTHERVALUE=X",
+ "OTHERVALUE=Y",
+ "WITH_BINARY=this is a binary value \a",
+ NULL) == 0);
+
+ syslog(LOG_NOTICE, "Hello World!");
+
+ assert_se(sd_journal_print(LOG_NOTICE, "Hello World") == 0);
+
+ assert_se(sd_journal_send("MESSAGE=Hello World!",
+ "MESSAGE_ID=52fb62f99e2c49d89cfbf9d6de5e3555",
+ "PRIORITY=5",
+ "HOME=%s", getenv("HOME"),
+ "TERM=%s", getenv("TERM"),
+ "PAGE_SIZE=%li", sysconf(_SC_PAGESIZE),
+ "N_CPUS=%li", sysconf(_SC_NPROCESSORS_ONLN),
+ NULL) == 0);
+
+ assert_se(sd_journal_sendv(graph1, 1) == 0);
+ assert_se(sd_journal_sendv(graph2, 1) == 0);
+ assert_se(sd_journal_sendv(message1, 1) == 0);
+ assert_se(sd_journal_sendv(message2, 1) == 0);
+
+ /* test without location fields */
+#undef sd_journal_sendv
+ assert_se(sd_journal_sendv(graph1, 1) == 0);
+ assert_se(sd_journal_sendv(graph2, 1) == 0);
+ assert_se(sd_journal_sendv(message1, 1) == 0);
+ assert_se(sd_journal_sendv(message2, 1) == 0);
+
+ sleep(1);
+
+ return 0;
+}
diff --git a/src/journal/test-journal-stream.c b/src/journal/test-journal-stream.c
new file mode 100644
index 0000000..226c30f
--- /dev/null
+++ b/src/journal/test-journal-stream.c
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "sd-journal.h"
+
+#include "alloc-util.h"
+#include "journal-file.h"
+#include "journal-internal.h"
+#include "log.h"
+#include "macro.h"
+#include "parse-util.h"
+#include "rm-rf.h"
+#include "tests.h"
+#include "util.h"
+
+#define N_ENTRIES 200
+
+static void verify_contents(sd_journal *j, unsigned skip) {
+ unsigned i;
+
+ assert_se(j);
+
+ i = 0;
+ SD_JOURNAL_FOREACH(j) {
+ const void *d;
+ char *k, *c;
+ size_t l;
+ unsigned u = 0;
+
+ assert_se(sd_journal_get_cursor(j, &k) >= 0);
+ printf("cursor: %s\n", k);
+ free(k);
+
+ assert_se(sd_journal_get_data(j, "MAGIC", &d, &l) >= 0);
+ printf("\t%.*s\n", (int) l, (const char*) d);
+
+ assert_se(sd_journal_get_data(j, "NUMBER", &d, &l) >= 0);
+ assert_se(k = strndup(d, l));
+ printf("\t%s\n", k);
+
+ if (skip > 0) {
+ assert_se(safe_atou(k + 7, &u) >= 0);
+ assert_se(i == u);
+ i += skip;
+ }
+
+ free(k);
+
+ assert_se(sd_journal_get_cursor(j, &c) >= 0);
+ assert_se(sd_journal_test_cursor(j, c) > 0);
+ free(c);
+ }
+
+ if (skip > 0)
+ assert_se(i == N_ENTRIES);
+}
+
+int main(int argc, char *argv[]) {
+ JournalFile *one, *two, *three;
+ char t[] = "/tmp/journal-stream-XXXXXX";
+ unsigned i;
+ _cleanup_(sd_journal_closep) sd_journal *j = NULL;
+ char *z;
+ const void *data;
+ size_t l;
+ dual_timestamp previous_ts = DUAL_TIMESTAMP_NULL;
+
+ /* journal_file_open requires a valid machine id */
+ if (access("/etc/machine-id", F_OK) != 0)
+ return log_tests_skipped("/etc/machine-id not found");
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(mkdtemp(t));
+ assert_se(chdir(t) >= 0);
+
+ assert_se(journal_file_open(-1, "one.journal", O_RDWR|O_CREAT, 0666, true, (uint64_t) -1, false, NULL, NULL, NULL, NULL, &one) == 0);
+ assert_se(journal_file_open(-1, "two.journal", O_RDWR|O_CREAT, 0666, true, (uint64_t) -1, false, NULL, NULL, NULL, NULL, &two) == 0);
+ assert_se(journal_file_open(-1, "three.journal", O_RDWR|O_CREAT, 0666, true, (uint64_t) -1, false, NULL, NULL, NULL, NULL, &three) == 0);
+
+ for (i = 0; i < N_ENTRIES; i++) {
+ char *p, *q;
+ dual_timestamp ts;
+ struct iovec iovec[2];
+
+ dual_timestamp_get(&ts);
+
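+                /* Keep both the monotonic and realtime clocks strictly increasing across all appended entries. */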
+ if (ts.monotonic <= previous_ts.monotonic)
+ ts.monotonic = previous_ts.monotonic + 1;
+
+ if (ts.realtime <= previous_ts.realtime)
+ ts.realtime = previous_ts.realtime + 1;
+
+ previous_ts = ts;
+
+ assert_se(asprintf(&p, "NUMBER=%u", i) >= 0);
+ iovec[0].iov_base = p;
+ iovec[0].iov_len = strlen(p);
+
+ assert_se(asprintf(&q, "MAGIC=%s", i % 5 == 0 ? "quux" : "waldo") >= 0);
+
+ iovec[1].iov_base = q;
+ iovec[1].iov_len = strlen(q);
+
+ if (i % 10 == 0)
+ assert_se(journal_file_append_entry(three, &ts, NULL, iovec, 2, NULL, NULL, NULL) == 0);
+ else {
+ if (i % 3 == 0)
+ assert_se(journal_file_append_entry(two, &ts, NULL, iovec, 2, NULL, NULL, NULL) == 0);
+
+ assert_se(journal_file_append_entry(one, &ts, NULL, iovec, 2, NULL, NULL, NULL) == 0);
+ }
+
+ free(p);
+ free(q);
+ }
+
+ (void) journal_file_close(one);
+ (void) journal_file_close(two);
+ (void) journal_file_close(three);
+
+ assert_se(sd_journal_open_directory(&j, t, 0) >= 0);
+
+ assert_se(sd_journal_add_match(j, "MAGIC=quux", 0) >= 0);
+ SD_JOURNAL_FOREACH_BACKWARDS(j) {
+ _cleanup_free_ char *c;
+
+ assert_se(sd_journal_get_data(j, "NUMBER", &data, &l) >= 0);
+ printf("\t%.*s\n", (int) l, (const char*) data);
+
+ assert_se(sd_journal_get_cursor(j, &c) >= 0);
+ assert_se(sd_journal_test_cursor(j, c) > 0);
+ }
+
+ SD_JOURNAL_FOREACH(j) {
+ _cleanup_free_ char *c;
+
+ assert_se(sd_journal_get_data(j, "NUMBER", &data, &l) >= 0);
+ printf("\t%.*s\n", (int) l, (const char*) data);
+
+ assert_se(sd_journal_get_cursor(j, &c) >= 0);
+ assert_se(sd_journal_test_cursor(j, c) > 0);
+ }
+
+ sd_journal_flush_matches(j);
+
+ verify_contents(j, 1);
+
+ printf("NEXT TEST\n");
+ assert_se(sd_journal_add_match(j, "MAGIC=quux", 0) >= 0);
+
+ assert_se(z = journal_make_match_string(j));
+ printf("resulting match expression is: %s\n", z);
+ free(z);
+
+ verify_contents(j, 5);
+
+ printf("NEXT TEST\n");
+ sd_journal_flush_matches(j);
+ assert_se(sd_journal_add_match(j, "MAGIC=waldo", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "NUMBER=10", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "NUMBER=11", 0) >= 0);
+ assert_se(sd_journal_add_match(j, "NUMBER=12", 0) >= 0);
+
+ assert_se(z = journal_make_match_string(j));
+ printf("resulting match expression is: %s\n", z);
+ free(z);
+
+ verify_contents(j, 0);
+
+ assert_se(sd_journal_query_unique(j, "NUMBER") >= 0);
+ SD_JOURNAL_FOREACH_UNIQUE(j, data, l)
+ printf("%.*s\n", (int) l, (const char*) data);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+
+ return 0;
+}
diff --git a/src/journal/test-journal-syslog.c b/src/journal/test-journal-syslog.c
new file mode 100644
index 0000000..45be7e5
--- /dev/null
+++ b/src/journal/test-journal-syslog.c
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include "alloc-util.h"
+#include "journald-syslog.h"
+#include "macro.h"
+#include "string-util.h"
+#include "syslog-util.h"
+
+static void test_syslog_parse_identifier(const char *str,
+ const char *ident, const char *pid, const char *rest, int ret) {
+ const char *buf = str;
+ _cleanup_free_ char *ident2 = NULL, *pid2 = NULL;
+ int ret2;
+
+ ret2 = syslog_parse_identifier(&buf, &ident2, &pid2);
+
+ assert_se(ret == ret2);
+ assert_se(ident == ident2 || streq_ptr(ident, ident2));
+ assert_se(pid == pid2 || streq_ptr(pid, pid2));
+ assert_se(streq(buf, rest));
+}
+
+static void test_syslog_parse_priority(const char *str, int priority, int ret) {
+ const char *buf = str;
+ int priority2 = 0, ret2;
+
+ ret2 = syslog_parse_priority(&buf, &priority2, false);
+
+ assert_se(ret == ret2);
+ if (ret2 == 1)
+ assert_se(priority == priority2);
+}
+
+int main(void) {
+ test_syslog_parse_identifier("pidu[111]: xxx", "pidu", "111", "xxx", 11);
+ test_syslog_parse_identifier("pidu: xxx", "pidu", NULL, "xxx", 6);
+ test_syslog_parse_identifier("pidu: xxx", "pidu", NULL, " xxx", 6);
+ test_syslog_parse_identifier("pidu xxx", NULL, NULL, "pidu xxx", 0);
+ test_syslog_parse_identifier(" pidu xxx", NULL, NULL, " pidu xxx", 0);
+ test_syslog_parse_identifier("", NULL, NULL, "", 0);
+ test_syslog_parse_identifier(" ", NULL, NULL, " ", 0);
+ test_syslog_parse_identifier(":", "", NULL, "", 1);
+ test_syslog_parse_identifier(": ", "", NULL, " ", 2);
+ test_syslog_parse_identifier(" :", "", NULL, "", 2);
+ test_syslog_parse_identifier(" pidu:", "pidu", NULL, "", 8);
+ test_syslog_parse_identifier("pidu:", "pidu", NULL, "", 5);
+ test_syslog_parse_identifier("pidu: ", "pidu", NULL, "", 6);
+ test_syslog_parse_identifier("pidu : ", NULL, NULL, "pidu : ", 0);
+
+ test_syslog_parse_priority("<>", 0, 0);
+ test_syslog_parse_priority("<>aaa", 0, 0);
+ test_syslog_parse_priority("<aaaa>", 0, 0);
+ test_syslog_parse_priority("<aaaa>aaa", 0, 0);
+ test_syslog_parse_priority(" <aaaa>", 0, 0);
+ test_syslog_parse_priority(" <aaaa>aaa", 0, 0);
+ /* TODO: add test cases of valid priorities */
+
+ return 0;
+}
diff --git a/src/journal/test-journal-verify.c b/src/journal/test-journal-verify.c
new file mode 100644
index 0000000..c4fa41e
--- /dev/null
+++ b/src/journal/test-journal-verify.c
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include "fd-util.h"
+#include "io-util.h"
+#include "journal-file.h"
+#include "journal-verify.h"
+#include "log.h"
+#include "rm-rf.h"
+#include "terminal-util.h"
+#include "tests.h"
+#include "util.h"
+
+#define N_ENTRIES 6000
+#define RANDOM_RANGE 77
+
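+/* Flips the single bit at bit offset 'p' in file 'fn' by rewriting the byte that contains it. */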
+static void bit_toggle(const char *fn, uint64_t p) {
+ uint8_t b;
+ ssize_t r;
+ int fd;
+
+ fd = open(fn, O_RDWR|O_CLOEXEC);
+ assert_se(fd >= 0);
+
+ r = pread(fd, &b, 1, p/8);
+ assert_se(r == 1);
+
+ b ^= 1 << (p % 8);
+
+ r = pwrite(fd, &b, 1, p/8);
+ assert_se(r == 1);
+
+ safe_close(fd);
+}
+
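+/* Re-opens the journal file read-only and runs journal_file_verify() on it, returning the result. */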
+static int raw_verify(const char *fn, const char *verification_key) {
+ JournalFile *f;
+ int r;
+
+ r = journal_file_open(-1, fn, O_RDONLY, 0666, true, (uint64_t) -1, !!verification_key, NULL, NULL, NULL, NULL, &f);
+ if (r < 0)
+ return r;
+
+ r = journal_file_verify(f, verification_key, NULL, NULL, NULL, false);
+ (void) journal_file_close(f);
+
+ return r;
+}
+
+int main(int argc, char *argv[]) {
+ char t[] = "/tmp/journal-XXXXXX";
+ unsigned n;
+ JournalFile *f;
+ const char *verification_key = argv[1];
+ usec_t from = 0, to = 0, total = 0;
+ char a[FORMAT_TIMESTAMP_MAX];
+ char b[FORMAT_TIMESTAMP_MAX];
+ char c[FORMAT_TIMESPAN_MAX];
+ struct stat st;
+ uint64_t p;
+
+ /* journal_file_open requires a valid machine id */
+ if (access("/etc/machine-id", F_OK) != 0)
+ return log_tests_skipped("/etc/machine-id not found");
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(mkdtemp(t));
+ assert_se(chdir(t) >= 0);
+
+ log_info("Generating...");
+
+ assert_se(journal_file_open(-1, "test.journal", O_RDWR|O_CREAT, 0666, true, (uint64_t) -1, !!verification_key, NULL, NULL, NULL, NULL, &f) == 0);
+
+ for (n = 0; n < N_ENTRIES; n++) {
+ struct iovec iovec;
+ struct dual_timestamp ts;
+ char *test;
+
+ dual_timestamp_get(&ts);
+
+ assert_se(asprintf(&test, "RANDOM=%lu", random() % RANDOM_RANGE));
+
+ iovec = IOVEC_MAKE_STRING(test);
+
+ assert_se(journal_file_append_entry(f, &ts, NULL, &iovec, 1, NULL, NULL, NULL) == 0);
+
+ free(test);
+ }
+
+ (void) journal_file_close(f);
+
+ log_info("Verifying...");
+
+ assert_se(journal_file_open(-1, "test.journal", O_RDONLY, 0666, true, (uint64_t) -1, !!verification_key, NULL, NULL, NULL, NULL, &f) == 0);
+ /* journal_file_print_header(f); */
+ journal_file_dump(f);
+
+ assert_se(journal_file_verify(f, verification_key, &from, &to, &total, true) >= 0);
+
+ if (verification_key && JOURNAL_HEADER_SEALED(f->header))
+ log_info("=> Validated from %s to %s, %s missing",
+ format_timestamp(a, sizeof(a), from),
+ format_timestamp(b, sizeof(b), to),
+ format_timespan(c, sizeof(c), total > to ? total - to : 0, 0));
+
+ (void) journal_file_close(f);
+
+ if (verification_key) {
+ log_info("Toggling bits...");
+
+ assert_se(stat("test.journal", &st) >= 0);
+
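+ /* Fault injection: flip each bit of the file in turn (starting at a hard-coded byte offset,
+ * presumably to keep the runtime manageable), re-verify, and complain about any bit that can be
+ * toggled without verification noticing. The bit is flipped back afterwards. */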
+ for (p = 38448*8+0; p < ((uint64_t) st.st_size * 8); p ++) {
+ bit_toggle("test.journal", p);
+
+ log_info("[ %"PRIu64"+%"PRIu64"]", p / 8, p % 8);
+
+ if (raw_verify("test.journal", verification_key) >= 0)
+ log_notice(ANSI_HIGHLIGHT_RED ">>>> %"PRIu64" (bit %"PRIu64") can be toggled without detection." ANSI_NORMAL, p / 8, p % 8);
+
+ bit_toggle("test.journal", p);
+ }
+ }
+
+ log_info("Exiting...");
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+
+ return 0;
+}
diff --git a/src/journal/test-journal.c b/src/journal/test-journal.c
new file mode 100644
index 0000000..0795e0d
--- /dev/null
+++ b/src/journal/test-journal.c
@@ -0,0 +1,252 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <unistd.h>
+
+#include "io-util.h"
+#include "journal-authenticate.h"
+#include "journal-file.h"
+#include "journal-vacuum.h"
+#include "log.h"
+#include "rm-rf.h"
+#include "tests.h"
+
+static bool arg_keep = false;
+
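+/* Appends three entries (the last one with a random fake boot ID) to a fresh journal file and
+ * exercises entry iteration, data object lookup and seeks by sequence number, then rotates the
+ * file twice. */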
+static void test_non_empty(void) {
+ dual_timestamp ts;
+ JournalFile *f;
+ struct iovec iovec;
+ static const char test[] = "TEST1=1", test2[] = "TEST2=2";
+ Object *o;
+ uint64_t p;
+ sd_id128_t fake_boot_id;
+ char t[] = "/tmp/journal-XXXXXX";
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(mkdtemp(t));
+ assert_se(chdir(t) >= 0);
+
+ assert_se(journal_file_open(-1, "test.journal", O_RDWR|O_CREAT, 0666, true, (uint64_t) -1, true, NULL, NULL, NULL, NULL, &f) == 0);
+
+ assert_se(dual_timestamp_get(&ts));
+ assert_se(sd_id128_randomize(&fake_boot_id) == 0);
+
+ iovec = IOVEC_MAKE_STRING(test);
+ assert_se(journal_file_append_entry(f, &ts, NULL, &iovec, 1, NULL, NULL, NULL) == 0);
+
+ iovec = IOVEC_MAKE_STRING(test2);
+ assert_se(journal_file_append_entry(f, &ts, NULL, &iovec, 1, NULL, NULL, NULL) == 0);
+
+ iovec = IOVEC_MAKE_STRING(test);
+ assert_se(journal_file_append_entry(f, &ts, &fake_boot_id, &iovec, 1, NULL, NULL, NULL) == 0);
+
+#if HAVE_GCRYPT
+ journal_file_append_tag(f);
+#endif
+ journal_file_dump(f);
+
+ assert_se(journal_file_next_entry(f, 0, DIRECTION_DOWN, &o, &p) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 1);
+
+ assert_se(journal_file_next_entry(f, p, DIRECTION_DOWN, &o, &p) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 2);
+
+ assert_se(journal_file_next_entry(f, p, DIRECTION_DOWN, &o, &p) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 3);
+ assert_se(sd_id128_equal(o->entry.boot_id, fake_boot_id));
+
+ assert_se(journal_file_next_entry(f, p, DIRECTION_DOWN, &o, &p) == 0);
+
+ assert_se(journal_file_next_entry(f, 0, DIRECTION_DOWN, &o, &p) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 1);
+
+ assert_se(journal_file_find_data_object(f, test, strlen(test), NULL, &p) == 1);
+ assert_se(journal_file_next_entry_for_data(f, NULL, 0, p, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 1);
+
+ assert_se(journal_file_next_entry_for_data(f, NULL, 0, p, DIRECTION_UP, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 3);
+
+ assert_se(journal_file_find_data_object(f, test2, strlen(test2), NULL, &p) == 1);
+ assert_se(journal_file_next_entry_for_data(f, NULL, 0, p, DIRECTION_UP, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 2);
+
+ assert_se(journal_file_next_entry_for_data(f, NULL, 0, p, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 2);
+
+ assert_se(journal_file_find_data_object(f, "quux", 4, NULL, &p) == 0);
+
+ assert_se(journal_file_move_to_entry_by_seqnum(f, 1, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 1);
+
+ assert_se(journal_file_move_to_entry_by_seqnum(f, 3, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 3);
+
+ assert_se(journal_file_move_to_entry_by_seqnum(f, 2, DIRECTION_DOWN, &o, NULL) == 1);
+ assert_se(le64toh(o->entry.seqnum) == 2);
+
+ assert_se(journal_file_move_to_entry_by_seqnum(f, 10, DIRECTION_DOWN, &o, NULL) == 0);
+
+ journal_file_rotate(&f, true, (uint64_t) -1, true, NULL);
+ journal_file_rotate(&f, true, (uint64_t) -1, true, NULL);
+
+ (void) journal_file_close(f);
+
+ log_info("Done...");
+
+ if (arg_keep)
+ log_info("Not removing %s", t);
+ else {
+ journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+ }
+
+ puts("------------------------------------------------------------");
+}
+
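+/* Creates four empty journal files covering all four combinations of compression and sealing and
+ * prints their headers. */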
+static void test_empty(void) {
+ JournalFile *f1, *f2, *f3, *f4;
+ char t[] = "/tmp/journal-XXXXXX";
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(mkdtemp(t));
+ assert_se(chdir(t) >= 0);
+
+ assert_se(journal_file_open(-1, "test.journal", O_RDWR|O_CREAT, 0666, false, (uint64_t) -1, false, NULL, NULL, NULL, NULL, &f1) == 0);
+
+ assert_se(journal_file_open(-1, "test-compress.journal", O_RDWR|O_CREAT, 0666, true, (uint64_t) -1, false, NULL, NULL, NULL, NULL, &f2) == 0);
+
+ assert_se(journal_file_open(-1, "test-seal.journal", O_RDWR|O_CREAT, 0666, false, (uint64_t) -1, true, NULL, NULL, NULL, NULL, &f3) == 0);
+
+ assert_se(journal_file_open(-1, "test-seal-compress.journal", O_RDWR|O_CREAT, 0666, true, (uint64_t) -1, true, NULL, NULL, NULL, NULL, &f4) == 0);
+
+ journal_file_print_header(f1);
+ puts("");
+ journal_file_print_header(f2);
+ puts("");
+ journal_file_print_header(f3);
+ puts("");
+ journal_file_print_header(f4);
+ puts("");
+
+ log_info("Done...");
+
+ if (arg_keep)
+ log_info("Not removing %s", t);
+ else {
+ journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+ }
+
+ (void) journal_file_close(f1);
+ (void) journal_file_close(f2);
+ (void) journal_file_close(f3);
+ (void) journal_file_close(f4);
+}
+
+#if HAVE_XZ || HAVE_LZ4
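+/* Appends a single entry with a payload of 'data_size' bytes to a journal opened with the given
+ * compression threshold, then walks the objects by hand to find the data object and returns
+ * whether it ended up stored compressed. */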
+static bool check_compressed(uint64_t compress_threshold, uint64_t data_size) {
+ dual_timestamp ts;
+ JournalFile *f;
+ struct iovec iovec;
+ Object *o;
+ uint64_t p;
+ char t[] = "/tmp/journal-XXXXXX";
+ char data[2048] = {0};
+ bool is_compressed;
+ int r;
+
+ assert_se(data_size <= sizeof(data));
+
+ test_setup_logging(LOG_DEBUG);
+
+ assert_se(mkdtemp(t));
+ assert_se(chdir(t) >= 0);
+
+ assert_se(journal_file_open(-1, "test.journal", O_RDWR|O_CREAT, 0666, true, compress_threshold, true, NULL, NULL, NULL, NULL, &f) == 0);
+
+ dual_timestamp_get(&ts);
+
+ iovec = IOVEC_MAKE(data, data_size);
+ assert_se(journal_file_append_entry(f, &ts, NULL, &iovec, 1, NULL, NULL, NULL) == 0);
+
+#if HAVE_GCRYPT
+ journal_file_append_tag(f);
+#endif
+ journal_file_dump(f);
+
+ /* We have to partially reimplement some of the dump logic, because the normal next_entry does the
+ * decompression for us. */
+ p = le64toh(f->header->header_size);
+ for (;;) {
+ r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
+ assert_se(r == 0);
+ if (o->object.type == OBJECT_DATA)
+ break;
+
+ assert_se(p < le64toh(f->header->tail_object_offset));
+ p = p + ALIGN64(le64toh(o->object.size));
+ }
+
+ is_compressed = (o->object.flags & OBJECT_COMPRESSION_MASK) != 0;
+
+ (void) journal_file_close(f);
+
+ log_info("Done...");
+
+ if (arg_keep)
+ log_info("Not removing %s", t);
+ else {
+ journal_directory_vacuum(".", 3000000, 0, 0, NULL, true);
+
+ assert_se(rm_rf(t, REMOVE_ROOT|REMOVE_PHYSICAL) >= 0);
+ }
+
+ puts("------------------------------------------------------------");
+
+ return is_compressed;
+}
+
+static void test_min_compress_size(void) {
+ /* Note that XZ will actually fail to compress anything under 80 bytes, so you have to choose the limits
+ * carefully */
+
+ /* DEFAULT_MIN_COMPRESS_SIZE is 512 */
+ assert_se(!check_compressed((uint64_t) -1, 255));
+ assert_se(check_compressed((uint64_t) -1, 513));
+
+ /* compress everything */
+ assert_se(check_compressed(0, 96));
+ assert_se(check_compressed(8, 96));
+
+ /* Ensure we don't try to compress less than 8 bytes */
+ assert_se(!check_compressed(0, 7));
+
+ /* check boundary conditions */
+ assert_se(check_compressed(256, 256));
+ assert_se(!check_compressed(256, 255));
+}
+#endif
+
+int main(int argc, char *argv[]) {
+ arg_keep = argc > 1;
+
+ test_setup_logging(LOG_INFO);
+
+ /* journal_file_open requires a valid machine id */
+ if (access("/etc/machine-id", F_OK) != 0)
+ return log_tests_skipped("/etc/machine-id not found");
+
+ test_non_empty();
+ test_empty();
+#if HAVE_XZ || HAVE_LZ4
+ test_min_compress_size();
+#endif
+
+ return 0;
+}
diff --git a/src/journal/test-mmap-cache.c b/src/journal/test-mmap-cache.c
new file mode 100644
index 0000000..8f755ef
--- /dev/null
+++ b/src/journal/test-mmap-cache.c
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: LGPL-2.1+ */
+
+#include <fcntl.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include "fd-util.h"
+#include "macro.h"
+#include "mmap-cache.h"
+#include "tmpfile-util.h"
+#include "util.h"
+
+int main(int argc, char *argv[]) {
+ MMapFileDescriptor *fx;
+ int x, y, z, r;
+ char px[] = "/tmp/testmmapXXXXXXX", py[] = "/tmp/testmmapYXXXXXX", pz[] = "/tmp/testmmapZXXXXXX";
+ MMapCache *m;
+ void *p, *q;
+
+ assert_se(m = mmap_cache_new());
+
+ x = mkostemp_safe(px);
+ assert_se(x >= 0);
+ unlink(px);
+
+ assert_se(fx = mmap_cache_add_fd(m, x));
+
+ y = mkostemp_safe(py);
+ assert_se(y >= 0);
+ unlink(py);
+
+ z = mkostemp_safe(pz);
+ assert_se(z >= 0);
+ unlink(pz);
+
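+ /* Map overlapping windows of the same fd: the cache should serve them from a single mapping,
+ * so the returned pointers must differ by exactly the difference of the requested offsets. */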
+ r = mmap_cache_get(m, fx, PROT_READ, 0, false, 1, 2, NULL, &p, NULL);
+ assert_se(r >= 0);
+
+ r = mmap_cache_get(m, fx, PROT_READ, 0, false, 2, 2, NULL, &q, NULL);
+ assert_se(r >= 0);
+
+ assert_se((uint8_t*) p + 1 == (uint8_t*) q);
+
+ r = mmap_cache_get(m, fx, PROT_READ, 1, false, 3, 2, NULL, &q, NULL);
+ assert_se(r >= 0);
+
+ assert_se((uint8_t*) p + 2 == (uint8_t*) q);
+
+ r = mmap_cache_get(m, fx, PROT_READ, 0, false, 16ULL*1024ULL*1024ULL, 2, NULL, &p, NULL);
+ assert_se(r >= 0);
+
+ r = mmap_cache_get(m, fx, PROT_READ, 1, false, 16ULL*1024ULL*1024ULL+1, 2, NULL, &q, NULL);
+ assert_se(r >= 0);
+
+ assert_se((uint8_t*) p + 1 == (uint8_t*) q);
+
+ mmap_cache_free_fd(m, fx);
+ mmap_cache_unref(m);
+
+ safe_close(x);
+ safe_close(y);
+ safe_close(z);
+
+ return 0;
+}