summaryrefslogtreecommitdiffstats
path: root/src/knot/journal/serialization.c
diff options
context:
space:
mode:
Diffstat (limited to 'src/knot/journal/serialization.c')
-rw-r--r--src/knot/journal/serialization.c501
1 files changed, 501 insertions, 0 deletions
diff --git a/src/knot/journal/serialization.c b/src/knot/journal/serialization.c
new file mode 100644
index 0000000..5758481
--- /dev/null
+++ b/src/knot/journal/serialization.c
@@ -0,0 +1,501 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+
+#include "knot/journal/serialization.h"
+#include "knot/zone/zone-tree.h"
+
+#define SERIALIZE_RRSET_INIT (-1)
+#define SERIALIZE_RRSET_DONE ((1L<<16)+1)
+
/*!
 * \brief Phases of the serialization state machine.
 *
 * PHASE_ZONE_* drive full-zone (and zone-diff) serialization;
 * PHASE_SOA_1 / PHASE_REM / PHASE_SOA_2 / PHASE_ADD drive changeset
 * serialization in IXFR order (SOA-from, removals, SOA-to, additions).
 */
typedef enum {
	PHASE_ZONE_SOA,    // emit the zone apex SOA
	PHASE_ZONE_NODES,  // emit RRsets of regular zone nodes
	PHASE_ZONE_NSEC3,  // emit RRsets of the NSEC3 tree
	PHASE_SOA_1,       // emit the changeset's SOA-from
	PHASE_REM,         // emit removed RRsets
	PHASE_SOA_2,       // emit the changeset's SOA-to
	PHASE_ADD,         // emit added RRsets
	PHASE_END,         // serialization finished
} serialize_phase_t;
+
#define RRSET_BUF_MAXSIZE 256

/*! \brief Context for incremental (chunked) serialization of a zone,
 *         zone diff, or changeset. */
struct serialize_ctx {
	zone_diff_t zdiff;         // zone contents/diff being serialized (zone modes)
	zone_tree_it_t zit;        // iterator over the zdiff node trees
	zone_node_t *n;            // node currently being serialized
	uint16_t node_pos;         // index of the next RRset within *n
	bool zone_diff;            // true when serializing a zone diff
	bool zone_diff_add;        // diff mode: false = removals pass, true = additions pass
	int ret;                   // sticky error code, returned by serialize_deinit()
	
	const changeset_t *ch;     // changeset being serialized (changeset mode)
	changeset_iter_t it;       // iterator over the changeset contents
	serialize_phase_t changeset_phase; // current state-machine phase
	long rrset_phase;          // progress in current RRset: SERIALIZE_RRSET_INIT = header pending, 0..count-1 = rdata index
	knot_rrset_t rrset_buf[RRSET_BUF_MAXSIZE]; // look-ahead buffer filled by serialize_prepare()
	size_t rrset_buf_size;     // number of valid entries in rrset_buf
	list_t free_rdatasets;     // rdatasets allocated by diff subtraction, freed when no longer buffered
};
+
+serialize_ctx_t *serialize_init(const changeset_t *ch)
+{
+ serialize_ctx_t *ctx = calloc(1, sizeof(*ctx));
+ if (ctx == NULL) {
+ return NULL;
+ }
+
+ ctx->ch = ch;
+ ctx->changeset_phase = ch->soa_from != NULL ? PHASE_SOA_1 : PHASE_SOA_2;
+ ctx->rrset_phase = SERIALIZE_RRSET_INIT;
+ ctx->rrset_buf_size = 0;
+ init_list(&ctx->free_rdatasets);
+
+ return ctx;
+}
+
+serialize_ctx_t *serialize_zone_init(const zone_contents_t *z)
+{
+ serialize_ctx_t *ctx = calloc(1, sizeof(*ctx));
+ if (ctx == NULL) {
+ return NULL;
+ }
+
+ zone_diff_from_zone(&ctx->zdiff, z);
+ ctx->changeset_phase = PHASE_ZONE_SOA;
+ ctx->rrset_phase = SERIALIZE_RRSET_INIT;
+ ctx->rrset_buf_size = 0;
+ init_list(&ctx->free_rdatasets);
+
+ return ctx;
+}
+
+serialize_ctx_t *serialize_zone_diff_init(const zone_diff_t *z)
+{
+ serialize_ctx_t *ctx = calloc(1, sizeof(*ctx));
+ if (ctx == NULL) {
+ return NULL;
+ }
+
+ ctx->zone_diff = true;
+ ctx->zdiff = *z;
+ zone_diff_reverse(&ctx->zdiff); // start with removals of counterparts
+
+ ctx->changeset_phase = PHASE_ZONE_SOA;
+ ctx->rrset_phase = SERIALIZE_RRSET_INIT;
+ ctx->rrset_buf_size = 0;
+ init_list(&ctx->free_rdatasets);
+
+ return ctx;
+}
+
/*!
 * \brief Fetch the next RRset to serialize, advancing the state machine.
 *
 * Returns an empty RRset when nothing more is available; overall completion
 * is signalled by ctx->changeset_phase == PHASE_END. In zone-diff mode,
 * RRsets fully contained in the binode counterpart are skipped, and
 * partially-overlapping rdatasets are subtracted (the temporary rdataset
 * copy is tracked in ctx->free_rdatasets for later freeing).
 */
static knot_rrset_t get_next_rrset(serialize_ctx_t *ctx)
{
	knot_rrset_t res;
	knot_rrset_init_empty(&res);
	switch (ctx->changeset_phase) {
	case PHASE_ZONE_SOA:
		zone_tree_it_begin(&ctx->zdiff.nodes, &ctx->zit);
		ctx->changeset_phase = PHASE_ZONE_NODES;
		return node_rrset(ctx->zdiff.apex, KNOT_RRTYPE_SOA);
	case PHASE_ZONE_NODES:
	case PHASE_ZONE_NSEC3:
		// Find the next node with remaining RRsets, crossing tree boundaries.
		while (ctx->n == NULL || ctx->node_pos >= ctx->n->rrset_count) {
			if (zone_tree_it_finished(&ctx->zit)) {
				zone_tree_it_free(&ctx->zit);
				if (ctx->changeset_phase == PHASE_ZONE_NSEC3 ||
				    zone_tree_is_empty(&ctx->zdiff.nsec3s)) {
					if (ctx->zone_diff && !ctx->zone_diff_add) {
						// Removals pass finished: reverse the diff and
						// restart from the apex for the additions pass.
						ctx->zone_diff_add = true;
						zone_diff_reverse(&ctx->zdiff);
						zone_tree_it_begin(&ctx->zdiff.nodes, &ctx->zit);
						ctx->changeset_phase = PHASE_ZONE_NODES;
						return node_rrset(ctx->zdiff.apex, KNOT_RRTYPE_SOA);
					} else {
						ctx->changeset_phase = PHASE_END;
						return res;
					}
				} else {
					// Regular nodes done; continue with the NSEC3 tree.
					zone_tree_it_begin(&ctx->zdiff.nsec3s, &ctx->zit);
					ctx->changeset_phase = PHASE_ZONE_NSEC3;
				}
			}
			ctx->n = zone_tree_it_val(&ctx->zit);
			zone_tree_it_next(&ctx->zit);
			ctx->node_pos = 0;
		}
		res = node_rrset_at(ctx->n, ctx->node_pos++);
		if (ctx->n == ctx->zdiff.apex && res.type == KNOT_RRTYPE_SOA) {
			// The apex SOA was already emitted in PHASE_ZONE_SOA; skip it.
			return get_next_rrset(ctx);
		}
		if (ctx->zone_diff) {
			knot_rrset_t counter_rr = node_rrset(binode_counterpart(ctx->n), res.type);
			if (counter_rr.ttl == res.ttl && !knot_rrset_empty(&counter_rr)) {
				if (knot_rdataset_subset(&res.rrs, &counter_rr.rrs)) {
					// RRset unchanged between the binodes; skip it.
					return get_next_rrset(ctx);
				}
				// Emit only the rdatas not present in the counterpart.
				knot_rdataset_t rd_copy;
				ctx->ret = knot_rdataset_copy(&rd_copy, &res.rrs, NULL);
				if (ctx->ret == KNOT_EOK) {
					knot_rdataset_subtract(&rd_copy, &counter_rr.rrs, NULL);
					ptrlist_add(&ctx->free_rdatasets, rd_copy.rdata, NULL);
					res.rrs = rd_copy;
					assert(!knot_rrset_empty(&res));
				} else {
					ctx->changeset_phase = PHASE_END;
				}
			}
		}
		return res;
	case PHASE_SOA_1:
		changeset_iter_rem(&ctx->it, ctx->ch);
		ctx->changeset_phase = PHASE_REM;
		return *ctx->ch->soa_from;
	case PHASE_REM:
		res = changeset_iter_next(&ctx->it);
		if (knot_rrset_empty(&res)) {
			// Removal section exhausted; switch to SOA-to + additions.
			changeset_iter_clear(&ctx->it);
			changeset_iter_add(&ctx->it, ctx->ch);
			ctx->changeset_phase = PHASE_ADD;
			return *ctx->ch->soa_to;
		}
		return res;
	case PHASE_SOA_2:
		if (ctx->it.node != NULL) {
			changeset_iter_clear(&ctx->it);
		}
		changeset_iter_add(&ctx->it, ctx->ch);
		ctx->changeset_phase = PHASE_ADD;
		return *ctx->ch->soa_to;
	case PHASE_ADD:
		res = changeset_iter_next(&ctx->it);
		if (knot_rrset_empty(&res)) {
			changeset_iter_clear(&ctx->it);
			ctx->changeset_phase = PHASE_END;
		}
		return res;
	default:
		return res;
	}
}
+
/*!
 * \brief Plan the next chunk: buffer RRsets until the payload reaches
 *        thresh_size, without ever exceeding max_size.
 *
 * \param ctx          Serialization context.
 * \param thresh_size  Preferred (soft minimum) chunk payload size.
 * \param max_size     Hard upper limit on the chunk payload size.
 * \param realsize     Out: exact number of bytes serialize_chunk() will write
 *                     (0 when serialization is finished).
 */
void serialize_prepare(serialize_ctx_t *ctx, size_t thresh_size,
                       size_t max_size, size_t *realsize)
{
	*realsize = 0;

	// check if we are in middle of a rrset
	if (ctx->rrset_buf_size > 0) {
		// Carry the partially-written RRset over as the buffer's first entry.
		ctx->rrset_buf[0] = ctx->rrset_buf[ctx->rrset_buf_size - 1];
		ctx->rrset_buf_size = 1;

		// memory optimization: free all buffered rrsets except last one
		ptrnode_t *n, *next;
		WALK_LIST_DELSAFE(n, next, ctx->free_rdatasets) {
			if (n != TAIL(ctx->free_rdatasets)) {
				free(n->d);
				rem_node(&n->n);
				free(n);
			}
		}
	} else {
		ctx->rrset_buf[0] = get_next_rrset(ctx);
		if (ctx->changeset_phase == PHASE_END) {
			ctx->rrset_buf_size = 0;
			return;
		}
		ctx->rrset_buf_size = 1;
	}

	// Dry-run the serialization, summing the encoded size item by item:
	// SERIALIZE_RRSET_INIT accounts for the RRset header (owner+type+class+count),
	// phases 0..count-1 account for one rdata each (TTL+len+data).
	size_t candidate = 0;
	long tmp_phase = ctx->rrset_phase;
	while (1) {
		if (tmp_phase >= ctx->rrset_buf[ctx->rrset_buf_size - 1].rrs.count) {
			if (ctx->rrset_buf_size >= RRSET_BUF_MAXSIZE) {
				return; // look-ahead buffer full
			}
			ctx->rrset_buf[ctx->rrset_buf_size++] = get_next_rrset(ctx);
			if (ctx->changeset_phase == PHASE_END) {
				ctx->rrset_buf_size--;
				return;
			}
			tmp_phase = SERIALIZE_RRSET_INIT;
		}
		if (tmp_phase == SERIALIZE_RRSET_INIT) {
			candidate += 3 * sizeof(uint16_t) +
			             knot_dname_size(ctx->rrset_buf[ctx->rrset_buf_size - 1].owner);
		} else {
			candidate += sizeof(uint32_t) + sizeof(uint16_t) +
			             knot_rdataset_at(&ctx->rrset_buf[ctx->rrset_buf_size - 1].rrs, tmp_phase)->len;
		}
		if (candidate > max_size) {
			return; // this item would overflow the chunk; stop before it
		}
		*realsize = candidate;
		if (candidate >= thresh_size) {
			return; // chunk is big enough
		}
		tmp_phase++;
	}
}
+
/*!
 * \brief Write the chunk planned by serialize_prepare() into dst_chunk.
 *
 * Walks the buffered RRsets: when ctx->rrset_phase == SERIALIZE_RRSET_INIT
 * the RRset header is written, afterwards one rdata per phase, until the
 * buffer is exhausted or the chunk has no room left. ctx->rrset_phase
 * persists across calls so an RRset split over chunks resumes correctly.
 */
void serialize_chunk(serialize_ctx_t *ctx, uint8_t *dst_chunk, size_t chunk_size)
{
	wire_ctx_t wire = wire_ctx_init(dst_chunk, chunk_size);

	for (size_t i = 0; ; ) {
		if (ctx->rrset_phase >= ctx->rrset_buf[i].rrs.count) {
			// Current RRset fully written; advance to the next buffered one.
			if (++i >= ctx->rrset_buf_size) {
				break;
			}
			ctx->rrset_phase = SERIALIZE_RRSET_INIT;
		}
		if (ctx->rrset_phase == SERIALIZE_RRSET_INIT) {
			// RRset header: owner, type, class, rdata count.
			int size = knot_dname_to_wire(wire.position, ctx->rrset_buf[i].owner,
			                              wire_ctx_available(&wire));
			if (size < 0 || wire_ctx_available(&wire) < size + 3 * sizeof(uint16_t)) {
				break;
			}
			wire_ctx_skip(&wire, size);
			wire_ctx_write_u16(&wire, ctx->rrset_buf[i].type);
			wire_ctx_write_u16(&wire, ctx->rrset_buf[i].rclass);
			wire_ctx_write_u16(&wire, ctx->rrset_buf[i].rrs.count);
		} else {
			const knot_rdata_t *rr = knot_rdataset_at(&ctx->rrset_buf[i].rrs,
			                                          ctx->rrset_phase);
			assert(rr);
			uint16_t rdlen = rr->len;
			if (wire_ctx_available(&wire) < sizeof(uint32_t) + sizeof(uint16_t) + rdlen) {
				break;
			}
			// Compatibility, but one TTL per rrset would be enough.
			wire_ctx_write_u32(&wire, ctx->rrset_buf[i].ttl);
			wire_ctx_write_u16(&wire, rdlen);
			wire_ctx_write(&wire, rr->data, rdlen);
		}
		ctx->rrset_phase++;
	}
	// serialize_prepare() sized the chunk, so no write may have failed.
	assert(wire.error == KNOT_EOK);
}
+
+bool serialize_unfinished(serialize_ctx_t *ctx)
+{
+ return ctx->changeset_phase < PHASE_END;
+}
+
+int serialize_deinit(serialize_ctx_t *ctx)
+{
+ if (ctx->it.node != NULL) {
+ changeset_iter_clear(&ctx->it);
+ }
+ if (ctx->zit.tree != NULL) {
+ zone_tree_it_free(&ctx->zit);
+ }
+ ptrnode_t *n, *next;
+ WALK_LIST_DELSAFE(n, next, ctx->free_rdatasets) {
+ free(n->d);
+ rem_node(&n->n);
+ free(n);
+ }
+ int ret = ctx->ret;
+ free(ctx);
+ return ret;
+}
+
+static uint64_t rrset_binary_size(const knot_rrset_t *rrset)
+{
+ if (rrset == NULL || rrset->rrs.count == 0) {
+ return 0;
+ }
+
+ // Owner size + type + class + RR count.
+ uint64_t size = knot_dname_size(rrset->owner) + 3 * sizeof(uint16_t);
+
+ // RRs.
+ knot_rdata_t *rr = rrset->rrs.rdata;
+ for (uint16_t i = 0; i < rrset->rrs.count; i++) {
+ // TTL + RR size + RR.
+ size += sizeof(uint32_t) + sizeof(uint16_t) + rr->len;
+ rr = knot_rdataset_next(rr);
+ }
+
+ return size;
+}
+
+static size_t node_diff_size(zone_node_t *node)
+{
+ size_t res = 0;
+ knot_rrset_t rr, counter_rr;
+ for (int i = 0; i < node->rrset_count; i++) {
+ rr = node_rrset_at(node, i);
+ counter_rr = node_rrset(binode_counterpart(node), rr.type);
+ if (!knot_rrset_equal(&rr, &counter_rr, true)) {
+ res += rrset_binary_size(&rr);
+ }
+ }
+ return res;
+}
+
+size_t zone_diff_serialized_size(zone_diff_t diff)
+{
+ size_t res = 0;
+ for (int i = 0; i < 2; i++) {
+ zone_diff_reverse(&diff);
+ zone_tree_it_t it = { 0 };
+ int ret = zone_tree_it_double_begin(&diff.nodes, diff.nsec3s.trie != NULL ?
+ &diff.nsec3s : NULL, &it);
+ if (ret != KNOT_EOK) {
+ return 0;
+ }
+ while (!zone_tree_it_finished(&it)) {
+ res += node_diff_size(zone_tree_it_val(&it));
+ zone_tree_it_next(&it);
+ }
+ zone_tree_it_free(&it);
+ }
+ return res;
+}
+
+size_t changeset_serialized_size(const changeset_t *ch)
+{
+ if (ch == NULL) {
+ return 0;
+ }
+
+ size_t soa_from_size = rrset_binary_size(ch->soa_from);
+ size_t soa_to_size = rrset_binary_size(ch->soa_to);
+
+ changeset_iter_t it;
+ if (ch->remove == NULL) {
+ changeset_iter_add(&it, ch);
+ } else {
+ changeset_iter_all(&it, ch);
+ }
+
+ size_t change_size = 0;
+ knot_rrset_t rrset = changeset_iter_next(&it);
+ while (!knot_rrset_empty(&rrset)) {
+ change_size += rrset_binary_size(&rrset);
+ rrset = changeset_iter_next(&it);
+ }
+
+ changeset_iter_clear(&it);
+
+ return soa_from_size + soa_to_size + change_size;
+}
+
+int serialize_rrset(wire_ctx_t *wire, const knot_rrset_t *rrset)
+{
+ assert(wire != NULL && rrset != NULL);
+
+ // write owner, type, class, rrcnt
+ int size = knot_dname_to_wire(wire->position, rrset->owner,
+ wire_ctx_available(wire));
+ if (size < 0 || wire_ctx_available(wire) < size + 3 * sizeof(uint16_t)) {
+ assert(0);
+ }
+ wire_ctx_skip(wire, size);
+ wire_ctx_write_u16(wire, rrset->type);
+ wire_ctx_write_u16(wire, rrset->rclass);
+ wire_ctx_write_u16(wire, rrset->rrs.count);
+
+ for (size_t phase = 0; phase < rrset->rrs.count; phase++) {
+ const knot_rdata_t *rr = knot_rdataset_at(&rrset->rrs, phase);
+ assert(rr);
+ uint16_t rdlen = rr->len;
+ if (wire_ctx_available(wire) < sizeof(uint32_t) + sizeof(uint16_t) + rdlen) {
+ assert(0);
+ }
+ wire_ctx_write_u32(wire, rrset->ttl);
+ wire_ctx_write_u16(wire, rdlen);
+ wire_ctx_write(wire, rr->data, rdlen);
+ assert(wire->error == KNOT_EOK);
+ }
+
+ return KNOT_EOK;
+}
+
+int deserialize_rrset(wire_ctx_t *wire, knot_rrset_t *rrset)
+{
+ assert(wire != NULL && rrset != NULL);
+
+ // Read owner, rtype, rclass and RR count.
+ int size = knot_dname_size(wire->position);
+ if (size < 0) {
+ assert(0);
+ }
+ knot_dname_t *owner = knot_dname_copy(wire->position, NULL);
+ if (owner == NULL || wire_ctx_available(wire) < size + 3 * sizeof(uint16_t)) {
+ knot_dname_free(owner, NULL);
+ return KNOT_EMALF;
+ }
+ wire_ctx_skip(wire, size);
+ uint16_t type = wire_ctx_read_u16(wire);
+ uint16_t rclass = wire_ctx_read_u16(wire);
+ uint16_t rrcount = wire_ctx_read_u16(wire);
+ if (wire->error != KNOT_EOK) {
+ knot_dname_free(owner, NULL);
+ return wire->error;
+ }
+ if (rrset->owner != NULL) {
+ if (knot_dname_cmp(owner, rrset->owner) != 0) {
+ knot_dname_free(owner, NULL);
+ return KNOT_ESEMCHECK;
+ }
+ knot_rrset_clear(rrset, NULL);
+ }
+ knot_rrset_init(rrset, owner, type, rclass, 0);
+
+ for (size_t phase = 0; phase < rrcount && wire_ctx_available(wire) > 0; phase++) {
+ uint32_t ttl = wire_ctx_read_u32(wire);
+ uint32_t rdata_size = wire_ctx_read_u16(wire);
+ if (phase == 0) {
+ rrset->ttl = ttl;
+ }
+ if (wire->error != KNOT_EOK ||
+ wire_ctx_available(wire) < rdata_size ||
+ knot_rrset_add_rdata(rrset, wire->position, rdata_size,
+ NULL) != KNOT_EOK) {
+ knot_rrset_clear(rrset, NULL);
+ return KNOT_EMALF;
+ }
+ wire_ctx_skip(wire, rdata_size);
+ assert(wire->error == KNOT_EOK);
+ }
+
+ return KNOT_EOK;
+}
+
+size_t rrset_serialized_size(const knot_rrset_t *rrset)
+{
+ if (rrset == NULL) {
+ return 0;
+ }
+
+ // Owner size + type + class + RR count.
+ size_t size = knot_dname_size(rrset->owner) + 3 * sizeof(uint16_t);
+
+ for (uint16_t i = 0; i < rrset->rrs.count; i++) {
+ const knot_rdata_t *rr = knot_rdataset_at(&rrset->rrs, i);
+ assert(rr);
+ // TTL + RR size + RR.
+ size += sizeof(uint32_t) + sizeof(uint16_t) + rr->len;
+ }
+
+ return size;
+}