summaryrefslogtreecommitdiffstats
path: root/src/knot/events
diff options
context:
space:
mode:
Diffstat (limited to 'src/knot/events')
-rw-r--r--src/knot/events/events.c564
-rw-r--r--src/knot/events/events.h214
-rw-r--r--src/knot/events/handlers.h49
-rw-r--r--src/knot/events/handlers/backup.c71
-rw-r--r--src/knot/events/handlers/dnssec.c116
-rw-r--r--src/knot/events/handlers/ds_check.c49
-rw-r--r--src/knot/events/handlers/ds_push.c277
-rw-r--r--src/knot/events/handlers/expire.c46
-rw-r--r--src/knot/events/handlers/flush.c33
-rw-r--r--src/knot/events/handlers/freeze_thaw.c46
-rw-r--r--src/knot/events/handlers/load.c406
-rw-r--r--src/knot/events/handlers/notify.c212
-rw-r--r--src/knot/events/handlers/refresh.c1391
-rw-r--r--src/knot/events/handlers/update.c433
-rw-r--r--src/knot/events/replan.c210
-rw-r--r--src/knot/events/replan.h35
16 files changed, 4152 insertions, 0 deletions
diff --git a/src/knot/events/events.c b/src/knot/events/events.c
new file mode 100644
index 0000000..4dba950
--- /dev/null
+++ b/src/knot/events/events.c
@@ -0,0 +1,564 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <stdarg.h>
+#include <time.h>
+#include <unistd.h>
+#include <urcu.h>
+
+#include "libknot/libknot.h"
+#include "knot/common/log.h"
+#include "knot/events/events.h"
+#include "knot/events/handlers.h"
+#include "knot/events/replan.h"
+#include "knot/zone/zone.h"
+
+#define ZONE_EVENT_IMMEDIATE 1 /* Fast-track to worker queue. */
+
+typedef int (*zone_event_cb)(conf_t *conf, zone_t *zone);
+
+typedef struct event_info {
+ zone_event_type_t type;
+ const zone_event_cb callback;
+ const char *name;
+} event_info_t;
+
+static const event_info_t EVENT_INFO[] = {
+ { ZONE_EVENT_LOAD, event_load, "load" },
+ { ZONE_EVENT_REFRESH, event_refresh, "refresh" },
+ { ZONE_EVENT_UPDATE, event_update, "update" },
+ { ZONE_EVENT_EXPIRE, event_expire, "expiration" },
+ { ZONE_EVENT_FLUSH, event_flush, "flush" },
+ { ZONE_EVENT_BACKUP, event_backup, "backup/restore" },
+ { ZONE_EVENT_NOTIFY, event_notify, "notify" },
+ { ZONE_EVENT_DNSSEC, event_dnssec, "re-sign" },
+ { ZONE_EVENT_UFREEZE, event_ufreeze, "update-freeze" },
+ { ZONE_EVENT_UTHAW, event_uthaw, "update-thaw" },
+ { ZONE_EVENT_DS_CHECK, event_ds_check, "DS-check" },
+ { ZONE_EVENT_DS_PUSH, event_ds_push, "DS-push" },
+ { 0 }
+};
+
+static const event_info_t *get_event_info(zone_event_type_t type)
+{
+ const event_info_t *info;
+ for (info = EVENT_INFO; info->callback != NULL; info++) {
+ if (info->type == type) {
+ return info;
+ }
+ }
+
+ assert(0);
+ return NULL;
+}
+
+static bool valid_event(zone_event_type_t type)
+{
+ return (type > ZONE_EVENT_INVALID && type < ZONE_EVENT_COUNT);
+}
+
+bool ufreeze_applies(zone_event_type_t type)
+{
+ switch (type) {
+ case ZONE_EVENT_LOAD:
+ case ZONE_EVENT_REFRESH:
+ case ZONE_EVENT_UPDATE:
+ case ZONE_EVENT_FLUSH:
+ case ZONE_EVENT_DNSSEC:
+ case ZONE_EVENT_DS_CHECK:
+ return true;
+ default:
+ return false;
+ }
+}
+
+/*! \brief Return remaining time to planned event (seconds). */
+static time_t time_until(time_t planned)
+{
+ time_t now = time(NULL);
+ return now < planned ? (planned - now) : 0;
+}
+
+/*!
+ * \brief Set time of a given event type.
+ */
+static void event_set_time(zone_events_t *events, zone_event_type_t type, time_t time)
+{
+ assert(events);
+ assert(valid_event(type));
+
+ events->time[type] = time;
+}
+
+/*!
+ * \brief Get time of a given event type.
+ */
+static time_t event_get_time(zone_events_t *events, zone_event_type_t type)
+{
+ assert(events);
+ assert(valid_event(type));
+
+ return events->time[type];
+}
+
+/*!
+ * \brief Find next scheduled zone event.
+ *
+ * \note After the UTHAW event, get_next_event() is also invoked. In that situation,
+ * all the events are suddenly allowed, and those which were planned into
+ * the ufrozen interval, start to be performed one-by-one sorted by their times.
+ *
+ * \param events Zone events.
+ *
+ * \return Zone event type, or ZONE_EVENT_INVALID if no event is scheduled.
+ */
+static zone_event_type_t get_next_event(zone_events_t *events)
+{
+ if (!events) {
+ return ZONE_EVENT_INVALID;
+ }
+
+ zone_event_type_t next_type = ZONE_EVENT_INVALID;
+ time_t next = 0;
+
+ for (int i = 0; i < ZONE_EVENT_COUNT; i++) {
+ time_t current = events->time[i];
+
+ if ((next == 0 || current < next) && (current != 0) &&
+ (events->forced[i] || !events->ufrozen || !ufreeze_applies(i))) {
+ next = current;
+ next_type = i;
+ }
+ }
+
+ return next_type;
+}
+
+/*!
+ * \brief Find the time of the next scheduled event.
+ */
+static time_t get_next_time(zone_events_t *events)
+{
+ zone_event_type_t type = get_next_event(events);
+ return valid_event(type) ? event_get_time(events, type) : 0;
+}
+
+/*!
+ * \brief Cancel scheduled item, schedule first enqueued item.
+ *
+ * \param mx_handover events->mx already locked. Take it over and unlock when done.
+ */
+static void reschedule(zone_events_t *events, bool mx_handover)
+{
+ assert(events);
+
+ if (!mx_handover) {
+ pthread_mutex_lock(&events->reschedule_lock);
+ pthread_mutex_lock(&events->mx);
+ }
+
+ if (!events->event || events->running || events->frozen) {
+ pthread_mutex_unlock(&events->mx);
+ pthread_mutex_unlock(&events->reschedule_lock);
+ return;
+ }
+
+ zone_event_type_t type = get_next_event(events);
+ if (!valid_event(type)) {
+ pthread_mutex_unlock(&events->mx);
+ pthread_mutex_unlock(&events->reschedule_lock);
+ return;
+ }
+
+ time_t diff = time_until(event_get_time(events, type));
+
+ pthread_mutex_unlock(&events->mx);
+
+ evsched_schedule(events->event, diff * 1000);
+
+ pthread_mutex_unlock(&events->reschedule_lock);
+}
+
+/*!
+ * \brief Zone event wrapper, expected to be called from a worker thread.
+ *
+ * 1. Takes the next planned event.
+ * 2. Resets the event's scheduled time (and forced flag).
+ * 3. Perform the event's callback.
+ * 4. Schedule next event planned event.
+ */
+static void event_wrap(worker_task_t *task)
+{
+	assert(task);
+	assert(task->ctx);
+
+	zone_t *zone = task->ctx;
+	zone_events_t *events = &zone->events;
+
+	pthread_mutex_lock(&events->mx);
+	zone_event_type_t type = get_next_event(events);
+	if (!valid_event(type)) {
+		events->running = false;
+		pthread_mutex_unlock(&events->mx);
+		return;
+	}
+	pthread_cond_t *blocking = events->blocking[type]; /* index is valid only after the check above */
+	events->type = type;
+	event_set_time(events, type, 0);
+	events->forced[type] = false;
+	pthread_mutex_unlock(&events->mx);
+
+	const event_info_t *info = get_event_info(type);
+
+	/* Create a configuration copy just for this event. */
+	conf_t *conf;
+	rcu_read_lock();
+	int ret = conf_clone(&conf);
+	rcu_read_unlock();
+	if (ret == KNOT_EOK) {
+		/* Execute the event callback. */
+		ret = info->callback(conf, zone);
+		conf_free(conf);
+	}
+
+	if (ret != KNOT_EOK) {
+		log_zone_error(zone->name, "zone event '%s' failed (%s)",
+		               info->name, knot_strerror(ret));
+	}
+
+	pthread_mutex_lock(&events->reschedule_lock);
+	pthread_mutex_lock(&events->mx);
+	events->running = false;
+	events->type = ZONE_EVENT_INVALID;
+
+	if (blocking != NULL) {
+		events->blocking[type] = NULL;
+		events->result[type] = ret;
+		pthread_cond_broadcast(blocking);
+	}
+
+	if (events->run_end != NULL) {
+		pthread_cond_broadcast(events->run_end);
+	}
+
+	reschedule(events, true); // unlocks events->mx
+}
+
+/*!
+ * \brief Called by scheduler thread if the event occurs.
+ */
+static void event_dispatch(event_t *event)
+{
+ assert(event);
+ assert(event->data);
+
+ zone_events_t *events = event->data;
+
+ pthread_mutex_lock(&events->mx);
+ if (!events->running && !events->frozen) {
+ events->running = true;
+ worker_pool_assign(events->pool, &events->task);
+ }
+ pthread_mutex_unlock(&events->mx);
+}
+
+int zone_events_init(zone_t *zone)
+{
+ if (!zone) {
+ return KNOT_EINVAL;
+ }
+
+ zone_events_t *events = &zone->events;
+
+ memset(&zone->events, 0, sizeof(zone->events));
+ pthread_mutex_init(&events->mx, NULL);
+ pthread_mutex_init(&events->reschedule_lock, NULL);
+ events->task.ctx = zone;
+ events->task.run = event_wrap;
+
+ return KNOT_EOK;
+}
+
+int zone_events_setup(struct zone *zone, worker_pool_t *workers,
+ evsched_t *scheduler)
+{
+ if (!zone || !workers || !scheduler) {
+ return KNOT_EINVAL;
+ }
+
+ event_t *event;
+ event = evsched_event_create(scheduler, event_dispatch, &zone->events);
+ if (!event) {
+ return KNOT_ENOMEM;
+ }
+
+ zone->events.event = event;
+ zone->events.pool = workers;
+
+ return KNOT_EOK;
+}
+
+void zone_events_deinit(zone_t *zone)
+{
+ if (!zone) {
+ return;
+ }
+
+ zone_events_t *events = &zone->events;
+
+ pthread_mutex_lock(&events->reschedule_lock);
+ pthread_mutex_lock(&events->mx);
+
+ evsched_cancel(events->event);
+ evsched_event_free(events->event);
+
+ pthread_mutex_unlock(&events->mx);
+ pthread_mutex_destroy(&events->mx);
+ pthread_mutex_unlock(&events->reschedule_lock);
+ pthread_mutex_destroy(&events->reschedule_lock);
+
+ memset(events, 0, sizeof(*events));
+}
+
+void _zone_events_schedule_at(zone_t *zone, ...)
+{
+ zone_events_t *events = &zone->events;
+ va_list args;
+ va_start(args, zone);
+
+ pthread_mutex_lock(&events->reschedule_lock);
+ pthread_mutex_lock(&events->mx);
+
+ time_t old_next = get_next_time(events);
+
+ // update timers
+ for (int type = va_arg(args, int); valid_event(type); type = va_arg(args, int)) {
+ time_t planned = va_arg(args, time_t);
+ if (planned < 0) {
+ continue;
+ }
+
+ time_t current = event_get_time(events, type);
+ if (current == 0 || (planned == 0 && !events->forced[type]) ||
+ (planned > 0 && planned < current)) {
+ event_set_time(events, type, planned);
+ }
+ }
+
+ // reschedule if changed
+ time_t next = get_next_time(events);
+ if (old_next != next) {
+ reschedule(events, true); // unlocks events->mx
+ } else {
+ pthread_mutex_unlock(&events->mx);
+ pthread_mutex_unlock(&events->reschedule_lock);
+ }
+
+ va_end(args);
+}
+
+void zone_events_schedule_user(zone_t *zone, zone_event_type_t type)
+{
+ if (!zone || !valid_event(type)) {
+ return;
+ }
+
+ zone_events_t *events = &zone->events;
+ pthread_mutex_lock(&events->mx);
+ events->forced[type] = true;
+ pthread_mutex_unlock(&events->mx);
+
+ zone_events_schedule_now(zone, type);
+
+ // reschedule because get_next_event result changed outside of _zone_events_schedule_at
+ reschedule(events, false);
+}
+
+int zone_events_schedule_blocking(zone_t *zone, zone_event_type_t type, bool user)
+{
+ if (!zone || !valid_event(type)) {
+ return KNOT_EINVAL;
+ }
+
+ zone_events_t *events = &zone->events;
+ pthread_cond_t local_cond;
+ pthread_cond_init(&local_cond, NULL);
+
+ pthread_mutex_lock(&events->mx);
+ while (events->blocking[type] != NULL) {
+ pthread_cond_wait(events->blocking[type], &events->mx);
+ }
+ events->blocking[type] = &local_cond;
+ pthread_mutex_unlock(&events->mx);
+
+ if (user) {
+ zone_events_schedule_user(zone, type);
+ } else {
+ zone_events_schedule_now(zone, type);
+ }
+
+ pthread_mutex_lock(&events->mx);
+ while (events->blocking[type] == &local_cond) {
+ pthread_cond_wait(&local_cond, &events->mx);
+ }
+ int ret = events->result[type];
+ pthread_mutex_unlock(&events->mx);
+ pthread_cond_destroy(&local_cond);
+
+ return ret;
+}
+
+void zone_events_enqueue(zone_t *zone, zone_event_type_t type)
+{
+ if (!zone || !valid_event(type)) {
+ return;
+ }
+
+ zone_events_t *events = &zone->events;
+
+ pthread_mutex_lock(&events->mx);
+
+ /* Bypass scheduler if no event is running. */
+ if (!events->running && !events->frozen &&
+ (!events->ufrozen || !ufreeze_applies(type))) {
+ events->running = true;
+ events->type = type;
+ event_set_time(events, type, ZONE_EVENT_IMMEDIATE);
+ worker_pool_assign(events->pool, &events->task);
+ pthread_mutex_unlock(&events->mx);
+ return;
+ }
+
+ pthread_mutex_unlock(&events->mx);
+
+ /* Execute as soon as possible. */
+ zone_events_schedule_now(zone, type);
+}
+
+void zone_events_freeze(zone_t *zone)
+{
+ if (!zone) {
+ return;
+ }
+
+ zone_events_t *events = &zone->events;
+
+ /* Prevent new events being enqueued. */
+ pthread_mutex_lock(&events->reschedule_lock);
+ pthread_mutex_lock(&events->mx);
+ events->frozen = true;
+ pthread_mutex_unlock(&events->mx);
+
+ /* Cancel current event. */
+ evsched_cancel(events->event);
+ pthread_mutex_unlock(&events->reschedule_lock);
+}
+
+void zone_events_freeze_blocking(zone_t *zone)
+{
+ if (!zone) {
+ return;
+ }
+
+ zone_events_freeze(zone);
+
+ zone_events_t *events = &zone->events;
+
+ /* Wait for running event to finish. */
+ pthread_cond_t cond;
+ pthread_cond_init(&cond, NULL);
+ pthread_mutex_lock(&events->mx);
+ while (events->running) {
+ events->run_end = &cond;
+ pthread_cond_wait(&cond, &events->mx);
+ }
+ events->run_end = NULL;
+ pthread_mutex_unlock(&events->mx);
+ pthread_cond_destroy(&cond);
+}
+
+void zone_events_start(zone_t *zone)
+{
+ if (!zone) {
+ return;
+ }
+
+ zone_events_t *events = &zone->events;
+
+ /* Unlock the events queue. */
+ pthread_mutex_lock(&events->reschedule_lock);
+ pthread_mutex_lock(&events->mx);
+ events->frozen = false;
+
+ reschedule(events, true); //unlocks events->mx
+}
+
+time_t zone_events_get_time(const struct zone *zone, zone_event_type_t type)
+{
+ if (zone == NULL) {
+ return KNOT_EINVAL;
+ }
+
+ time_t event_time = KNOT_ENOENT;
+ zone_events_t *events = (zone_events_t *)&zone->events;
+
+ pthread_mutex_lock(&events->mx);
+
+ /* Get next valid event. */
+ if (valid_event(type)) {
+ event_time = event_get_time(events, type);
+ }
+
+ pthread_mutex_unlock(&events->mx);
+
+ return event_time;
+}
+
+const char *zone_events_get_name(zone_event_type_t type)
+{
+ /* Get information about the event and time. */
+ const event_info_t *info = get_event_info(type);
+ if (info == NULL) {
+ return NULL;
+ }
+
+ return info->name;
+}
+
+time_t zone_events_get_next(const struct zone *zone, zone_event_type_t *type)
+{
+ if (zone == NULL || type == NULL) {
+ return KNOT_EINVAL;
+ }
+
+ time_t next_time = KNOT_ENOENT;
+ zone_events_t *events = (zone_events_t *)&zone->events;
+
+ pthread_mutex_lock(&events->mx);
+
+ /* Get time of next valid event. */
+ *type = get_next_event(events);
+ if (valid_event(*type)) {
+ next_time = event_get_time(events, *type);
+ } else {
+ *type = ZONE_EVENT_INVALID;
+ }
+
+ pthread_mutex_unlock(&events->mx);
+
+ return next_time;
+}
diff --git a/src/knot/events/events.h b/src/knot/events/events.h
new file mode 100644
index 0000000..8ede5fb
--- /dev/null
+++ b/src/knot/events/events.h
@@ -0,0 +1,214 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include <pthread.h>
+#include <stdbool.h>
+#include <sys/time.h>
+
+#include "knot/conf/conf.h"
+#include "knot/common/evsched.h"
+#include "knot/worker/pool.h"
+#include "libknot/db/db.h"
+
+struct zone;
+
+typedef enum zone_event_type {
+ ZONE_EVENT_INVALID = -1,
+ // supported event types
+ ZONE_EVENT_LOAD = 0,
+ ZONE_EVENT_REFRESH,
+ ZONE_EVENT_UPDATE,
+ ZONE_EVENT_EXPIRE,
+ ZONE_EVENT_FLUSH,
+ ZONE_EVENT_BACKUP,
+ ZONE_EVENT_NOTIFY,
+ ZONE_EVENT_DNSSEC,
+ ZONE_EVENT_UFREEZE,
+ ZONE_EVENT_UTHAW,
+ ZONE_EVENT_DS_CHECK,
+ ZONE_EVENT_DS_PUSH,
+ // terminator
+ ZONE_EVENT_COUNT,
+} zone_event_type_t;
+
+typedef struct zone_events {
+ pthread_mutex_t mx; //!< Mutex protecting the struct.
+ pthread_mutex_t reschedule_lock;//!< Prevent concurrent reschedule() making mess.
+
+ zone_event_type_t type; //!< Type of running event.
+ bool running; //!< Some zone event is being run.
+ pthread_cond_t *run_end; //!< Notify this one after finishing a job.
+
+ bool frozen; //!< Terminated, don't schedule new events.
+ bool ufrozen; //!< Updates to the zone temporarily frozen by user.
+
+ event_t *event; //!< Scheduler event.
+ worker_pool_t *pool; //!< Server worker pool.
+
+ worker_task_t task; //!< Event execution context.
+ time_t time[ZONE_EVENT_COUNT]; //!< Event execution times.
+ bool forced[ZONE_EVENT_COUNT]; //!< Flag that the event was invoked by user ctl.
+ pthread_cond_t *blocking[ZONE_EVENT_COUNT]; //!< For blocking events: dispatching cond.
+ int result[ZONE_EVENT_COUNT]; //!< Event return values (in blocking operations).
+} zone_events_t;
+
+/*!
+ * \brief Initialize zone events.
+ *
+ * The function will not set up the scheduling, use \ref zone_events_setup
+ * to do that.
+ *
+ * \param zone Pointer to zone (context of execution).
+ *
+ * \return KNOT_E*
+ */
+int zone_events_init(struct zone *zone);
+
+/*!
+ * \brief Set up zone events execution.
+ *
+ * \param zone Zone to setup.
+ * \param workers Worker thread pool.
+ * \param scheduler Event scheduler.
+ *
+ * \return KNOT_E*
+ */
+int zone_events_setup(struct zone *zone, worker_pool_t *workers,
+ evsched_t *scheduler);
+
+/*!
+ * \brief Deinitialize zone events.
+ *
+ * \param zone Zone whose events we want to deinitialize.
+ */
+void zone_events_deinit(struct zone *zone);
+
+/*!
+ * \brief Enqueue event type for asynchronous execution.
+ *
+ * \note This is similar to the scheduling an event for NOW, but it can
+ * bypass the event scheduler if no event is running at the moment.
+ *
+ * \param zone Zone to schedule new event for.
+ * \param type Type of event.
+ */
+void zone_events_enqueue(struct zone *zone, zone_event_type_t type);
+
+/*!
+ * \brief Schedule new zone event.
+ *
+ * The function allows to set multiple events at once.
+ *
+ * The function interprets time values (t) as follows:
+ *
+ * t > 0: schedule timer for a given time
+ * t = 0: cancel the timer
+ * t < 0: ignore change in the timer
+ *
+ * If the event is already scheduled, the new time will be set only if the
+ * new time is earlier than the currently scheduled one. To override the
+ * check, cancel and schedule the event in a single function call.
+ *
+ * \param zone Zone to schedule new event for.
+ * \param ... Sequence of zone_event_type_t and time_t terminated with
+ * ZONE_EVENT_INVALID.
+ */
+void _zone_events_schedule_at(struct zone *zone, ...);
+
+#define zone_events_schedule_at(zone, events...) \
+ _zone_events_schedule_at(zone, events, ZONE_EVENT_INVALID)
+
+#define zone_events_schedule_now(zone, type) \
+ zone_events_schedule_at(zone, type, time(NULL))
+
+/*!
+ * \brief Schedule zone event to now, with forced flag.
+ */
+void zone_events_schedule_user(struct zone *zone, zone_event_type_t type);
+
+/*!
+ * \brief Schedule new zone event as soon as possible and wait for its
+ * completion (end of task run), with optional forced flag.
+ *
+ * \param zone Zone to schedule new event for.
+ * \param type Zone event type.
+ * \param user Forced flag indication.
+ *
+ * \return KNOT_E*
+ */
+int zone_events_schedule_blocking(struct zone *zone, zone_event_type_t type, bool user);
+
+/*!
+ * \brief Freeze all zone events and prevent new events from running.
+ *
+ * \param zone Zone to freeze events for.
+ */
+void zone_events_freeze(struct zone *zone);
+
+/*!
+ * \brief Freeze zone events and wait for running event to finish.
+ *
+ * \param zone Zone to freeze events for.
+ */
+void zone_events_freeze_blocking(struct zone *zone);
+
+/*!
+ * \brief ufreeze_applies
+ * \param type Type of event to be checked
+ * \return true / false if user freeze applies
+ */
+bool ufreeze_applies(zone_event_type_t type);
+
+/*!
+ * \brief Start the events processing.
+ *
+ * \param zone Zone to start processing for.
+ */
+void zone_events_start(struct zone *zone);
+
+/*!
+ * \brief Return time of the occurrence of the given event.
+ *
+ * \param zone Zone to get event time from.
+ * \param type Event type.
+ *
+ * \retval time of the event when event found
+ * \retval 0 when the event is not planned
+ * \retval negative value if event is invalid
+ */
+time_t zone_events_get_time(const struct zone *zone, zone_event_type_t type);
+
+/*!
+ * \brief Return text name of the event.
+ *
+ * \param type Type of event.
+ *
+ * \retval String with event name if it exists.
+ * \retval NULL if the event does not exist.
+ */
+const char *zone_events_get_name(zone_event_type_t type);
+
+/*!
+ * \brief Return time and type of the next event.
+ *
+ * \param zone Zone to get next event from.
+ * \param type [out] Type of the next event will be stored in the parameter.
+ *
+ * \return time of the next event or an error (negative number)
+ */
+time_t zone_events_get_next(const struct zone *zone, zone_event_type_t *type);
diff --git a/src/knot/events/handlers.h b/src/knot/events/handlers.h
new file mode 100644
index 0000000..e6dfd6c
--- /dev/null
+++ b/src/knot/events/handlers.h
@@ -0,0 +1,49 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "knot/conf/conf.h"
+#include "knot/zone/zone.h"
+#include "knot/dnssec/zone-events.h" // zone_sign_reschedule_t
+
+/*! \brief Loads or reloads potentially changed zone. */
+int event_load(conf_t *conf, zone_t *zone);
+/*! \brief Refresh a zone from a master. */
+int event_refresh(conf_t *conf, zone_t *zone);
+/*! \brief Processes DDNS updates in the zone's DDNS queue. */
+int event_update(conf_t *conf, zone_t *zone);
+/*! \brief Empties in-memory zone contents. */
+int event_expire(conf_t *conf, zone_t *zone);
+/*! \brief Flushes zone contents into text file. */
+int event_flush(conf_t *conf, zone_t *zone);
+/*! \brief Backs up zone contents, metadata, keys, etc to a directory. */
+int event_backup(conf_t *conf, zone_t *zone);
+/*! \brief Sends notify to slaves. */
+int event_notify(conf_t *conf, zone_t *zone);
+/*! \brief Signs the zone using its DNSSEC keys, perform key rollovers. */
+int event_dnssec(conf_t *conf, zone_t *zone);
+/*! \brief NOT A HANDLER, just a helper function to reschedule based on reschedule_t */
+void event_dnssec_reschedule(conf_t *conf, zone_t *zone,
+ const zone_sign_reschedule_t *refresh, bool zone_changed);
+/*! \brief Freeze those events causing zone contents change. */
+int event_ufreeze(conf_t *conf, zone_t *zone);
+/*! \brief Unfreeze zone updates. */
+int event_uthaw(conf_t *conf, zone_t *zone);
+/*! \brief When CDS/CDNSKEY published, look for matching DS */
+int event_ds_check(conf_t *conf, zone_t *zone);
+/*! \brief After change of CDS/CDNSKEY, push the new DS to parent zone as DDNS. */
+int event_ds_push(conf_t *conf, zone_t *zone);
diff --git a/src/knot/events/handlers/backup.c b/src/knot/events/handlers/backup.c
new file mode 100644
index 0000000..a6b258c
--- /dev/null
+++ b/src/knot/events/handlers/backup.c
@@ -0,0 +1,71 @@
+/* Copyright (C) 2020 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <urcu.h>
+
+#include "knot/common/log.h"
+#include "knot/conf/conf.h"
+#include "knot/events/handlers.h"
+#include "knot/zone/backup.h"
+
+int event_backup(conf_t *conf, zone_t *zone)
+{
+ assert(zone);
+
+ zone_backup_ctx_t *ctx = zone->backup_ctx;
+ if (ctx == NULL) {
+ return KNOT_EINVAL;
+ }
+
+ bool restore = ctx->restore_mode;
+
+ if (!restore && ctx->failed) {
+ // No need to proceed with already faulty backup.
+ return KNOT_EOK;
+ }
+
+ char *back_dir = strdup(ctx->backup_dir);
+ if (back_dir == NULL) {
+ return KNOT_ENOMEM;
+ }
+
+ if (restore) {
+ // expire zone
+ zone_contents_t *expired = zone_switch_contents(zone, NULL);
+ synchronize_rcu();
+ knot_sem_wait(&zone->cow_lock);
+ zone_contents_deep_free(expired);
+ knot_sem_post(&zone->cow_lock);
+ zone->zonefile.exists = false;
+ }
+
+ int ret = zone_backup(conf, zone);
+ if (ret == KNOT_EOK) {
+ log_zone_info(zone->name, "zone %s '%s'",
+ restore ? "restored from" : "backed up to", back_dir);
+ } else {
+ log_zone_warning(zone->name, "zone %s failed (%s)",
+ restore ? "restore" : "backup", knot_strerror(ret));
+ }
+
+ if (restore && ret == KNOT_EOK) {
+ zone_reset(conf, zone);
+ }
+
+ free(back_dir);
+ return ret;
+}
diff --git a/src/knot/events/handlers/dnssec.c b/src/knot/events/handlers/dnssec.c
new file mode 100644
index 0000000..8263b0d
--- /dev/null
+++ b/src/knot/events/handlers/dnssec.c
@@ -0,0 +1,116 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+
+#include "knot/common/log.h"
+#include "knot/conf/conf.h"
+#include "knot/dnssec/zone-events.h"
+#include "knot/updates/apply.h"
+#include "knot/zone/zone.h"
+#include "libknot/errcode.h"
+
+static void log_dnssec_next(const knot_dname_t *zone, knot_time_t refresh_at)
+{
+ char time_str[64] = { 0 };
+ struct tm time_gm = { 0 };
+ time_t refresh = refresh_at;
+ localtime_r(&refresh, &time_gm);
+ strftime(time_str, sizeof(time_str), KNOT_LOG_TIME_FORMAT, &time_gm);
+ if (refresh_at == 0) {
+ log_zone_warning(zone, "DNSSEC, next signing not scheduled");
+ } else {
+ log_zone_info(zone, "DNSSEC, next signing at %s", time_str);
+ }
+}
+
+void event_dnssec_reschedule(conf_t *conf, zone_t *zone,
+ const zone_sign_reschedule_t *refresh, bool zone_changed)
+{
+ time_t now = time(NULL);
+ time_t ignore = -1;
+ knot_time_t refresh_at = refresh->next_sign;
+
+ refresh_at = knot_time_min(refresh_at, refresh->next_rollover);
+ refresh_at = knot_time_min(refresh_at, refresh->next_nsec3resalt);
+
+ log_dnssec_next(zone->name, (time_t)refresh_at);
+
+ if (refresh->plan_ds_check) {
+ zone->timers.next_ds_check = now;
+ }
+
+ zone_events_schedule_at(zone,
+ ZONE_EVENT_DNSSEC, refresh_at ? (time_t)refresh_at : ignore,
+ ZONE_EVENT_DS_CHECK, refresh->plan_ds_check ? now : ignore
+ );
+ if (zone_changed) {
+ zone_schedule_notify(zone, 0);
+ }
+}
+
+int event_dnssec(conf_t *conf, zone_t *zone)
+{
+ assert(zone);
+
+ zone_sign_reschedule_t resch = { 0 };
+ zone_sign_roll_flags_t r_flags = KEY_ROLL_ALLOW_ALL;
+ int sign_flags = 0;
+ bool zone_changed = false;
+
+ if (zone_get_flag(zone, ZONE_FORCE_RESIGN, true)) {
+ log_zone_info(zone->name, "DNSSEC, dropping previous "
+ "signatures, re-signing zone");
+ sign_flags = ZONE_SIGN_DROP_SIGNATURES;
+ } else {
+ log_zone_info(zone->name, "DNSSEC, signing zone");
+ sign_flags = 0;
+ }
+
+ if (zone_get_flag(zone, ZONE_FORCE_KSK_ROLL, true)) {
+ r_flags |= KEY_ROLL_FORCE_KSK_ROLL;
+ }
+ if (zone_get_flag(zone, ZONE_FORCE_ZSK_ROLL, true)) {
+ r_flags |= KEY_ROLL_FORCE_ZSK_ROLL;
+ }
+
+ zone_update_t up;
+ int ret = zone_update_init(&up, zone, UPDATE_INCREMENTAL | UPDATE_NO_CHSET);
+ if (ret != KNOT_EOK) {
+ return ret;
+ }
+
+ ret = knot_dnssec_zone_sign(&up, conf, sign_flags, r_flags, 0, &resch);
+ if (ret != KNOT_EOK) {
+ goto done;
+ }
+
+ zone_changed = !zone_update_no_change(&up);
+
+ ret = zone_update_commit(conf, &up);
+ if (ret != KNOT_EOK) {
+ goto done;
+ }
+
+done:
+ // Schedule dependent events
+ event_dnssec_reschedule(conf, zone, &resch, zone_changed);
+
+ if (ret != KNOT_EOK) {
+ zone_update_clear(&up);
+ }
+ return ret;
+}
diff --git a/src/knot/events/handlers/ds_check.c b/src/knot/events/handlers/ds_check.c
new file mode 100644
index 0000000..0138bed
--- /dev/null
+++ b/src/knot/events/handlers/ds_check.c
@@ -0,0 +1,49 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include "knot/dnssec/ds_query.h"
+#include "knot/zone/zone.h"
+
+int event_ds_check(conf_t *conf, zone_t *zone)
+{
+ kdnssec_ctx_t ctx = { 0 };
+
+ int ret = kdnssec_ctx_init(conf, &ctx, zone->name, zone_kaspdb(zone), NULL);
+ if (ret != KNOT_EOK) {
+ return ret;
+ }
+
+ ret = knot_parent_ds_query(conf, &ctx, conf->cache.srv_tcp_remote_io_timeout);
+
+ zone->timers.next_ds_check = 0;
+ switch (ret) {
+ case KNOT_NO_READY_KEY:
+ break;
+ case KNOT_EOK:
+ zone_events_schedule_now(zone, ZONE_EVENT_DNSSEC);
+ break;
+ default:
+ if (ctx.policy->ksk_sbm_check_interval > 0) {
+ time_t next_check = time(NULL) + ctx.policy->ksk_sbm_check_interval;
+ zone->timers.next_ds_check = next_check;
+ zone_events_schedule_at(zone, ZONE_EVENT_DS_CHECK, next_check);
+ }
+ }
+
+ kdnssec_ctx_deinit(&ctx);
+
+	return KNOT_EOK; // always OK; on failure the check has been rescheduled
+}
diff --git a/src/knot/events/handlers/ds_push.c b/src/knot/events/handlers/ds_push.c
new file mode 100644
index 0000000..11aef75
--- /dev/null
+++ b/src/knot/events/handlers/ds_push.c
@@ -0,0 +1,277 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+
+#include "knot/common/log.h"
+#include "knot/conf/conf.h"
+#include "knot/query/query.h"
+#include "knot/query/requestor.h"
+#include "knot/zone/zone.h"
+#include "libknot/errcode.h"
+
+/*! \brief DS push state shared across the query layer callbacks. */
+struct ds_push_data {
+	const knot_dname_t *zone;          // Zone the CDS/DS records belong to.
+	const knot_dname_t *parent_query;  // Name currently queried for parent SOA.
+	knot_dname_t *parent_soa;          // Parent zone apex, once discovered.
+	knot_rrset_t del_old_ds;           // ANY-class DS RRset removing old DS records.
+	knot_rrset_t new_ds;               // New DS RRset derived from the zone's CDS.
+	const struct sockaddr *remote;     // Remote address, for logging.
+	query_edns_data_t edns;            // EDNS parameters for outgoing queries.
+};
+
+/*! \brief Retry interval [seconds] after a failed DS push. */
+#define DS_PUSH_RETRY 600
+
+#define DS_PUSH_LOG(priority, zone, remote, reused, fmt, ...) \
+	ns_log(priority, zone, LOG_OPERATION_DS_PUSH, LOG_DIRECTION_OUT, remote, \
+	       reused, fmt, ## __VA_ARGS__)
+
+// Wire form of the special delete-DS record (RFC 8078 "0 0 0 00").
+static const knot_rdata_t remove_cds = { 5, { 0, 0, 0, 0, 0 } };
+
+/*! \brief Layer callback: store the processing context and start producing. */
+static int ds_push_begin(knot_layer_t *layer, void *params)
+{
+	layer->data = params;
+
+	return KNOT_STATE_PRODUCE;
+}
+
+/*!
+ * \brief Fill pkt with a SOA query for the next parent candidate name.
+ *
+ * Strips one leading label from the previously queried name, so repeated
+ * calls walk up the DNS tree until the parent zone apex answers.
+ */
+static int parent_soa_produce(struct ds_push_data *data, knot_pkt_t *pkt)
+{
+	assert(data->parent_query[0] != '\0');
+	data->parent_query = knot_wire_next_label(data->parent_query, NULL);
+
+	int ret = knot_pkt_put_question(pkt, data->parent_query, KNOT_CLASS_IN, KNOT_RRTYPE_SOA);
+	if (ret != KNOT_EOK) {
+		return KNOT_STATE_FAIL;
+	}
+
+	ret = query_put_edns(pkt, &data->edns);
+	if (ret != KNOT_EOK) {
+		return KNOT_STATE_FAIL;
+	}
+
+	return KNOT_STATE_CONSUME;
+}
+
+/*!
+ * \brief Layer callback: produce either the parent SOA query or the
+ *        DDNS-formatted DS push update.
+ *
+ * Until the parent zone apex is known, a SOA query is produced instead of
+ * the update. The update removes all existing DS records and, unless the
+ * zone publishes the special delete-CDS record, adds the DS records derived
+ * from the zone's CDS.
+ */
+static int ds_push_produce(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	struct ds_push_data *data = layer->data;
+
+	query_init_pkt(pkt);
+
+	if (data->parent_soa == NULL) {
+		return parent_soa_produce(data, pkt);
+	}
+
+	knot_wire_set_opcode(pkt->wire, KNOT_OPCODE_UPDATE);
+	int ret = knot_pkt_put_question(pkt, data->parent_soa, KNOT_CLASS_IN, KNOT_RRTYPE_SOA);
+	if (ret != KNOT_EOK) {
+		return KNOT_STATE_FAIL;
+	}
+
+	knot_pkt_begin(pkt, KNOT_AUTHORITY);
+
+	assert(data->del_old_ds.type == KNOT_RRTYPE_DS);
+	ret = knot_pkt_put(pkt, KNOT_COMPR_HINT_NONE, &data->del_old_ds, 0);
+	if (ret != KNOT_EOK) {
+		return KNOT_STATE_FAIL;
+	}
+
+	assert(data->new_ds.type == KNOT_RRTYPE_DS);
+	assert(!knot_rrset_empty(&data->new_ds));
+	if (knot_rdata_cmp(data->new_ds.rrs.rdata, &remove_cds) != 0) {
+		// Otherwise only remove DS - it was a special "remove CDS".
+		ret = knot_pkt_put(pkt, KNOT_COMPR_HINT_NONE, &data->new_ds, 0);
+		if (ret != KNOT_EOK) {
+			return KNOT_STATE_FAIL;
+		}
+	}
+
+	// Fix: check the EDNS result consistently with parent_soa_produce().
+	ret = query_put_edns(pkt, &data->edns);
+	if (ret != KNOT_EOK) {
+		return KNOT_STATE_FAIL;
+	}
+
+	return KNOT_STATE_CONSUME;
+}
+
+/*! \brief Return the first record of the section iff it is a single-RR SOA. */
+static const knot_rrset_t *sect_soa(const knot_pkt_t *pkt, knot_section_t sect)
+{
+	const knot_pktsection_t *section = knot_pkt_section(pkt, sect);
+	if (section->count == 0) {
+		return NULL;
+	}
+
+	const knot_rrset_t *first = knot_pkt_rr(section, 0);
+	if (first->type != KNOT_RRTYPE_SOA || first->rrs.count != 1) {
+		return NULL;
+	}
+
+	return first;
+}
+
+/*!
+ * \brief Layer callback: process a response.
+ *
+ * While searching for the parent zone, a SOA answer switches the state
+ * machine to sending the actual DS push (via reset/produce); a miss triggers
+ * a re-query with one more leading label stripped, until the root is reached.
+ */
+static int ds_push_consume(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	struct ds_push_data *data = layer->data;
+
+	if (data->parent_soa != NULL) {
+		// DS push has already been sent, just finish the action.
+		return KNOT_STATE_DONE;
+	}
+
+	const knot_rrset_t *parent_soa = sect_soa(pkt, KNOT_ANSWER);
+	if (parent_soa != NULL) {
+		// parent SOA obtained, continue with DS push
+		data->parent_soa = knot_dname_copy(parent_soa->owner, NULL);
+		return KNOT_STATE_RESET;
+	}
+
+	if (data->parent_query[0] == '\0') {
+		// query for parent SOA systematically fails
+		DS_PUSH_LOG(LOG_WARNING, data->zone, data->remote,
+		            layer->flags & KNOT_REQUESTOR_REUSED,
+		            "unable to query parent SOA");
+		return KNOT_STATE_FAIL;
+	}
+
+	return KNOT_STATE_RESET; // cut off one more label and re-query
+}
+
+/*! \brief Layer callback: restart the produce/consume cycle after a reset. */
+static int ds_push_reset(knot_layer_t *layer)
+{
+	(void)layer;
+	return KNOT_STATE_PRODUCE;
+}
+
+/*! \brief Layer callback: release the discovered parent zone apex name. */
+static int ds_push_finish(knot_layer_t *layer)
+{
+	struct ds_push_data *data = layer->data;
+	free(data->parent_soa);
+	data->parent_soa = NULL;
+	return layer->state;
+}
+
+/*! \brief Query layer implementing the DS push state machine. */
+static const knot_layer_api_t DS_PUSH_API = {
+	.begin = ds_push_begin,
+	.produce = ds_push_produce,
+	.reset = ds_push_reset,
+	.consume = ds_push_consume,
+	.finish = ds_push_finish,
+};
+
+/*!
+ * \brief Send one DS push to a single parent-side server.
+ *
+ * Does nothing (returns KNOT_EOK) if the zone publishes no CDS. The pushed
+ * DS TTL is taken from the zone's DNSKEY RRset.
+ */
+static int send_ds_push(conf_t *conf, zone_t *zone,
+                        const conf_remote_t *parent, int timeout)
+{
+	knot_rrset_t zone_cds = node_rrset(zone->contents->apex, KNOT_RRTYPE_CDS);
+	if (knot_rrset_empty(&zone_cds)) {
+		return KNOT_EOK; // No CDS, do nothing.
+	}
+	zone_cds.type = KNOT_RRTYPE_DS;
+	zone_cds.ttl = node_rrset(zone->contents->apex, KNOT_RRTYPE_DNSKEY).ttl;
+
+	struct ds_push_data data = {
+		.zone = zone->name,
+		.parent_query = zone->name,
+		.new_ds = zone_cds,
+		.remote = (struct sockaddr *)&parent->addr,
+		.edns = query_edns_data_init(conf, parent->addr.ss_family, 0)
+	};
+
+	// ANY-class empty-rdata DS RRset = "delete all DS" in a DDNS update.
+	knot_rrset_init(&data.del_old_ds, zone->name, KNOT_RRTYPE_DS, KNOT_CLASS_ANY, 0);
+	int ret = knot_rrset_add_rdata(&data.del_old_ds, NULL, 0, NULL);
+	if (ret != KNOT_EOK) {
+		return ret;
+	}
+
+	knot_requestor_t requestor;
+	knot_requestor_init(&requestor, &DS_PUSH_API, &data, NULL);
+
+	knot_pkt_t *pkt = knot_pkt_new(NULL, KNOT_WIRE_MAX_PKTSIZE, NULL);
+	if (pkt == NULL) {
+		knot_rdataset_clear(&data.del_old_ds.rrs, NULL);
+		knot_requestor_clear(&requestor);
+		return KNOT_ENOMEM;
+	}
+
+	const struct sockaddr_storage *dst = &parent->addr;
+	const struct sockaddr_storage *src = &parent->via;
+	knot_request_t *req = knot_request_make(NULL, dst, src, pkt, &parent->key, 0);
+	if (req == NULL) {
+		knot_rdataset_clear(&data.del_old_ds.rrs, NULL);
+		// Fix: free the packet (the original freed the NULL request,
+		// which is a no-op, and leaked pkt).
+		knot_pkt_free(pkt);
+		knot_requestor_clear(&requestor);
+		return KNOT_ENOMEM;
+	}
+
+	ret = knot_requestor_exec(&requestor, req, timeout);
+
+	if (ret == KNOT_EOK && knot_pkt_ext_rcode(req->resp) == 0) {
+		DS_PUSH_LOG(LOG_INFO, zone->name, dst,
+		            requestor.layer.flags & KNOT_REQUESTOR_REUSED,
+		            "success");
+	} else if (knot_pkt_ext_rcode(req->resp) == 0) {
+		DS_PUSH_LOG(LOG_WARNING, zone->name, dst,
+		            requestor.layer.flags & KNOT_REQUESTOR_REUSED,
+		            "failed (%s)", knot_strerror(ret));
+	} else {
+		DS_PUSH_LOG(LOG_WARNING, zone->name, dst,
+		            requestor.layer.flags & KNOT_REQUESTOR_REUSED,
+		            "server responded with error '%s'",
+		            knot_pkt_ext_rcode_name(req->resp));
+	}
+
+	knot_rdataset_clear(&data.del_old_ds.rrs, NULL);
+	knot_request_free(req, NULL);
+	knot_requestor_clear(&requestor);
+
+	return ret;
+}
+
+/*!
+ * \brief Handle the DS_PUSH event: push DS records derived from the zone's
+ *        CDS to every configured parent-side remote.
+ *
+ * Per remote, the addresses are tried in order until one succeeds; on
+ * failure the event is re-planned after DS_PUSH_RETRY seconds.
+ */
+int event_ds_push(conf_t *conf, zone_t *zone)
+{
+	assert(zone);
+
+	if (zone_contents_is_empty(zone->contents)) {
+		return KNOT_EOK;
+	}
+
+	int timeout = conf->cache.srv_tcp_remote_io_timeout;
+
+	// 'ds-push' may be set at zone level or inherited from the DNSSEC policy.
+	conf_val_t ds_push = conf_zone_get(conf, C_DS_PUSH, zone->name);
+	if (ds_push.code != KNOT_EOK) {
+		conf_val_t policy_id = conf_zone_get(conf, C_DNSSEC_POLICY, zone->name);
+		conf_id_fix_default(&policy_id);
+		ds_push = conf_id_get(conf, C_POLICY, C_DS_PUSH, &policy_id);
+	}
+	conf_mix_iter_t iter;
+	conf_mix_iter_init(conf, &ds_push, &iter);
+	while (iter.id->code == KNOT_EOK) {
+		conf_val_t addr = conf_id_get(conf, C_RMT, C_ADDR, iter.id);
+		size_t addr_count = conf_val_count(&addr);
+
+		int ret = KNOT_EOK;
+		// Fix: size_t index avoids a signed/unsigned comparison with addr_count.
+		for (size_t i = 0; i < addr_count; i++) {
+			conf_remote_t parent = conf_remote(conf, iter.id, i);
+			ret = send_ds_push(conf, zone, &parent, timeout);
+			if (ret == KNOT_EOK) {
+				zone->timers.next_ds_push = 0;
+				break;
+			}
+		}
+
+		if (ret != KNOT_EOK) {
+			time_t next_push = time(NULL) + DS_PUSH_RETRY;
+			zone_events_schedule_at(zone, ZONE_EVENT_DS_PUSH, next_push);
+			zone->timers.next_ds_push = next_push;
+		}
+
+		conf_mix_iter_next(&iter);
+	}
+
+	return KNOT_EOK;
+}
diff --git a/src/knot/events/handlers/expire.c b/src/knot/events/handlers/expire.c
new file mode 100644
index 0000000..d7deedd
--- /dev/null
+++ b/src/knot/events/handlers/expire.c
@@ -0,0 +1,46 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <urcu.h>
+
+#include "knot/common/log.h"
+#include "knot/conf/conf.h"
+#include "knot/events/handlers.h"
+#include "knot/events/replan.h"
+#include "knot/zone/contents.h"
+#include "knot/zone/zone.h"
+
+/*!
+ * \brief Handle the EXPIRE event: drop zone contents and re-plan refresh.
+ *
+ * The detached contents are freed only after an RCU grace period and while
+ * holding the copy-on-write lock, so concurrent readers and any in-progress
+ * update finish with them first.
+ */
+int event_expire(conf_t *conf, zone_t *zone)
+{
+	assert(zone);
+
+	zone_contents_t *expired = zone_switch_contents(zone, NULL);
+	log_zone_info(zone->name, "zone expired");
+
+	synchronize_rcu();
+	knot_sem_wait(&zone->cow_lock);
+	zone_contents_deep_free(expired);
+	knot_sem_post(&zone->cow_lock);
+
+	zone->zonefile.exists = false;
+
+	// Record the expiration time and attempt an immediate refresh.
+	zone->timers.next_expire = time(NULL);
+	zone->timers.next_refresh = zone->timers.next_expire;
+	replan_from_timers(conf, zone);
+
+	return KNOT_EOK;
+}
diff --git a/src/knot/events/handlers/flush.c b/src/knot/events/handlers/flush.c
new file mode 100644
index 0000000..65663cb
--- /dev/null
+++ b/src/knot/events/handlers/flush.c
@@ -0,0 +1,33 @@
+/* Copyright (C) 2021 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <time.h>
+
+#include "knot/conf/conf.h"
+#include "knot/zone/zone.h"
+
+/*! \brief Handle the FLUSH event: synchronize zone contents to the zone file. */
+int event_flush(conf_t *conf, zone_t *zone)
+{
+	assert(conf);
+	assert(zone);
+
+	// An empty (e.g. not yet loaded) zone has nothing to flush.
+	return zone_contents_is_empty(zone->contents)
+	       ? KNOT_EOK : zone_flush_journal(conf, zone, true);
+}
diff --git a/src/knot/events/handlers/freeze_thaw.c b/src/knot/events/handlers/freeze_thaw.c
new file mode 100644
index 0000000..dfa867f
--- /dev/null
+++ b/src/knot/events/handlers/freeze_thaw.c
@@ -0,0 +1,46 @@
+/* Copyright (C) 2021 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+
+#include "knot/common/log.h"
+#include "knot/conf/conf.h"
+#include "knot/events/events.h"
+#include "knot/zone/zone.h"
+
+/*!
+ * \brief Handle the UFREEZE event: block processing of zone updates.
+ *
+ * \note conf is unused; the common event-handler signature requires it.
+ */
+int event_ufreeze(conf_t *conf, zone_t *zone)
+{
+	assert(zone);
+
+	pthread_mutex_lock(&zone->events.mx);
+	zone->events.ufrozen = true;
+	pthread_mutex_unlock(&zone->events.mx);
+
+	log_zone_info(zone->name, "zone updates frozen");
+
+	return KNOT_EOK;
+}
+
+/*!
+ * \brief Handle the UTHAW event: re-enable processing of zone updates.
+ *
+ * \note conf is unused; the common event-handler signature requires it.
+ */
+int event_uthaw(conf_t *conf, zone_t *zone)
+{
+	assert(zone);
+
+	pthread_mutex_lock(&zone->events.mx);
+	zone->events.ufrozen = false;
+	pthread_mutex_unlock(&zone->events.mx);
+
+	log_zone_info(zone->name, "zone updates unfrozen");
+
+	return KNOT_EOK;
+}
diff --git a/src/knot/events/handlers/load.c b/src/knot/events/handlers/load.c
new file mode 100644
index 0000000..13e3298
--- /dev/null
+++ b/src/knot/events/handlers/load.c
@@ -0,0 +1,406 @@
+/* Copyright (C) 2023 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+
+#include "knot/catalog/generate.h"
+#include "knot/common/log.h"
+#include "knot/conf/conf.h"
+#include "knot/dnssec/key-events.h"
+#include "knot/dnssec/zone-events.h"
+#include "knot/events/handlers.h"
+#include "knot/events/replan.h"
+#include "knot/zone/digest.h"
+#include "knot/zone/serial.h"
+#include "knot/zone/zone-diff.h"
+#include "knot/zone/zone-load.h"
+#include "knot/zone/zone.h"
+#include "knot/zone/zonefile.h"
+#include "knot/updates/acl.h"
+
+static bool dontcare_load_error(conf_t *conf, const zone_t *zone)
+{
+ return (zone->contents == NULL && zone_load_can_bootstrap(conf, zone->name));
+}
+
+/*! \brief Check whether any configured ACL permits an outgoing transfer. */
+static bool allowed_xfr(conf_t *conf, const zone_t *zone)
+{
+	conf_val_t acl = conf_zone_get(conf, C_ACL, zone->name);
+	for (; acl.code == KNOT_EOK; conf_val_next(&acl)) {
+		conf_val_t action = conf_id_get(conf, C_ACL, C_ACTION, &acl);
+		for (; action.code == KNOT_EOK; conf_val_next(&action)) {
+			if (conf_opt(&action) == ACL_ACTION_TRANSFER) {
+				return true;
+			}
+		}
+	}
+
+	return false;
+}
+
+/*!
+ * \brief Handle the LOAD event: (re)construct zone contents from zone file,
+ *        journal and/or catalog, optionally sign, and commit to the zone.
+ *
+ * The combination of sources is driven by the 'journal-content' and
+ * 'zonefile-load' configuration options; catalog member zones are generated
+ * from the catalog update instead of a zone file.
+ */
+int event_load(conf_t *conf, zone_t *zone)
+{
+	zone_update_t up = { 0 };
+	zone_contents_t *journal_conts = NULL, *zf_conts = NULL;
+	bool old_contents_exist = (zone->contents != NULL), zone_in_journal_exists = false;
+
+	conf_val_t val = conf_zone_get(conf, C_JOURNAL_CONTENT, zone->name);
+	unsigned load_from = conf_opt(&val);
+
+	val = conf_zone_get(conf, C_ZONEFILE_LOAD, zone->name);
+	unsigned zf_from = conf_opt(&val);
+
+	int ret = KNOT_EOK;
+
+	// If configured, load journal contents.
+	if (!old_contents_exist &&
+	    ((load_from == JOURNAL_CONTENT_ALL && zf_from != ZONEFILE_LOAD_WHOLE) ||
+	     zone->cat_members != NULL)) {
+		ret = zone_load_from_journal(conf, zone, &journal_conts);
+		switch (ret) {
+		case KNOT_EOK:
+			zone_in_journal_exists = true;
+			break;
+		case KNOT_ENOENT:
+			zone_in_journal_exists = false;
+			break;
+		default:
+			goto cleanup;
+		}
+	} else {
+		zone_in_journal_exists = zone_journal_has_zij(zone);
+	}
+
+	// If configured, attempt to load zonefile.
+	if (zf_from != ZONEFILE_LOAD_NONE && zone->cat_members == NULL) {
+		struct timespec mtime;
+		char *filename = conf_zonefile(conf, zone->name);
+		ret = zonefile_exists(filename, &mtime);
+		if (ret == KNOT_EOK) {
+			conf_val_t semchecks = conf_zone_get(conf, C_SEM_CHECKS, zone->name);
+			semcheck_optional_t mode = conf_opt(&semchecks);
+			if (mode == SEMCHECK_DNSSEC_AUTO) {
+				conf_val_t validation = conf_zone_get(conf, C_DNSSEC_VALIDATION, zone->name);
+				if (conf_bool(&validation)) {
+					/* Disable duplicate DNSSEC checks, which are the
+					   same as DNSSEC validation in zone update commit. */
+					mode = SEMCHECK_DNSSEC_OFF;
+				}
+			}
+
+			ret = zone_load_contents(conf, zone->name, &zf_conts, mode, false);
+		}
+		if (ret != KNOT_EOK) {
+			assert(!zf_conts);
+			if (dontcare_load_error(conf, zone)) {
+				log_zone_info(zone->name, "failed to parse zone file '%s' (%s)",
+				              filename, knot_strerror(ret));
+			} else {
+				log_zone_error(zone->name, "failed to parse zone file '%s' (%s)",
+				               filename, knot_strerror(ret));
+			}
+			free(filename);
+			goto load_end;
+		}
+		free(filename);
+
+		// Save zonefile information.
+		zone->zonefile.serial = zone_contents_serial(zf_conts);
+		zone->zonefile.exists = (zf_conts != NULL);
+		zone->zonefile.mtime = mtime;
+
+		// If configured and possible, fix the SOA serial of zonefile.
+		zone_contents_t *relevant = (zone->contents != NULL ? zone->contents : journal_conts);
+		if (zf_conts != NULL && zf_from == ZONEFILE_LOAD_DIFSE && relevant != NULL) {
+			uint32_t serial = zone_contents_serial(relevant);
+			conf_val_t policy = conf_zone_get(conf, C_SERIAL_POLICY, zone->name);
+			uint32_t set = serial_next(serial, conf_opt(&policy), 1);
+			zone_contents_set_soa_serial(zf_conts, set);
+			log_zone_info(zone->name, "zone file parsed, serial updated %u -> %u",
+			              zone->zonefile.serial, set);
+			zone->zonefile.serial = set;
+		} else {
+			log_zone_info(zone->name, "zone file parsed, serial %u",
+			              zone->zonefile.serial);
+		}
+
+		// If configured and applicable to zonefile, load journal changes.
+		if (load_from != JOURNAL_CONTENT_NONE) {
+			ret = zone_load_journal(conf, zone, zf_conts);
+			if (ret != KNOT_EOK) {
+				zone_contents_deep_free(zf_conts);
+				zf_conts = NULL;
+				log_zone_warning(zone->name, "failed to load journal (%s)",
+				                 knot_strerror(ret));
+			}
+		}
+	}
+	// For a catalog member zone, generate the contents from the catalog update.
+	if (zone->cat_members != NULL && !old_contents_exist) {
+		uint32_t serial = journal_conts == NULL ? 1 : zone_contents_serial(journal_conts);
+		serial = serial_next(serial, SERIAL_POLICY_UNIXTIME, 1); // unixtime hardcoded
+		zf_conts = catalog_update_to_zone(zone->cat_members, zone->name, serial);
+		if (zf_conts == NULL) {
+			ret = zone->cat_members->error == KNOT_EOK ? KNOT_ENOMEM : zone->cat_members->error;
+			goto cleanup;
+		}
+	}
+
+	// If configured contents=all, but not present, store zonefile.
+	if ((load_from == JOURNAL_CONTENT_ALL || zone->cat_members != NULL) &&
+	    !zone_in_journal_exists && (zf_conts != NULL || old_contents_exist)) {
+		zone_contents_t *store_c = old_contents_exist ? zone->contents : zf_conts;
+		ret = zone_in_journal_store(conf, zone, store_c);
+		if (ret != KNOT_EOK) {
+			log_zone_warning(zone->name, "failed to write zone-in-journal (%s)",
+			                 knot_strerror(ret));
+		} else {
+			zone_in_journal_exists = true;
+		}
+	}
+
+	val = conf_zone_get(conf, C_DNSSEC_SIGNING, zone->name);
+	bool dnssec_enable = (conf_bool(&val) && zone->cat_members == NULL), zu_from_zf_conts = false;
+	bool do_diff = (zf_from == ZONEFILE_LOAD_DIFF || zf_from == ZONEFILE_LOAD_DIFSE || zone->cat_members != NULL);
+	bool ignore_dnssec = (do_diff && dnssec_enable);
+
+	val = conf_zone_get(conf, C_ZONEMD_GENERATE, zone->name);
+	unsigned digest_alg = conf_opt(&val);
+	bool update_zonemd = (digest_alg != ZONE_DIGEST_NONE);
+
+	// Create zone_update structure according to current state.
+	if (old_contents_exist) {
+		if (zone->cat_members != NULL) {
+			ret = zone_update_init(&up, zone, UPDATE_INCREMENTAL);
+			if (ret == KNOT_EOK) {
+				ret = catalog_update_to_update(zone->cat_members, &up);
+			}
+			if (ret == KNOT_EOK) {
+				ret = zone_update_increment_soa(&up, conf);
+			}
+		} else if (zf_conts == NULL) {
+			// nothing to be re-loaded
+			ret = KNOT_EOK;
+			goto cleanup;
+		} else if (zf_from == ZONEFILE_LOAD_WHOLE) {
+			// throw old zone contents and load new from ZF
+			ret = zone_update_from_contents(&up, zone, zf_conts,
+			                                (load_from == JOURNAL_CONTENT_NONE ?
+			                                 UPDATE_FULL : UPDATE_HYBRID));
+			zu_from_zf_conts = true;
+		} else {
+			// compute ZF diff and if success, apply it
+			ret = zone_update_from_differences(&up, zone, NULL, zf_conts, UPDATE_INCREMENTAL,
+			                                   ignore_dnssec, update_zonemd);
+		}
+	} else {
+		if (journal_conts != NULL && (zf_from != ZONEFILE_LOAD_WHOLE || zone->cat_members != NULL)) {
+			if (zf_conts == NULL) {
+				// load zone-in-journal
+				ret = zone_update_from_contents(&up, zone, journal_conts, UPDATE_HYBRID);
+			} else {
+				// load zone-in-journal, compute ZF diff and if success, apply it
+				ret = zone_update_from_differences(&up, zone, journal_conts, zf_conts,
+				                                   UPDATE_HYBRID, ignore_dnssec, update_zonemd);
+				if (ret == KNOT_ESEMCHECK || ret == KNOT_ERANGE) {
+					log_zone_warning(zone->name,
+					                 "zone file changed with SOA serial %s, "
+					                 "ignoring zone file and loading from journal",
+					                 (ret == KNOT_ESEMCHECK ? "unupdated" : "decreased"));
+					zone_contents_deep_free(zf_conts);
+					zf_conts = NULL;
+					ret = zone_update_from_contents(&up, zone, journal_conts, UPDATE_HYBRID);
+				}
+			}
+		} else {
+			if (zf_conts == NULL) {
+				// nothing to be loaded
+				ret = KNOT_ENOENT;
+			} else {
+				// load from ZF
+				ret = zone_update_from_contents(&up, zone, zf_conts,
+				                                (load_from == JOURNAL_CONTENT_NONE ?
+				                                 UPDATE_FULL : UPDATE_HYBRID));
+				if (zf_from == ZONEFILE_LOAD_WHOLE) {
+					zu_from_zf_conts = true;
+				}
+			}
+		}
+	}
+
+load_end:
+	if (ret != KNOT_EOK) {
+		switch (ret) {
+		case KNOT_ENOENT:
+			if (zone_load_can_bootstrap(conf, zone->name)) {
+				log_zone_info(zone->name, "zone will be bootstrapped");
+			} else {
+				log_zone_info(zone->name, "zone not found");
+			}
+			break;
+		case KNOT_ESEMCHECK:
+			log_zone_warning(zone->name, "zone file changed without SOA serial update");
+			break;
+		case KNOT_ERANGE:
+			if (serial_compare(zone->zonefile.serial, zone_contents_serial(zone->contents)) == SERIAL_INCOMPARABLE) {
+				log_zone_warning(zone->name, "zone file changed with incomparable SOA serial");
+			} else {
+				log_zone_warning(zone->name, "zone file changed with decreased SOA serial");
+			}
+			break;
+		}
+		goto cleanup;
+	}
+
+	bool zf_serial_updated = (zf_conts != NULL && zone_contents_serial(zf_conts) != zone_contents_serial(zone->contents));
+
+	// The contents are already part of zone_update.
+	zf_conts = NULL;
+	journal_conts = NULL;
+
+	ret = zone_update_verify_digest(conf, &up);
+	if (ret != KNOT_EOK) {
+		goto cleanup;
+	}
+
+	uint32_t middle_serial = zone_contents_serial(up.new_cont);
+
+	if (do_diff && old_contents_exist && dnssec_enable && zf_serial_updated &&
+	    !zone_in_journal_exists) {
+		ret = zone_update_start_extra(&up, conf);
+		if (ret != KNOT_EOK) {
+			goto cleanup;
+		}
+	}
+
+	// Sign zone using DNSSEC if configured.
+	zone_sign_reschedule_t dnssec_refresh = { 0 };
+	if (dnssec_enable) {
+		ret = knot_dnssec_zone_sign(&up, conf, 0, KEY_ROLL_ALLOW_ALL, 0, &dnssec_refresh);
+		if (ret != KNOT_EOK) {
+			goto cleanup;
+		}
+		if (zu_from_zf_conts && (up.flags & UPDATE_HYBRID) && allowed_xfr(conf, zone)) {
+			log_zone_warning(zone->name,
+			                 "with automatic DNSSEC signing and outgoing transfers enabled, "
+			                 "'zonefile-load: difference' should be set to avoid malformed "
+			                 "IXFR after manual zone file update");
+		}
+	} else if (update_zonemd) {
+		/* Don't update ZONEMD if no change and ZONEMD is up-to-date.
+		 * If ZONEFILE_LOAD_DIFSE, the change is non-empty and ZONEMD
+		 * is directly updated without its verification. */
+		if (!zone_update_no_change(&up) || !zone_contents_digest_exists(up.new_cont, digest_alg, false)) {
+			if (zone_update_to(&up) == NULL || middle_serial == zone->zonefile.serial) {
+				ret = zone_update_increment_soa(&up, conf);
+			}
+			if (ret == KNOT_EOK) {
+				ret = zone_update_add_digest(&up, digest_alg, false);
+			}
+			if (ret != KNOT_EOK) {
+				goto cleanup;
+			}
+		}
+	}
+
+	// If the change is only automatically incremented SOA serial, make it no change.
+	if ((zf_from == ZONEFILE_LOAD_DIFSE || zone->cat_members != NULL) &&
+	    (up.flags & (UPDATE_INCREMENTAL | UPDATE_HYBRID)) &&
+	    changeset_differs_just_serial(&up.change, update_zonemd)) {
+		changeset_t *cpy = changeset_clone(&up.change);
+		if (cpy == NULL) {
+			ret = KNOT_ENOMEM;
+			goto cleanup;
+		}
+		ret = zone_update_apply_changeset_reverse(&up, cpy);
+		if (ret != KNOT_EOK) {
+			changeset_free(cpy);
+			goto cleanup;
+		}
+
+		// If the original ZONEMD is outdated, use the reverted changeset again.
+		if (update_zonemd && !zone_contents_digest_exists(up.new_cont, digest_alg, false)) {
+			ret = zone_update_apply_changeset(&up, cpy);
+			changeset_free(cpy);
+			if (ret != KNOT_EOK) {
+				goto cleanup;
+			}
+		} else {
+			changeset_free(cpy);
+			// Revert automatic zone serial increment.
+			zone->zonefile.serial = zone_contents_serial(up.new_cont);
+			/* Reset possibly set the resigned flag. Note that dnssec
+			 * reschedule isn't reverted, but shouldn't be a problem
+			 * for non-empty zones as SOA, ZONEMD, and their RRSIGs
+			 * are always updated with other changes in the zone. */
+			zone->zonefile.resigned = false;
+		}
+	}
+
+	uint32_t old_serial = 0, new_serial = zone_contents_serial(up.new_cont);
+	char old_serial_str[11] = "none", new_serial_str[15] = "";
+	if (old_contents_exist) {
+		old_serial = zone_contents_serial(zone->contents);
+		(void)snprintf(old_serial_str, sizeof(old_serial_str), "%u", old_serial);
+	}
+	if (new_serial != middle_serial) {
+		(void)snprintf(new_serial_str, sizeof(new_serial_str), " -> %u", new_serial);
+	}
+
+	// Commit zone_update back to zone (including journal update, rcu,...).
+	ret = zone_update_commit(conf, &up);
+	if (ret != KNOT_EOK) {
+		goto cleanup;
+	}
+
+	char expires_in[32] = "";
+	if (zone->timers.next_expire > 0) {
+		(void)snprintf(expires_in, sizeof(expires_in),
+		               ", expires in %u seconds",
+		               (uint32_t)MAX(zone->timers.next_expire - time(NULL), 0));
+	}
+
+	log_zone_info(zone->name, "loaded, serial %s -> %u%s, %zu bytes%s",
+	              old_serial_str, middle_serial, new_serial_str, zone->contents->size, expires_in);
+
+	if (zone->cat_members != NULL) {
+		catalog_update_clear(zone->cat_members);
+	}
+
+	// Schedule dependent events.
+	if (dnssec_enable) {
+		event_dnssec_reschedule(conf, zone, &dnssec_refresh, false); // false since we handle NOTIFY below
+	}
+
+	replan_from_timers(conf, zone);
+
+	if (!zone_timers_serial_notified(&zone->timers, new_serial)) {
+		zone_schedule_notify(zone, 0);
+	}
+
+	return KNOT_EOK;
+
+cleanup:
+	// Try to bootstrap the zone if local error.
+	replan_from_timers(conf, zone);
+
+	zone_update_clear(&up);
+	zone_contents_deep_free(zf_conts);
+	zone_contents_deep_free(journal_conts);
+
+	return (dontcare_load_error(conf, zone) ? KNOT_EOK : ret);
+}
diff --git a/src/knot/events/handlers/notify.c b/src/knot/events/handlers/notify.c
new file mode 100644
index 0000000..dc3965d
--- /dev/null
+++ b/src/knot/events/handlers/notify.c
@@ -0,0 +1,212 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+
+#include "contrib/openbsd/siphash.h"
+#include "knot/common/log.h"
+#include "knot/conf/conf.h"
+#include "knot/query/query.h"
+#include "knot/query/requestor.h"
+#include "knot/zone/zone.h"
+#include "libknot/errcode.h"
+
+/*! \brief Compute a stable hash identifying a remote from its configuration ID. */
+static notifailed_rmt_hash notifailed_hash(conf_val_t *rmt_id)
+{
+	SIPHASH_KEY zero_key = { 0, 0 };
+	SIPHASH_CTX hash_state;
+
+	SipHash24_Init(&hash_state, &zero_key);
+	SipHash24_Update(&hash_state, rmt_id->data, rmt_id->len);
+
+	return SipHash24_End(&hash_state);
+}
+
+/*!
+ * \brief NOTIFY message processing data.
+ */
+struct notify_data {
+	const knot_dname_t *zone;      // Zone being notified about.
+	const knot_rrset_t *soa;       // Current SOA, sent as an answer hint.
+	const struct sockaddr *remote; // Notified remote address.
+	query_edns_data_t edns;        // EDNS parameters for the query.
+};
+
+/*! \brief Layer callback: store the processing context and start producing. */
+static int notify_begin(knot_layer_t *layer, void *params)
+{
+	layer->data = params;
+
+	return KNOT_STATE_PRODUCE;
+}
+
+/*! \brief Layer callback: build the outgoing NOTIFY query. */
+static int notify_produce(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	struct notify_data *data = layer->data;
+
+	// mandatory: NOTIFY opcode, AA flag, SOA qtype
+	query_init_pkt(pkt);
+	knot_wire_set_opcode(pkt->wire, KNOT_OPCODE_NOTIFY);
+	knot_wire_set_aa(pkt->wire);
+	knot_pkt_put_question(pkt, data->zone, KNOT_CLASS_IN, KNOT_RRTYPE_SOA);
+
+	// optional, untrusted hint for the receiver: the new SOA
+	if (data->soa) {
+		knot_pkt_begin(pkt, KNOT_ANSWER);
+		knot_pkt_put(pkt, KNOT_COMPR_HINT_QNAME, data->soa, 0);
+	}
+
+	// NOTE(review): return codes above are ignored; NOTIFY appears to be
+	// best-effort here -- confirm this is intentional.
+	query_put_edns(pkt, &data->edns);
+
+	return KNOT_STATE_CONSUME;
+}
+
+/*! \brief Layer callback: any response finishes the exchange. */
+static int notify_consume(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	return KNOT_STATE_DONE;
+}
+
+/*! \brief Query layer implementing the outgoing NOTIFY exchange. */
+static const knot_layer_api_t NOTIFY_API = {
+	.begin = notify_begin,
+	.produce = notify_produce,
+	.consume = notify_consume,
+};
+
+/*! \brief Log an outgoing-NOTIFY message for the given zone and remote. */
+#define NOTIFY_OUT_LOG(priority, zone, remote, reused, fmt, ...) \
+	ns_log(priority, zone, LOG_OPERATION_NOTIFY, LOG_DIRECTION_OUT, remote, \
+	       (reused), fmt, ## __VA_ARGS__)
+
+/*!
+ * \brief Send one NOTIFY to a single remote address.
+ *
+ * On an accepted NOTIFY, the notified serial is recorded in the zone timers.
+ *
+ * \param retry  Whether this is a re-try of a previously failed NOTIFY
+ *               (affects logging only).
+ */
+static int send_notify(conf_t *conf, zone_t *zone, const knot_rrset_t *soa,
+                       const conf_remote_t *slave, int timeout, bool retry)
+{
+	struct notify_data data = {
+		.zone = zone->name,
+		.soa = soa,
+		.remote = (struct sockaddr *)&slave->addr,
+		.edns = query_edns_data_init(conf, slave->addr.ss_family, 0)
+	};
+
+	knot_requestor_t requestor;
+	knot_requestor_init(&requestor, &NOTIFY_API, &data, NULL);
+
+	knot_pkt_t *pkt = knot_pkt_new(NULL, KNOT_WIRE_MAX_PKTSIZE, NULL);
+	if (!pkt) {
+		knot_requestor_clear(&requestor);
+		return KNOT_ENOMEM;
+	}
+
+	const struct sockaddr_storage *dst = &slave->addr;
+	const struct sockaddr_storage *src = &slave->via;
+	knot_request_flag_t flags = conf->cache.srv_tcp_fastopen ? KNOT_REQUEST_TFO : 0;
+	knot_request_t *req = knot_request_make(NULL, dst, src, pkt, &slave->key, flags);
+	if (!req) {
+		// Fix: free the packet (the original freed the NULL request,
+		// which is a no-op, and leaked pkt).
+		knot_pkt_free(pkt);
+		knot_requestor_clear(&requestor);
+		return KNOT_ENOMEM;
+	}
+
+	int ret = knot_requestor_exec(&requestor, req, timeout);
+
+	const char *log_retry = retry ? "retry, " : "";
+
+	if (ret == KNOT_EOK && knot_pkt_ext_rcode(req->resp) == 0) {
+		NOTIFY_OUT_LOG(LOG_INFO, zone->name, dst,
+		               requestor.layer.flags & KNOT_REQUESTOR_REUSED,
+		               "%sserial %u", log_retry, knot_soa_serial(soa->rrs.rdata));
+		zone->timers.last_notified_serial = (knot_soa_serial(soa->rrs.rdata) | LAST_NOTIFIED_SERIAL_VALID);
+	} else if (knot_pkt_ext_rcode(req->resp) == 0) {
+		NOTIFY_OUT_LOG(LOG_WARNING, zone->name, dst,
+		               requestor.layer.flags & KNOT_REQUESTOR_REUSED,
+		               "%sfailed (%s)", log_retry, knot_strerror(ret));
+	} else {
+		NOTIFY_OUT_LOG(LOG_WARNING, zone->name, dst,
+		               requestor.layer.flags & KNOT_REQUESTOR_REUSED,
+		               "%sserver responded with error '%s'",
+		               log_retry, knot_pkt_ext_rcode_name(req->resp));
+	}
+
+	knot_request_free(req, NULL);
+	knot_requestor_clear(&requestor);
+
+	return ret;
+}
+
+/*!
+ * \brief Handle the NOTIFY event: send NOTIFY to all configured remotes.
+ *
+ * On a re-try (some remotes failed last time), only the previously failed
+ * remotes are notified again. If any remote still fails, the event is
+ * re-planned within the configured retry interval bounds.
+ */
+int event_notify(conf_t *conf, zone_t *zone)
+{
+	assert(zone);
+
+	bool failed = false;
+
+	if (zone_contents_is_empty(zone->contents)) {
+		return KNOT_EOK;
+	}
+
+	// NOTIFY content
+	int timeout = conf->cache.srv_tcp_remote_io_timeout;
+	knot_rrset_t soa = node_rrset(zone->contents->apex, KNOT_RRTYPE_SOA);
+
+	// in case of re-try, NOTIFY only failed remotes
+	pthread_mutex_lock(&zone->preferred_lock);
+	bool retry = (zone->notifailed.size > 0);
+
+	// send NOTIFY to each remote, use working address
+	conf_val_t notify = conf_zone_get(conf, C_NOTIFY, zone->name);
+	conf_mix_iter_t iter;
+	conf_mix_iter_init(conf, &notify, &iter);
+	while (iter.id->code == KNOT_EOK) {
+		notifailed_rmt_hash rmt_hash = notifailed_hash(iter.id);
+		if (retry && notifailed_rmt_dynarray_bsearch(&zone->notifailed, &rmt_hash) == NULL) {
+			// Re-try run and this remote succeeded last time - skip it.
+			conf_mix_iter_next(&iter);
+			continue;
+		}
+		// Don't hold the lock over the network I/O below.
+		pthread_mutex_unlock(&zone->preferred_lock);
+
+		conf_val_t addr = conf_id_get(conf, C_RMT, C_ADDR, iter.id);
+		size_t addr_count = conf_val_count(&addr);
+
+		int ret = KNOT_EOK;
+
+		// Fix: size_t index avoids a signed/unsigned comparison with addr_count.
+		for (size_t i = 0; i < addr_count; i++) {
+			conf_remote_t slave = conf_remote(conf, iter.id, i);
+			ret = send_notify(conf, zone, &soa, &slave, timeout, retry);
+			if (ret == KNOT_EOK) {
+				break;
+			}
+		}
+
+		pthread_mutex_lock(&zone->preferred_lock);
+		if (ret != KNOT_EOK) {
+			failed = true;
+			notifailed_rmt_dynarray_add(&zone->notifailed, &rmt_hash);
+		} else {
+			notifailed_rmt_dynarray_remove(&zone->notifailed, &rmt_hash);
+		}
+
+		conf_mix_iter_next(&iter);
+	}
+
+	if (failed) {
+		notifailed_rmt_dynarray_sort_dedup(&zone->notifailed);
+
+		// Plan the re-try per SOA retry, bounded by configured limits.
+		uint32_t retry_in = knot_soa_retry(soa.rrs.rdata);
+		conf_val_t val = conf_zone_get(conf, C_RETRY_MIN_INTERVAL, zone->name);
+		retry_in = MAX(retry_in, conf_int(&val));
+		val = conf_zone_get(conf, C_RETRY_MAX_INTERVAL, zone->name);
+		retry_in = MIN(retry_in, conf_int(&val));
+
+		zone_events_schedule_at(zone, ZONE_EVENT_NOTIFY, time(NULL) + retry_in);
+	}
+	pthread_mutex_unlock(&zone->preferred_lock);
+
+	return failed ? KNOT_ERROR : KNOT_EOK;
+}
diff --git a/src/knot/events/handlers/refresh.c b/src/knot/events/handlers/refresh.c
new file mode 100644
index 0000000..9125aac
--- /dev/null
+++ b/src/knot/events/handlers/refresh.c
@@ -0,0 +1,1391 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <stdint.h>
+
+#include "contrib/mempattern.h"
+#include "libdnssec/random.h"
+#include "knot/common/log.h"
+#include "knot/conf/conf.h"
+#include "knot/dnssec/zone-events.h"
+#include "knot/events/handlers.h"
+#include "knot/events/replan.h"
+#include "knot/nameserver/ixfr.h"
+#include "knot/query/layer.h"
+#include "knot/query/query.h"
+#include "knot/query/requestor.h"
+#include "knot/updates/changesets.h"
+#include "knot/zone/adjust.h"
+#include "knot/zone/digest.h"
+#include "knot/zone/serial.h"
+#include "knot/zone/zone.h"
+#include "knot/zone/zonefile.h"
+#include "libknot/errcode.h"
+
+/*!
+ * \brief Refresh event processing.
+ *
+ * The following diagram represents refresh event processing.
+ *
+ * \verbatim
+ * O
+ * |
+ * +-----v-----+
+ * | BEGIN |
+ * +---+---+---+
+ * has SOA | | no SOA
+ * +-------------------+ +------------------------------+
+ * | |
+ * +------v------+ outdated +--------------+ error +-------v------+
+ * | SOA query +------------> IXFR query +-----------> AXFR query |
+ * +-----+---+---+ +------+-------+ +----+----+----+
+ * error | | current | success success | | error
+ * | +-----+ +---------------+ | |
+ * | | | +--------------------------------------+ |
+ * | | | | +----------+ +--------------+
+ * | | | | | | |
+ * | +--v-v-v--+ | +--v--v--+
+ * | | DONE | | | FAIL |
+ * | +---------+ | +--------+
+ * +----------------------------+
+ *
+ * \endverbatim
+ */
+
+/*!
+ * Logging shortcuts binding the zone name, operation type, direction,
+ * remote address, and connection-reuse flag from the refresh context.
+ */
+#define REFRESH_LOG(priority, data, direction, msg...) \
+	ns_log(priority, (data)->zone->name, LOG_OPERATION_REFRESH, direction, \
+	       (data)->remote, (data)->layer->flags & KNOT_REQUESTOR_REUSED, msg)
+
+#define AXFRIN_LOG(priority, data, msg...) \
+	ns_log(priority, (data)->zone->name, LOG_OPERATION_AXFR, LOG_DIRECTION_IN, \
+	       (data)->remote, (data)->layer->flags & KNOT_REQUESTOR_REUSED, msg)
+
+#define IXFRIN_LOG(priority, data, msg...) \
+	ns_log(priority, (data)->zone->name, LOG_OPERATION_IXFR, LOG_DIRECTION_IN, \
+	       (data)->remote, (data)->layer->flags & KNOT_REQUESTOR_REUSED, msg)
+
+/*! \brief Refresh event processing state. */
+enum state {
+	REFRESH_STATE_INVALID = 0,
+	STATE_SOA_QUERY, //!< Comparing local and remote serial via SOA query.
+	STATE_TRANSFER,  //!< Performing IXFR or AXFR.
+};
+
+/*! \brief Classification of an incoming transfer response. */
+enum xfr_type {
+	XFR_TYPE_NOTIMP = -2,    //!< Transfer not supported by remote.
+	XFR_TYPE_ERROR = -1,     //!< Malformed response.
+	XFR_TYPE_UNDETERMINED = 0, //!< Not decidable from the packet(s) seen so far.
+	XFR_TYPE_UPTODATE,       //!< Remote indicated the zone is current.
+	XFR_TYPE_AXFR,           //!< Full transfer (incl. AXFR-style IXFR).
+	XFR_TYPE_IXFR,           //!< Incremental transfer.
+};
+
+/*! \brief Context shared by the refresh event layers (SOA query, AXFR-in, IXFR-in). */
+struct refresh_data {
+	knot_layer_t *layer;             //!< Used for reading requestor flags.
+
+	// transfer configuration, initialize appropriately:
+
+	zone_t *zone;                    //!< Zone to be eventually updated.
+	conf_t *conf;                    //!< Server configuration.
+	const struct sockaddr *remote;   //!< Remote endpoint.
+	const knot_rrset_t *soa;         //!< Local SOA (NULL for AXFR).
+	const size_t max_zone_size;      //!< Maximal zone size.
+	bool use_edns;                   //!< Allow EDNS in SOA/AXFR/IXFR queries.
+	query_edns_data_t edns;          //!< EDNS data to be used in queries.
+	zone_master_fallback_t *fallback; //!< Flags allowing zone_master_try() fallbacks.
+	bool fallback_axfr;              //!< Flag allowing fallback to AXFR.
+	uint32_t expire_timer;           //!< Result: expire timer from answer EDNS.
+
+	// internal state, initialize with zeroes:
+
+	int ret;                         //!< Error code.
+	enum state state;                //!< Event processing state.
+	enum xfr_type xfr_type;          //!< Transfer type (mostly IXFR versus AXFR).
+	knot_rrset_t *initial_soa_copy;  //!< Copy of the received initial SOA.
+	struct xfr_stats stats;          //!< Transfer statistics.
+	struct timespec started;         //!< When refresh started.
+	size_t change_size;              //!< Size of added and removed RRs.
+
+	struct {
+		zone_contents_t *zone;   //!< AXFR result, new zone.
+	} axfr;
+
+	struct {
+		struct ixfr_proc *proc;  //!< IXFR processing context.
+		knot_rrset_t *final_soa; //!< SOA denoting end of transfer.
+		list_t changesets;       //!< IXFR result, zone updates.
+	} ixfr;
+
+	bool updated; // TODO: Can we find a better way to check if zone was updated?
+	knot_mm_t *mm; // TODO: This used to be used in IXFR. Remove or reuse.
+};
+
+static const uint32_t EXPIRE_TIMER_INVALID = ~0U;
+
+/*! \brief Check whether the local serial is at least as recent as the remote one. */
+static bool serial_is_current(uint32_t local_serial, uint32_t remote_serial)
+{
+	uint32_t rel = serial_compare(local_serial, remote_serial);
+	return (rel & SERIAL_MASK_GEQ) != 0;
+}
+
+/*!
+ * \brief Compute the next bootstrap retry delay.
+ *
+ * The delay grows quadratically (5 * count^2 seconds) up to a cap of two
+ * hours, with a small random jitter added to prevent burst refresh.
+ *
+ * \param count  In/out attempt counter; incremented while below the cap.
+ */
+static time_t bootstrap_next(uint8_t *count)
+{
+	time_t delay = 5 * (*count) * (*count);
+
+	if (delay >= 7200) { // two hours
+		delay = 7200;
+	} else {
+		(*count)++;
+	}
+
+	// Random jitter spreads simultaneous retries apart.
+	return delay + dnssec_random_uint16_t() % 30;
+}
+
+/*!
+ * \brief Clamp a timer value into the configured min/max interval.
+ *
+ * \param conf     Server configuration.
+ * \param zone     Zone name (for configuration lookup and logging).
+ * \param timer    In/out timer value to clamp.
+ * \param tm_name  Timer name used in the log message.
+ * \param low      Option holding the lower bound, or NULL to skip the check.
+ * \param upp      Option holding the upper bound.
+ */
+static void limit_timer(conf_t *conf, const knot_dname_t *zone, uint32_t *timer,
+                        const char *tm_name, const yp_name_t *low, const yp_name_t *upp)
+{
+	uint32_t tlow = 0;
+	// NOTE: explicit NULL test -- relational comparison of a pointer
+	// against null (low > 0) is not defined by the C standard.
+	if (low != NULL) {
+		conf_val_t val1 = conf_zone_get(conf, low, zone);
+		tlow = conf_int(&val1);
+	}
+	conf_val_t val2 = conf_zone_get(conf, upp, zone);
+	uint32_t tupp = conf_int(&val2);
+
+	const char *msg = "%s timer trimmed to '%s-%s-interval'";
+	if (*timer < tlow) {
+		*timer = tlow;
+		log_zone_debug(zone, msg, tm_name, tm_name, "min");
+	} else if (*timer > tupp) {
+		*timer = tupp;
+		log_zone_debug(zone, msg, tm_name, tm_name, "max");
+	}
+}
+
+/*!
+ * \brief Modify the expire timer wrt the received EDNS EXPIRE (RFC 7314, section 4)
+ *
+ * \param data The refresh data.
+ * \param pkt A received packet to parse.
+ * \param strictly_follow Strictly use EDNS EXPIRE as the expire timer value.
+ * (false == RFC 7314, section 4, second paragraph,
+ * true == third paragraph)
+ */
+static void consume_edns_expire(struct refresh_data *data, knot_pkt_t *pkt, bool strictly_follow)
+{
+	// Catalog zones never expire; mark the timer as not applicable.
+	if (data->zone->is_catalog_flag) {
+		data->expire_timer = EXPIRE_TIMER_INVALID;
+		return;
+	}
+
+	// Only accept a well-formed 4-octet EDNS EXPIRE option (RFC 7314).
+	uint8_t *expire_opt = knot_pkt_edns_option(pkt, KNOT_EDNS_OPTION_EXPIRE);
+	if (expire_opt != NULL && knot_edns_opt_get_length(expire_opt) == sizeof(uint32_t)) {
+		uint32_t edns_expire = knot_wire_read_u32(knot_edns_opt_get_data(expire_opt));
+		// Either adopt the received value verbatim, or never shorten
+		// the currently scheduled expiration.
+		data->expire_timer = strictly_follow ? edns_expire :
+		                     MAX(edns_expire, data->zone->timers.next_expire - time(NULL));
+	}
+}
+
+/*!
+ * \brief Set the zone's refresh and expire timers after a successful
+ *        refresh check or transfer.
+ *
+ * Both timers are clamped into their configured min/max intervals;
+ * catalog zones get no expire timer at all.
+ */
+static void finalize_timers(struct refresh_data *data)
+{
+	conf_t *conf = data->conf;
+	zone_t *zone = data->zone;
+
+	// EDNS EXPIRE -- RFC 7314, section 4, fourth paragraph.
+	data->expire_timer = MIN(data->expire_timer, zone_soa_expire(data->zone));
+	assert(data->expire_timer != EXPIRE_TIMER_INVALID);
+
+	time_t now = time(NULL);
+	const knot_rdataset_t *soa = zone_soa(zone);
+
+	uint32_t soa_refresh = knot_soa_refresh(soa->rdata);
+	limit_timer(conf, zone->name, &soa_refresh, "refresh",
+	            C_REFRESH_MIN_INTERVAL, C_REFRESH_MAX_INTERVAL);
+	zone->timers.next_refresh = now + soa_refresh;
+	zone->timers.last_refresh_ok = true;
+
+	if (zone->is_catalog_flag) {
+		// It's already zero in most cases.
+		zone->timers.next_expire = 0;
+	} else {
+		limit_timer(conf, zone->name, &data->expire_timer, "expire",
+		            // Limit min if not received as EDNS Expire.
+		            data->expire_timer == knot_soa_expire(soa->rdata) ?
+		            C_EXPIRE_MIN_INTERVAL : 0,
+		            C_EXPIRE_MAX_INTERVAL);
+		zone->timers.next_expire = now + data->expire_timer;
+	}
+}
+
+/*!
+ * \brief Format an ", expires in N seconds" suffix into the given buffer,
+ *        leaving it untouched when no expiration is scheduled.
+ */
+static void fill_expires_in(char *expires_in, size_t size, const struct refresh_data *data)
+{
+	// Catalog zones must never have an expire timer set.
+	assert(!data->zone->is_catalog_flag || data->zone->timers.next_expire == 0);
+	if (data->zone->timers.next_expire <= 0) {
+		return;
+	}
+	(void)snprintf(expires_in, size, ", expires in %u seconds",
+	               data->expire_timer);
+}
+
+/*!
+ * \brief Log a successful zone publication: duration, serial change,
+ *        optional remote serial, and optional expiration note.
+ */
+static void xfr_log_publish(const struct refresh_data *data,
+                            const uint32_t old_serial,
+                            const uint32_t new_serial,
+                            const uint32_t master_serial,
+                            bool has_master_serial,
+                            bool axfr_bootstrap)
+{
+	struct timespec finished = time_now();
+	double duration = time_diff_ms(&data->started, &finished) / 1000.0;
+
+	// On bootstrap there is no previous serial to report.
+	char old_info[32] = "none";
+	if (!axfr_bootstrap) {
+		(void)snprintf(old_info, sizeof(old_info), "%u", old_serial);
+	}
+
+	char master_info[32] = "";
+	if (has_master_serial) {
+		(void)snprintf(master_info, sizeof(master_info),
+		               ", remote serial %u", master_serial);
+	}
+
+	char expires_in[32] = "";
+	fill_expires_in(expires_in, sizeof(expires_in), data);
+
+	REFRESH_LOG(LOG_INFO, data, LOG_DIRECTION_NONE,
+	            "zone updated, %0.2f seconds, serial %s -> %u%s%s",
+	            duration, old_info, new_serial, master_info, expires_in);
+}
+
+/*! \brief Log a failure to read the stored master serial from the KASP DB. */
+static void xfr_log_read_ms(const knot_dname_t *zone, int ret)
+{
+	log_zone_error(zone, "failed reading master serial from KASP DB (%s)", knot_strerror(ret));
+}
+
+/*!
+ * \brief Allocate fresh zone contents to receive an incoming AXFR.
+ *
+ * \return KNOT_EOK or KNOT_ENOMEM.
+ */
+static int axfr_init(struct refresh_data *data)
+{
+	zone_contents_t *contents = zone_contents_new(data->zone->name, true);
+	if (contents == NULL) {
+		return KNOT_ENOMEM;
+	}
+	data->axfr.zone = contents;
+
+	return KNOT_EOK;
+}
+
+/*! \brief Free a partially received AXFR zone, if any. */
+static void axfr_cleanup(struct refresh_data *data)
+{
+	zone_contents_deep_free(data->axfr.zone);
+	data->axfr.zone = NULL;
+}
+
+/*!
+ * \brief Choose the SOA serial for a locally re-signed slave copy of the zone.
+ *
+ * \param new_contents   Transferred zone contents; its serial is rewritten in place.
+ * \param zone           Zone being updated.
+ * \param conf           Server configuration (serial policy).
+ * \param master_serial  Output: serial as received from the master.
+ */
+static void axfr_slave_sign_serial(zone_contents_t *new_contents, zone_t *zone,
+                                   conf_t *conf, uint32_t *master_serial)
+{
+	// Update slave's serial to ensure it's growing and consistent with
+	// its serial policy.
+	conf_val_t val = conf_zone_get(conf, C_SERIAL_POLICY, zone->name);
+	unsigned serial_policy = conf_opt(&val);
+
+	*master_serial = zone_contents_serial(new_contents);
+
+	uint32_t new_serial, lastsigned_serial;
+	if (zone->contents != NULL) {
+		// Retransfer or AXFR-fallback - increment current serial.
+		new_serial = serial_next(zone_contents_serial(zone->contents), serial_policy, 1);
+	} else if (zone_get_lastsigned_serial(zone, &lastsigned_serial) == KNOT_EOK) {
+		// Bootstrap - increment stored serial.
+		new_serial = serial_next(lastsigned_serial, serial_policy, 1);
+	} else {
+		// Bootstrap - try to reuse master serial, considering policy.
+		new_serial = serial_next(*master_serial, serial_policy, 0);
+	}
+	zone_contents_set_soa_serial(new_contents, new_serial);
+}
+
+/*!
+ * \brief Finalize a completed AXFR: adjust serial, semcheck, sign or digest,
+ *        commit the update, and reschedule zone timers.
+ *
+ * Ownership of data->axfr.zone passes to the zone update once
+ * zone_update_from_contents() succeeds.
+ *
+ * \return KNOT_E*
+ */
+static int axfr_finalize(struct refresh_data *data)
+{
+	zone_contents_t *new_zone = data->axfr.zone;
+
+	conf_val_t val = conf_zone_get(data->conf, C_DNSSEC_SIGNING, data->zone->name);
+	bool dnssec_enable = conf_bool(&val);
+	uint32_t old_serial = zone_contents_serial(data->zone->contents), master_serial = 0;
+	bool bootstrap = (data->zone->contents == NULL);
+
+	if (dnssec_enable) {
+		axfr_slave_sign_serial(new_zone, data->zone, data->conf, &master_serial);
+	}
+
+	zone_update_t up = { 0 };
+	int ret = zone_update_from_contents(&up, data->zone, new_zone, UPDATE_FULL);
+	if (ret != KNOT_EOK) {
+		// Local failure: retrying another remote would not help.
+		data->fallback->remote = false;
+		return ret;
+	}
+	// Seized by zone_update. Don't free the contents again in axfr_cleanup.
+	data->axfr.zone = NULL;
+
+	ret = zone_update_semcheck(data->conf, &up);
+	if (ret == KNOT_EOK) {
+		ret = zone_update_verify_digest(data->conf, &up);
+	}
+	if (ret != KNOT_EOK) {
+		zone_update_clear(&up);
+		return ret;
+	}
+
+	val = conf_zone_get(data->conf, C_ZONEMD_GENERATE, data->zone->name);
+	unsigned digest_alg = conf_opt(&val);
+
+	// Either sign the zone locally, or just (re)generate the ZONEMD.
+	if (dnssec_enable) {
+		zone_sign_reschedule_t resch = { 0 };
+		ret = knot_dnssec_zone_sign(&up, data->conf, ZONE_SIGN_KEEP_SERIAL, KEY_ROLL_ALLOW_ALL, 0, &resch);
+		event_dnssec_reschedule(data->conf, data->zone, &resch, true);
+	} else if (digest_alg != ZONE_DIGEST_NONE) {
+		assert(zone_update_to(&up) != NULL);
+		ret = zone_update_add_digest(&up, digest_alg, false);
+	}
+	if (ret != KNOT_EOK) {
+		zone_update_clear(&up);
+		data->fallback->remote = false;
+		return ret;
+	}
+
+	ret = zone_update_commit(data->conf, &up);
+	if (ret != KNOT_EOK) {
+		zone_update_clear(&up);
+		AXFRIN_LOG(LOG_WARNING, data,
+		           "failed to store changes (%s)", knot_strerror(ret));
+		data->fallback->remote = false;
+		return ret;
+	}
+
+	if (dnssec_enable) {
+		ret = zone_set_master_serial(data->zone, master_serial);
+		if (ret != KNOT_EOK) {
+			log_zone_warning(data->zone->name,
+			                 "unable to save master serial, future transfers might be broken");
+		}
+	}
+
+	finalize_timers(data);
+	xfr_log_publish(data, old_serial, zone_contents_serial(new_zone),
+	                master_serial, dnssec_enable, bootstrap);
+
+	return KNOT_EOK;
+}
+
+/*!
+ * \brief Add one received RR into the AXFR zone under construction.
+ *
+ * \return KNOT_STATE_CONSUME, KNOT_STATE_DONE (second SOA seen), or
+ *         KNOT_STATE_FAIL (data->ret holds the error).
+ */
+static int axfr_consume_rr(const knot_rrset_t *rr, struct refresh_data *data)
+{
+	assert(rr);
+	assert(data);
+	assert(data->axfr.zone);
+
+	// zc is stateless structure which can be initialized for each rr
+	// the changes are stored only in data->axfr.zone (aka zc.z)
+	zcreator_t zc = {
+		.z = data->axfr.zone,
+		.master = false,
+		.ret = KNOT_EOK
+	};
+
+	// A second SOA record terminates the AXFR stream.
+	if (rr->type == KNOT_RRTYPE_SOA &&
+	    node_rrtype_exists(zc.z->apex, KNOT_RRTYPE_SOA)) {
+		return KNOT_STATE_DONE;
+	}
+
+	data->ret = zcreator_step(&zc, rr);
+	if (data->ret != KNOT_EOK) {
+		return KNOT_STATE_FAIL;
+	}
+
+	// Enforce the configured zone size limit while receiving.
+	data->change_size += knot_rrset_size(rr);
+	if (data->change_size > data->max_zone_size) {
+		AXFRIN_LOG(LOG_WARNING, data,
+		           "zone size exceeded");
+		data->ret = KNOT_EZONESIZE;
+		return KNOT_STATE_FAIL;
+	}
+
+	return KNOT_STATE_CONSUME;
+}
+
+/*! \brief Feed all answer-section RRs of one AXFR packet to the RR consumer. */
+static int axfr_consume_packet(knot_pkt_t *pkt, struct refresh_data *data)
+{
+	assert(pkt);
+	assert(data);
+
+	const knot_pktsection_t *answer = knot_pkt_section(pkt, KNOT_ANSWER);
+	int state = KNOT_STATE_CONSUME;
+	for (uint16_t i = 0; state == KNOT_STATE_CONSUME && i < answer->count; ++i) {
+		state = axfr_consume_rr(knot_pkt_rr(answer, i), data);
+	}
+	return state;
+}
+
+/*!
+ * \brief Consume one AXFR response packet.
+ *
+ * \param pkt        Received packet.
+ * \param data       Refresh context.
+ * \param reuse_soa  Re-process the SOA saved by a failed IXFR attempt.
+ *
+ * \return KNOT_STATE_* value.
+ */
+static int axfr_consume(knot_pkt_t *pkt, struct refresh_data *data, bool reuse_soa)
+{
+	assert(pkt);
+	assert(data);
+
+	// Check RCODE
+	if (knot_pkt_ext_rcode(pkt) != KNOT_RCODE_NOERROR) {
+		AXFRIN_LOG(LOG_WARNING, data,
+		           "server responded with error '%s'",
+		           knot_pkt_ext_rcode_name(pkt));
+		data->ret = KNOT_EDENIED;
+		return KNOT_STATE_FAIL;
+	}
+
+	// Initialize with first packet
+	if (data->axfr.zone == NULL) {
+		data->ret = axfr_init(data);
+		if (data->ret != KNOT_EOK) {
+			AXFRIN_LOG(LOG_WARNING, data,
+			           "failed to initialize (%s)",
+			           knot_strerror(data->ret));
+			data->fallback->remote = false;
+			return KNOT_STATE_FAIL;
+		}
+
+		AXFRIN_LOG(LOG_INFO, data, "started");
+		xfr_stats_begin(&data->stats);
+		data->change_size = 0;
+	}
+
+	int next;
+	// Process saved SOA if fallback from IXFR
+	if (data->initial_soa_copy != NULL) {
+		next = reuse_soa ? axfr_consume_rr(data->initial_soa_copy, data) :
+		                   KNOT_STATE_CONSUME;
+		knot_rrset_free(data->initial_soa_copy, data->mm);
+		data->initial_soa_copy = NULL;
+		if (next != KNOT_STATE_CONSUME) {
+			return next;
+		}
+	}
+
+	// Process answer packet
+	xfr_stats_add(&data->stats, pkt->size);
+	next = axfr_consume_packet(pkt, data);
+
+	// Finalize
+	if (next == KNOT_STATE_DONE) {
+		xfr_stats_end(&data->stats);
+	}
+
+	return next;
+}
+
+/*!
+ * \brief Initialize IXFR-in processing context.
+ *
+ * \return KNOT_EOK or KNOT_ENOMEM.
+ */
+static int ixfr_init(struct refresh_data *data)
+{
+	struct ixfr_proc *proc = mm_alloc(data->mm, sizeof(*proc));
+	if (proc == NULL) {
+		return KNOT_ENOMEM;
+	}
+
+	memset(proc, 0, sizeof(struct ixfr_proc));
+	proc->state = IXFR_START;
+	proc->mm = data->mm;
+
+	data->ixfr.proc = proc;
+	data->ixfr.final_soa = NULL;
+
+	init_list(&data->ixfr.changesets);
+
+	return KNOT_EOK;
+}
+
+/*! \brief Clean up data allocated by IXFR-in processing (no-op when not started). */
+static void ixfr_cleanup(struct refresh_data *data)
+{
+	if (data->ixfr.proc == NULL) {
+		return;
+	}
+
+	knot_rrset_free(data->ixfr.final_soa, data->mm);
+	data->ixfr.final_soa = NULL;
+	mm_free(data->mm, data->ixfr.proc);
+	data->ixfr.proc = NULL;
+
+	changesets_free(&data->ixfr.changesets);
+}
+
+/*!
+ * \brief Rewrite one changeset's SOA serials for local re-signing.
+ *
+ * Verifies that the changeset continues exactly at the tracked master serial
+ * and that its serial grows, then substitutes locally generated serials.
+ * Both tracked serials are advanced on success.
+ *
+ * \return True on success, false on inconsistency.
+ */
+static bool ixfr_serial_once(changeset_t *ch, int policy, uint32_t *master_serial, uint32_t *local_serial)
+{
+	uint32_t ch_from = changeset_from(ch), ch_to = changeset_to(ch);
+
+	// Reject a gap in the changeset chain or a non-increasing serial.
+	if (ch_from != *master_serial || (serial_compare(ch_from, ch_to) & SERIAL_MASK_GEQ)) {
+		return false;
+	}
+
+	uint32_t new_from = *local_serial;
+	uint32_t new_to = serial_next(new_from, policy, 1);
+	knot_soa_serial_set(ch->soa_from->rrs.rdata, new_from);
+	knot_soa_serial_set(ch->soa_to->rrs.rdata, new_to);
+
+	*master_serial = ch_to;
+	*local_serial = new_to;
+
+	return true;
+}
+
+/*!
+ * \brief Rewrite all incoming changeset serials so a locally re-signed slave
+ *        keeps a consistent, growing serial while tracking the master's one.
+ *
+ * \param changesets     Incoming IXFR changesets, rewritten in place.
+ * \param zone           Zone being updated.
+ * \param conf           Server configuration (serial policy).
+ * \param master_serial  Output: master's serial after the transfer.
+ *
+ * \return KNOT_E*
+ */
+static int ixfr_slave_sign_serial(list_t *changesets, zone_t *zone,
+                                  conf_t *conf, uint32_t *master_serial)
+{
+	uint32_t local_serial = zone_contents_serial(zone->contents), lastsigned;
+
+	if (zone_get_lastsigned_serial(zone, &lastsigned) != KNOT_EOK || lastsigned != local_serial) {
+		// this is kind of assert
+		return KNOT_ERROR;
+	}
+
+	conf_val_t val = conf_zone_get(conf, C_SERIAL_POLICY, zone->name);
+	unsigned serial_policy = conf_opt(&val);
+
+	int ret = zone_get_master_serial(zone, master_serial);
+	if (ret != KNOT_EOK) {
+		// Use the shared logging helper (like the other call sites);
+		// the previous inline string concatenated to "serialfrom".
+		xfr_log_read_ms(zone->name, ret);
+		return ret;
+	}
+
+	changeset_t *chs;
+	WALK_LIST(chs, *changesets) {
+		if (!ixfr_serial_once(chs, serial_policy, master_serial, &local_serial)) {
+			return KNOT_EINVAL;
+		}
+	}
+
+	return KNOT_EOK;
+}
+
+/*!
+ * \brief Finalize a completed IXFR: apply changesets, semcheck, sign or
+ *        digest, commit the update, and reschedule zone timers.
+ *
+ * On most local failures, fallback to AXFR and/or another remote is disabled.
+ *
+ * \return KNOT_E*
+ */
+static int ixfr_finalize(struct refresh_data *data)
+{
+	conf_val_t val = conf_zone_get(data->conf, C_DNSSEC_SIGNING, data->zone->name);
+	bool dnssec_enable = conf_bool(&val);
+	uint32_t master_serial = 0, old_serial = zone_contents_serial(data->zone->contents);
+
+	if (dnssec_enable) {
+		int ret = ixfr_slave_sign_serial(&data->ixfr.changesets, data->zone, data->conf, &master_serial);
+		if (ret != KNOT_EOK) {
+			IXFRIN_LOG(LOG_WARNING, data,
+			           "failed to adjust SOA serials from unsigned remote (%s)",
+			           knot_strerror(ret));
+			data->fallback_axfr = false;
+			data->fallback->remote = false;
+			return ret;
+		}
+	}
+
+	zone_update_t up = { 0 };
+	int ret = zone_update_init(&up, data->zone, UPDATE_INCREMENTAL | UPDATE_STRICT | UPDATE_NO_CHSET);
+	if (ret != KNOT_EOK) {
+		data->fallback_axfr = false;
+		data->fallback->remote = false;
+		return ret;
+	}
+
+	// Apply received changesets in order; any failure aborts the update.
+	changeset_t *set;
+	WALK_LIST(set, data->ixfr.changesets) {
+		ret = zone_update_apply_changeset(&up, set);
+		if (ret != KNOT_EOK) {
+			uint32_t serial_from = knot_soa_serial(set->soa_from->rrs.rdata);
+			uint32_t serial_to = knot_soa_serial(set->soa_to->rrs.rdata);
+			zone_update_clear(&up);
+			IXFRIN_LOG(LOG_WARNING, data,
+			           "serial %u -> %u, failed to apply changes to zone (%s)",
+			           serial_from, serial_to, knot_strerror(ret));
+			return ret;
+		}
+	}
+
+	ret = zone_update_semcheck(data->conf, &up);
+	if (ret == KNOT_EOK) {
+		ret = zone_update_verify_digest(data->conf, &up);
+	}
+	if (ret != KNOT_EOK) {
+		zone_update_clear(&up);
+		data->fallback_axfr = false;
+		return ret;
+	}
+
+	val = conf_zone_get(data->conf, C_ZONEMD_GENERATE, data->zone->name);
+	unsigned digest_alg = conf_opt(&val);
+
+	// Either sign the update locally, or just (re)generate the ZONEMD.
+	if (dnssec_enable) {
+		ret = knot_dnssec_sign_update(&up, data->conf);
+	} else if (digest_alg != ZONE_DIGEST_NONE) {
+		assert(zone_update_to(&up) != NULL);
+		ret = zone_update_add_digest(&up, digest_alg, false);
+	}
+	if (ret != KNOT_EOK) {
+		zone_update_clear(&up);
+		data->fallback_axfr = false;
+		data->fallback->remote = false;
+		return ret;
+	}
+
+	ret = zone_update_commit(data->conf, &up);
+	if (ret != KNOT_EOK) {
+		zone_update_clear(&up);
+		IXFRIN_LOG(LOG_WARNING, data,
+		           "failed to store changes (%s)", knot_strerror(ret));
+		return ret;
+	}
+
+	if (dnssec_enable && !EMPTY_LIST(data->ixfr.changesets)) {
+		ret = zone_set_master_serial(data->zone, master_serial);
+		if (ret != KNOT_EOK) {
+			log_zone_warning(data->zone->name,
+			                 "unable to save master serial, future transfers might be broken");
+		}
+	}
+
+	finalize_timers(data);
+	xfr_log_publish(data, old_serial, zone_contents_serial(data->zone->contents),
+	                master_serial, dnssec_enable, false);
+
+	return KNOT_EOK;
+}
+
+/*!
+ * \brief Stores starting SOA into changesets structure.
+ *
+ * The first RR of an IXFR stream must be the remote's final SOA.
+ *
+ * \return KNOT_E*
+ */
+static int ixfr_solve_start(const knot_rrset_t *rr, struct refresh_data *data)
+{
+	assert(data->ixfr.final_soa == NULL);
+	if (rr->type != KNOT_RRTYPE_SOA) {
+		return KNOT_EMALF;
+	}
+
+	// Store terminal SOA
+	data->ixfr.final_soa = knot_rrset_copy(rr, data->mm);
+	if (data->ixfr.final_soa == NULL) {
+		return KNOT_ENOMEM;
+	}
+
+	// Initialize list for changes
+	init_list(&data->ixfr.changesets);
+
+	return KNOT_EOK;
+}
+
+/*!
+ * \brief Decides what to do with a starting SOA (deletions).
+ *
+ * Opens a new changeset whose soa_from is the given SOA.
+ *
+ * \return KNOT_E*
+ */
+static int ixfr_solve_soa_del(const knot_rrset_t *rr, struct refresh_data *data)
+{
+	if (rr->type != KNOT_RRTYPE_SOA) {
+		return KNOT_EMALF;
+	}
+
+	// Create new changeset.
+	changeset_t *change = changeset_new(data->zone->name);
+	if (change == NULL) {
+		return KNOT_ENOMEM;
+	}
+
+	// Store SOA into changeset.
+	change->soa_from = knot_rrset_copy(rr, NULL);
+	if (change->soa_from == NULL) {
+		changeset_free(change);
+		return KNOT_ENOMEM;
+	}
+
+	// Add changeset.
+	add_tail(&data->ixfr.changesets, &change->n);
+
+	return KNOT_EOK;
+}
+
+/*!
+ * \brief Stores ending SOA into changeset.
+ *
+ * \note NOTE(review): the mm parameter is unused; the copy uses the default
+ *       allocator -- confirm this is intentional.
+ *
+ * \return KNOT_E*
+ */
+static int ixfr_solve_soa_add(const knot_rrset_t *rr, changeset_t *change, knot_mm_t *mm)
+{
+	if (rr->type != KNOT_RRTYPE_SOA) {
+		return KNOT_EMALF;
+	}
+
+	change->soa_to = knot_rrset_copy(rr, NULL);
+	if (change->soa_to == NULL) {
+		return KNOT_ENOMEM;
+	}
+
+	return KNOT_EOK;
+}
+
+/*! \brief Adds single RR into remove section of changeset.
+ *  \note NOTE(review): mm is unused here -- confirm intentional. */
+static int ixfr_solve_del(const knot_rrset_t *rr, changeset_t *change, knot_mm_t *mm)
+{
+	return changeset_add_removal(change, rr, 0);
+}
+
+/*! \brief Adds single RR into add section of changeset.
+ *  \note NOTE(review): mm is unused here -- confirm intentional. */
+static int ixfr_solve_add(const knot_rrset_t *rr, changeset_t *change, knot_mm_t *mm)
+{
+	return changeset_add_addition(change, rr, 0);
+}
+
+/*!
+ * \brief Decides what the next IXFR-in state should be.
+ *
+ * The state machine mirrors the IXFR message structure (RFC 1995):
+ * alternating delete/add sections delimited by SOA records, terminated
+ * by a repetition of the final SOA.
+ */
+static int ixfr_next_state(struct refresh_data *data, const knot_rrset_t *rr)
+{
+	const bool soa = (rr->type == KNOT_RRTYPE_SOA);
+	enum ixfr_state state = data->ixfr.proc->state;
+
+	// A repeated final SOA while in an add section ends the transfer.
+	if ((state == IXFR_SOA_ADD || state == IXFR_ADD) &&
+	    knot_rrset_equal(rr, data->ixfr.final_soa, true)) {
+		return IXFR_DONE;
+	}
+
+	switch (state) {
+	case IXFR_START:
+		// Final SOA already stored or transfer start.
+		return data->ixfr.final_soa ? IXFR_SOA_DEL : IXFR_START;
+	case IXFR_SOA_DEL:
+		// Empty delete section or start of delete section.
+		return soa ? IXFR_SOA_ADD : IXFR_DEL;
+	case IXFR_SOA_ADD:
+		// Empty add section or start of add section.
+		return soa ? IXFR_SOA_DEL : IXFR_ADD;
+	case IXFR_DEL:
+		// End of delete section or continue.
+		return soa ? IXFR_SOA_ADD : IXFR_DEL;
+	case IXFR_ADD:
+		// End of add section or continue.
+		return soa ? IXFR_SOA_DEL : IXFR_ADD;
+	default:
+		assert(0);
+		return IXFR_INVALID;
+	}
+}
+
+/*!
+ * \brief Processes single RR according to current IXFR-in state. The states
+ *        correspond with IXFR-in message structure, in the order they are
+ *        mentioned in the code.
+ *
+ * \param rr    RR to process.
+ * \param data  Processing context.
+ *
+ * \return KNOT_E*
+ */
+static int ixfr_step(const knot_rrset_t *rr, struct refresh_data *data)
+{
+	data->ixfr.proc->state = ixfr_next_state(data, rr);
+	// The tail of the list is the changeset currently being filled.
+	changeset_t *change = TAIL(data->ixfr.changesets);
+
+	switch (data->ixfr.proc->state) {
+	case IXFR_START:
+		return ixfr_solve_start(rr, data);
+	case IXFR_SOA_DEL:
+		return ixfr_solve_soa_del(rr, data);
+	case IXFR_DEL:
+		return ixfr_solve_del(rr, change, data->mm);
+	case IXFR_SOA_ADD:
+		return ixfr_solve_soa_add(rr, change, data->mm);
+	case IXFR_ADD:
+		return ixfr_solve_add(rr, change, data->mm);
+	case IXFR_DONE:
+		return KNOT_EOK;
+	default:
+		return KNOT_ERROR;
+	}
+}
+
+/*!
+ * \brief Consume one RR of an IXFR stream.
+ *
+ * \return KNOT_STATE_CONSUME, KNOT_STATE_DONE, or KNOT_STATE_FAIL
+ *         (data->ret holds the error).
+ */
+static int ixfr_consume_rr(const knot_rrset_t *rr, struct refresh_data *data)
+{
+	// Silently skip out-of-bailiwick records.
+	if (knot_dname_in_bailiwick(rr->owner, data->zone->name) < 0) {
+		return KNOT_STATE_CONSUME;
+	}
+
+	data->ret = ixfr_step(rr, data);
+	if (data->ret != KNOT_EOK) {
+		IXFRIN_LOG(LOG_WARNING, data,
+		           "failed (%s)", knot_strerror(data->ret));
+		return KNOT_STATE_FAIL;
+	}
+
+	// Halved limit: the transfer carries both removals and additions.
+	data->change_size += knot_rrset_size(rr);
+	if (data->change_size / 2 > data->max_zone_size) {
+		IXFRIN_LOG(LOG_WARNING, data,
+		           "transfer size exceeded");
+		data->ret = KNOT_EZONESIZE;
+		return KNOT_STATE_FAIL;
+	}
+
+	if (data->ixfr.proc->state == IXFR_DONE) {
+		return KNOT_STATE_DONE;
+	}
+
+	return KNOT_STATE_CONSUME;
+}
+
+/*!
+ * \brief Processes IXFR reply packet and fills in the changesets structure.
+ *
+ * \param pkt   Packet containing the IXFR reply in wire format.
+ * \param data  Answer data, including processing context.
+ *
+ * \return KNOT_STATE_CONSUME, KNOT_STATE_DONE, KNOT_STATE_FAIL
+ */
+static int ixfr_consume_packet(knot_pkt_t *pkt, struct refresh_data *data)
+{
+	// Feed every answer-section RR to the IXFR state machine.
+	const knot_pktsection_t *answer = knot_pkt_section(pkt, KNOT_ANSWER);
+	int state = KNOT_STATE_CONSUME;
+	for (uint16_t i = 0; state == KNOT_STATE_CONSUME && i < answer->count; ++i) {
+		state = ixfr_consume_rr(knot_pkt_rr(answer, i), data);
+	}
+	return state;
+}
+
+/*!
+ * \brief Heuristically classify a transfer response from its first RRs.
+ *
+ * \param answer       Answer section of the (first) response packet.
+ * \param zone_serial  Current local serial (master-side view).
+ * \param initial_soa  SOA saved from a previous undetermined packet, or NULL.
+ */
+static enum xfr_type determine_xfr_type(const knot_pktsection_t *answer,
+                                        uint32_t zone_serial, const knot_rrset_t *initial_soa)
+{
+	if (answer->count < 1) {
+		return XFR_TYPE_NOTIMP;
+	}
+
+	const knot_rrset_t *rr_one = knot_pkt_rr(answer, 0);
+	if (initial_soa != NULL) {
+		// Continuation packet: a repeated initial SOA means AXFR-style.
+		if (rr_one->type == KNOT_RRTYPE_SOA) {
+			return knot_rrset_equal(initial_soa, rr_one, true) ?
+			       XFR_TYPE_AXFR : XFR_TYPE_IXFR;
+		}
+		return XFR_TYPE_AXFR;
+	}
+
+	// A lone SOA either means up-to-date or needs the next packet to decide.
+	if (answer->count == 1) {
+		if (rr_one->type == KNOT_RRTYPE_SOA) {
+			return serial_is_current(zone_serial, knot_soa_serial(rr_one->rrs.rdata)) ?
+			       XFR_TYPE_UPTODATE : XFR_TYPE_UNDETERMINED;
+		}
+		return XFR_TYPE_ERROR;
+	}
+
+	// Two identical SOAs form a minimal (empty-zone) AXFR.
+	const knot_rrset_t *rr_two = knot_pkt_rr(answer, 1);
+	if (answer->count == 2 && rr_one->type == KNOT_RRTYPE_SOA &&
+	    knot_rrset_equal(rr_one, rr_two, true)) {
+		return XFR_TYPE_AXFR;
+	}
+
+	return (rr_one->type == KNOT_RRTYPE_SOA && rr_two->type != KNOT_RRTYPE_SOA) ?
+	       XFR_TYPE_AXFR : XFR_TYPE_IXFR;
+}
+
+/*!
+ * \brief Consume one IXFR response packet.
+ *
+ * The first packet determines the actual transfer type (IXFR, AXFR-style,
+ * up-to-date, or error) and initializes the processing context accordingly.
+ *
+ * \return KNOT_STATE_* value.
+ */
+static int ixfr_consume(knot_pkt_t *pkt, struct refresh_data *data)
+{
+	assert(pkt);
+	assert(data);
+
+	// Check RCODE
+	if (knot_pkt_ext_rcode(pkt) != KNOT_RCODE_NOERROR) {
+		IXFRIN_LOG(LOG_WARNING, data,
+		           "server responded with error '%s'",
+		           knot_pkt_ext_rcode_name(pkt));
+		data->ret = KNOT_EDENIED;
+		return KNOT_STATE_FAIL;
+	}
+
+	// Initialize with first packet
+	if (data->ixfr.proc == NULL) {
+		const knot_pktsection_t *answer = knot_pkt_section(pkt, KNOT_ANSWER);
+
+		uint32_t master_serial;
+		data->ret = slave_zone_serial(data->zone, data->conf, &master_serial);
+		if (data->ret != KNOT_EOK) {
+			xfr_log_read_ms(data->zone->name, data->ret);
+			data->fallback_axfr = false;
+			data->fallback->remote = false;
+			return KNOT_STATE_FAIL;
+		}
+		data->xfr_type = determine_xfr_type(answer, master_serial,
+		                                    data->initial_soa_copy);
+		switch (data->xfr_type) {
+		case XFR_TYPE_ERROR:
+			IXFRIN_LOG(LOG_WARNING, data,
+			           "malformed response SOA");
+			data->ret = KNOT_EMALF;
+			data->xfr_type = XFR_TYPE_IXFR; // unrecognisable IXFR type is the same as failed IXFR
+			return KNOT_STATE_FAIL;
+		case XFR_TYPE_NOTIMP:
+			IXFRIN_LOG(LOG_WARNING, data,
+			           "not supported by remote");
+			data->ret = KNOT_ENOTSUP;
+			data->xfr_type = XFR_TYPE_IXFR;
+			return KNOT_STATE_FAIL;
+		case XFR_TYPE_UNDETERMINED:
+			// Store the SOA and check with next packet
+			data->initial_soa_copy = knot_rrset_copy(knot_pkt_rr(answer, 0), data->mm);
+			if (data->initial_soa_copy == NULL) {
+				data->ret = KNOT_ENOMEM;
+				return KNOT_STATE_FAIL;
+			}
+			xfr_stats_add(&data->stats, pkt->size);
+			return KNOT_STATE_CONSUME;
+		case XFR_TYPE_AXFR:
+			IXFRIN_LOG(LOG_INFO, data,
+			           "receiving AXFR-style IXFR");
+			return axfr_consume(pkt, data, true);
+		case XFR_TYPE_UPTODATE:
+			consume_edns_expire(data, pkt, false);
+			finalize_timers(data);
+			char expires_in[32] = "";
+			fill_expires_in(expires_in, sizeof(expires_in), data);
+			IXFRIN_LOG(LOG_INFO, data,
+			           "zone is up-to-date%s", expires_in);
+			xfr_stats_begin(&data->stats);
+			xfr_stats_add(&data->stats, pkt->size);
+			xfr_stats_end(&data->stats);
+			return KNOT_STATE_DONE;
+		case XFR_TYPE_IXFR:
+			break;
+		default:
+			assert(0);
+			data->ret = KNOT_EPROCESSING;
+			return KNOT_STATE_FAIL;
+		}
+
+		data->ret = ixfr_init(data);
+		if (data->ret != KNOT_EOK) {
+			IXFRIN_LOG(LOG_WARNING, data,
+			           "failed to initialize (%s)", knot_strerror(data->ret));
+			data->fallback_axfr = false;
+			data->fallback->remote = false;
+			return KNOT_STATE_FAIL;
+		}
+
+		IXFRIN_LOG(LOG_INFO, data, "started");
+		xfr_stats_begin(&data->stats);
+		data->change_size = 0;
+	}
+
+	int next;
+	// Process saved SOA if existing
+	if (data->initial_soa_copy != NULL) {
+		next = ixfr_consume_rr(data->initial_soa_copy, data);
+		knot_rrset_free(data->initial_soa_copy, data->mm);
+		data->initial_soa_copy = NULL;
+		if (next != KNOT_STATE_CONSUME) {
+			return next;
+		}
+	}
+
+	// Process answer packet
+	xfr_stats_add(&data->stats, pkt->size);
+	next = ixfr_consume_packet(pkt, data);
+
+	// Finalize
+	if (next == KNOT_STATE_DONE) {
+		xfr_stats_end(&data->stats);
+	}
+
+	return next;
+}
+
+/*! \brief Build the refresh SOA query packet (layer produce callback). */
+static int soa_query_produce(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	struct refresh_data *data = layer->data;
+
+	query_init_pkt(pkt);
+
+	data->ret = knot_pkt_put_question(pkt, data->zone->name, KNOT_CLASS_IN,
+	                                  KNOT_RRTYPE_SOA);
+	if (data->ret != KNOT_EOK) {
+		return KNOT_STATE_FAIL;
+	}
+
+	if (data->use_edns) {
+		data->ret = query_put_edns(pkt, &data->edns);
+		if (data->ret != KNOT_EOK) {
+			return KNOT_STATE_FAIL;
+		}
+	}
+
+	return KNOT_STATE_CONSUME;
+}
+
+/*!
+ * \brief Process the SOA query response and decide whether a transfer is
+ *        needed (layer consume callback).
+ *
+ * \return KNOT_STATE_DONE (up-to-date), KNOT_STATE_RESET (continue with
+ *         transfer), or KNOT_STATE_FAIL.
+ */
+static int soa_query_consume(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	struct refresh_data *data = layer->data;
+
+	if (knot_pkt_ext_rcode(pkt) != KNOT_RCODE_NOERROR) {
+		REFRESH_LOG(LOG_WARNING, data, LOG_DIRECTION_IN,
+		            "server responded with error '%s'",
+		            knot_pkt_ext_rcode_name(pkt));
+		data->ret = KNOT_EDENIED;
+		return KNOT_STATE_FAIL;
+	}
+
+	const knot_pktsection_t *answer = knot_pkt_section(pkt, KNOT_ANSWER);
+	const knot_rrset_t *rr = answer->count == 1 ? knot_pkt_rr(answer, 0) : NULL;
+	if (!rr || rr->type != KNOT_RRTYPE_SOA || rr->rrs.count != 1) {
+		REFRESH_LOG(LOG_WARNING, data, LOG_DIRECTION_IN,
+		            "malformed message");
+		// With soft semantic checks, fall back to AXFR instead of failing.
+		conf_val_t val = conf_zone_get(data->conf, C_SEM_CHECKS, data->zone->name);
+		if (conf_opt(&val) == SEMCHECKS_SOFT) {
+			data->xfr_type = XFR_TYPE_AXFR;
+			data->state = STATE_TRANSFER;
+			return KNOT_STATE_RESET;
+		} else {
+			data->ret = KNOT_EMALF;
+			return KNOT_STATE_FAIL;
+		}
+	}
+
+	uint32_t local_serial;
+	data->ret = slave_zone_serial(data->zone, data->conf, &local_serial);
+	if (data->ret != KNOT_EOK) {
+		xfr_log_read_ms(data->zone->name, data->ret);
+		data->fallback->remote = false;
+		return KNOT_STATE_FAIL;
+	}
+	uint32_t remote_serial = knot_soa_serial(rr->rrs.rdata);
+	bool current = serial_is_current(local_serial, remote_serial);
+	bool master_uptodate = serial_is_current(remote_serial, local_serial);
+
+	if (!current) {
+		REFRESH_LOG(LOG_INFO, data, LOG_DIRECTION_NONE,
+		            "remote serial %u, zone is outdated", remote_serial);
+		data->state = STATE_TRANSFER;
+		return KNOT_STATE_RESET; // continue with transfer
+	} else if (master_uptodate) {
+		consume_edns_expire(data, pkt, false);
+		finalize_timers(data);
+		char expires_in[32] = "";
+		fill_expires_in(expires_in, sizeof(expires_in), data);
+		REFRESH_LOG(LOG_INFO, data, LOG_DIRECTION_NONE,
+		            "remote serial %u, zone is up-to-date%s",
+		            remote_serial, expires_in);
+		return KNOT_STATE_DONE;
+	} else {
+		// Local serial is strictly newer than the remote's.
+		REFRESH_LOG(LOG_INFO, data, LOG_DIRECTION_NONE,
+		            "remote serial %u, remote is outdated", remote_serial);
+		return KNOT_STATE_FAIL;
+	}
+}
+
+/*!
+ * \brief Build the IXFR/AXFR query packet (layer produce callback).
+ *
+ * For IXFR, the authority section carries the local SOA with the stored
+ * master-side serial.
+ */
+static int transfer_produce(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	struct refresh_data *data = layer->data;
+
+	query_init_pkt(pkt);
+
+	bool ixfr = (data->xfr_type == XFR_TYPE_IXFR);
+
+	data->ret = knot_pkt_put_question(pkt, data->zone->name, KNOT_CLASS_IN,
+	                                  ixfr ? KNOT_RRTYPE_IXFR : KNOT_RRTYPE_AXFR);
+	if (data->ret != KNOT_EOK) {
+		return KNOT_STATE_FAIL;
+	}
+
+	if (ixfr) {
+		assert(data->soa);
+		knot_rrset_t *sending_soa = knot_rrset_copy(data->soa, data->mm);
+		uint32_t master_serial;
+		data->ret = slave_zone_serial(data->zone, data->conf, &master_serial);
+		if (data->ret != KNOT_EOK) {
+			data->fallback->remote = false;
+			xfr_log_read_ms(data->zone->name, data->ret);
+		}
+		// Single cleanup path for both allocation and serial failure.
+		if (sending_soa == NULL || data->ret != KNOT_EOK) {
+			knot_rrset_free(sending_soa, data->mm);
+			return KNOT_STATE_FAIL;
+		}
+		knot_soa_serial_set(sending_soa->rrs.rdata, master_serial);
+		knot_pkt_begin(pkt, KNOT_AUTHORITY);
+		knot_pkt_put(pkt, KNOT_COMPR_HINT_QNAME, sending_soa, 0);
+		knot_rrset_free(sending_soa, data->mm);
+	}
+
+	if (data->use_edns) {
+		data->ret = query_put_edns(pkt, &data->edns);
+		if (data->ret != KNOT_EOK) {
+			return KNOT_STATE_FAIL;
+		}
+	}
+
+	return KNOT_STATE_CONSUME;
+}
+
+/*!
+ * \brief Consume one response packet of an ongoing AXFR/IXFR transfer.
+ *
+ * Dispatches to the appropriate consumer; on completion, verifies TSIG
+ * coverage and finalizes/publishes the new zone contents. Sets
+ * data->fallback_axfr so the caller can fail over from IXFR to AXFR.
+ */
+static int transfer_consume(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	struct refresh_data *data = layer->data;
+
+	consume_edns_expire(data, pkt, true);
+	if (data->expire_timer < 2) {
+		// A nearly-expired master must not be used as a data source.
+		REFRESH_LOG(LOG_WARNING, data, LOG_DIRECTION_NONE,
+		            "remote is expired, ignoring");
+		return KNOT_STATE_IGNORE;
+	}
+
+	data->fallback_axfr = (data->xfr_type == XFR_TYPE_IXFR);
+
+	int next = (data->xfr_type == XFR_TYPE_AXFR) ? axfr_consume(pkt, data, false) :
+	                                               ixfr_consume(pkt, data);
+
+	// Transfer completed
+	if (next == KNOT_STATE_DONE) {
+		// Log transfer even if we still can fail
+		xfr_log_finished(data->zone->name,
+		                 data->xfr_type == XFR_TYPE_IXFR ||
+		                 data->xfr_type == XFR_TYPE_UPTODATE ?
+		                 LOG_OPERATION_IXFR : LOG_OPERATION_AXFR,
+		                 LOG_DIRECTION_IN, data->remote,
+		                 layer->flags & KNOT_REQUESTOR_REUSED,
+		                 &data->stats);
+
+		/*
+		 * TODO: Move finialization into finish
+		 * callback. And update requestor to allow reset from fallback
+		 * as we need IXFR to AXFR failover.
+		 */
+		// Every message of the transfer must have been TSIG-signed.
+		if (tsig_unsigned_count(layer->tsig) != 0) {
+			data->ret = KNOT_EMALF;
+			return KNOT_STATE_FAIL;
+		}
+
+		// Finalize and publish the zone
+		switch (data->xfr_type) {
+		case XFR_TYPE_IXFR:
+			data->ret = ixfr_finalize(data);
+			break;
+		case XFR_TYPE_AXFR:
+			data->ret = axfr_finalize(data);
+			break;
+		default:
+			// e.g. XFR_TYPE_UPTODATE: nothing to publish.
+			return next;
+		}
+		if (data->ret == KNOT_EOK) {
+			data->updated = true;
+		} else {
+			next = KNOT_STATE_FAIL;
+		}
+	}
+
+	return next;
+}
+
+/*!
+ * \brief Initialize the refresh state machine for one remote.
+ *
+ * With a locally known SOA we first probe the remote serial (and attempt
+ * IXFR); without one, a full AXFR is the only option.
+ */
+static int refresh_begin(knot_layer_t *layer, void *_data)
+{
+	struct refresh_data *data = _data;
+	layer->data = data;
+	data->layer = layer;
+
+	const bool have_local_soa = (data->soa != NULL);
+	data->state = have_local_soa ? STATE_SOA_QUERY : STATE_TRANSFER;
+	data->xfr_type = have_local_soa ? XFR_TYPE_IXFR : XFR_TYPE_AXFR;
+	data->initial_soa_copy = NULL;
+
+	data->started = time_now();
+
+	return KNOT_STATE_PRODUCE;
+}
+
+/*! \brief Produce the next outgoing packet according to the current state. */
+static int refresh_produce(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	struct refresh_data *data = layer->data;
+	data->layer = layer;
+
+	if (data->state == STATE_SOA_QUERY) {
+		return soa_query_produce(layer, pkt);
+	}
+	if (data->state == STATE_TRANSFER) {
+		return transfer_produce(layer, pkt);
+	}
+
+	return KNOT_STATE_FAIL;
+}
+
+/*! \brief Consume an incoming packet according to the current state. */
+static int refresh_consume(knot_layer_t *layer, knot_pkt_t *pkt)
+{
+	struct refresh_data *data = layer->data;
+	data->layer = layer;
+
+	// Received something, so trying another address is not needed.
+	data->fallback->address = false;
+
+	if (data->state == STATE_SOA_QUERY) {
+		return soa_query_consume(layer, pkt);
+	}
+	if (data->state == STATE_TRANSFER) {
+		return transfer_consume(layer, pkt);
+	}
+
+	return KNOT_STATE_FAIL;
+}
+
+/*! \brief On reset, simply produce the next query (state is kept in data). */
+static int refresh_reset(knot_layer_t *layer)
+{
+	(void)layer; // Unused; the interface is fixed by knot_layer_api_t.
+	return KNOT_STATE_PRODUCE;
+}
+
+/*! \brief Final layer callback: release transfer processing context. */
+static int refresh_finish(knot_layer_t *layer)
+{
+	struct refresh_data *data = layer->data;
+	data->layer = layer;
+
+	// clean processing context
+	axfr_cleanup(data);
+	ixfr_cleanup(data);
+
+	return KNOT_STATE_NOOP;
+}
+
+/*! \brief Layer callbacks implementing the zone refresh state machine. */
+static const knot_layer_api_t REFRESH_API = {
+	.begin = refresh_begin,
+	.produce = refresh_produce,
+	.consume = refresh_consume,
+	.reset = refresh_reset,
+	.finish = refresh_finish,
+};
+
+/*! \brief Return the configured maximum zone size (bytes) for the zone. */
+static size_t max_zone_size(conf_t *conf, const knot_dname_t *zone)
+{
+	conf_val_t limit = conf_zone_get(conf, C_ZONE_MAX_SIZE, zone);
+	return conf_int(&limit);
+}
+
+/*! \brief Shared context for consecutive try_refresh() attempts. */
+typedef struct {
+	bool force_axfr;  //!< Request a full transfer regardless of serials.
+	bool send_notify; //!< Set after a successful update to trigger NOTIFY.
+} try_refresh_ctx_t;
+
+/*!
+ * \brief Attempt a refresh (SOA check + possible transfer) against one master.
+ *
+ * \param conf      Server configuration.
+ * \param zone      Zone being refreshed.
+ * \param master    Remote master to query.
+ * \param ctx       try_refresh_ctx_t with force_axfr in, send_notify out.
+ * \param fallback  Fallback control shared with the master iteration logic.
+ *
+ * \return KNOT_EOK on success, error code otherwise.
+ */
+static int try_refresh(conf_t *conf, zone_t *zone, const conf_remote_t *master,
+                       void *ctx, zone_master_fallback_t *fallback)
+{
+	// TODO: Abstract interface to issue DNS queries. This is almost copy-pasted.
+
+	assert(zone);
+	assert(master);
+	assert(ctx);
+	assert(fallback);
+
+	try_refresh_ctx_t *trctx = ctx;
+
+	knot_rrset_t soa = { 0 };
+	if (zone->contents) {
+		soa = node_rrset(zone->contents->apex, KNOT_RRTYPE_SOA);
+	}
+
+	struct refresh_data data = {
+		.zone = zone,
+		.conf = conf,
+		.remote = (struct sockaddr *)&master->addr,
+		// No local SOA (or forced retransfer) means AXFR from scratch.
+		.soa = zone->contents && !trctx->force_axfr ? &soa : NULL,
+		.max_zone_size = max_zone_size(conf, zone->name),
+		.use_edns = !master->no_edns,
+		.edns = query_edns_data_init(conf, master->addr.ss_family,
+		                             QUERY_EDNS_OPT_EXPIRE),
+		.expire_timer = EXPIRE_TIMER_INVALID,
+		.fallback = fallback,
+		.fallback_axfr = false, // will be set upon IXFR consume
+	};
+
+	knot_requestor_t requestor;
+	knot_requestor_init(&requestor, &REFRESH_API, &data, NULL);
+
+	knot_pkt_t *pkt = knot_pkt_new(NULL, KNOT_WIRE_MAX_PKTSIZE, NULL);
+	if (!pkt) {
+		knot_requestor_clear(&requestor);
+		return KNOT_ENOMEM;
+	}
+
+	const struct sockaddr_storage *dst = &master->addr;
+	const struct sockaddr_storage *src = &master->via;
+	knot_request_flag_t flags = conf->cache.srv_tcp_fastopen ? KNOT_REQUEST_TFO : 0;
+	knot_request_t *req = knot_request_make(NULL, dst, src, pkt, &master->key, flags);
+	if (!req) {
+		knot_request_free(req, NULL);
+		knot_requestor_clear(&requestor);
+		return KNOT_ENOMEM;
+	}
+
+	int timeout = conf->cache.srv_tcp_remote_io_timeout;
+
+	int ret;
+
+	// while loop runs 0x or 1x; IXFR to AXFR failover
+	while (ret = knot_requestor_exec(&requestor, req, timeout),
+	       ret = (data.ret == KNOT_EOK ? ret : data.ret),
+	       data.fallback_axfr && ret != KNOT_EOK) {
+		REFRESH_LOG(LOG_WARNING, &data, LOG_DIRECTION_IN,
+		            "fallback to AXFR (%s)", knot_strerror(ret));
+		ixfr_cleanup(&data);
+		data.ret = KNOT_EOK;
+		data.xfr_type = XFR_TYPE_AXFR;
+		data.fallback_axfr = false; // fixed: was a stray comma operator
+		requestor.layer.state = KNOT_STATE_RESET;
+		requestor.layer.flags |= KNOT_REQUESTOR_CLOSE;
+	}
+	knot_request_free(req, NULL);
+	knot_requestor_clear(&requestor);
+
+	if (ret == KNOT_EOK) {
+		trctx->send_notify = data.updated && !master->block_notify_after_xfr;
+		trctx->force_axfr = false;
+	}
+
+	return ret;
+}
+
+/*!
+ * \brief Zone event handler: refresh a slave zone from its masters.
+ *
+ * On failure the next retry time is derived from the SOA RETRY field (or a
+ * bootstrap backoff when no SOA exists), clamped by configured limits.
+ * Always replans timer-based events; schedules NOTIFY after an update.
+ */
+int event_refresh(conf_t *conf, zone_t *zone)
+{
+	assert(zone);
+
+	if (!zone_is_slave(conf, zone)) {
+		return KNOT_ENOTSUP;
+	}
+
+	try_refresh_ctx_t trctx = { 0 };
+
+	// TODO: Flag on zone is ugly. Event specific parameters would be nice.
+	if (zone_get_flag(zone, ZONE_FORCE_AXFR, true)) {
+		trctx.force_axfr = true;
+		zone->zonefile.retransfer = true;
+	}
+
+	int ret = zone_master_try(conf, zone, try_refresh, &trctx, "refresh");
+	zone_clear_preferred_master(zone);
+	if (ret != KNOT_EOK) {
+		const knot_rdataset_t *soa = zone_soa(zone);
+		uint32_t next;
+
+		if (soa) {
+			next = knot_soa_retry(soa->rdata);
+		} else {
+			// Bootstrap: no zone yet, use the increasing backoff counter.
+			next = bootstrap_next(&zone->zonefile.bootstrap_cnt);
+		}
+
+		limit_timer(conf, zone->name, &next, "retry",
+		            C_RETRY_MIN_INTERVAL, C_RETRY_MAX_INTERVAL);
+		zone->timers.next_refresh = time(NULL) + next;
+		zone->timers.last_refresh_ok = false;
+
+		char time_str[64] = { 0 };
+		// NOTE(review): variable is named time_gm but filled via
+		// localtime_r() (local time) — naming looks stale; verify intent.
+		struct tm time_gm = { 0 };
+		localtime_r(&zone->timers.next_refresh, &time_gm);
+		strftime(time_str, sizeof(time_str), KNOT_LOG_TIME_FORMAT, &time_gm);
+
+		log_zone_error(zone->name, "refresh, failed (%s), next retry at %s",
+		               knot_strerror(ret), time_str);
+	} else {
+		zone->zonefile.bootstrap_cnt = 0;
+	}
+
+	/* Reschedule events. */
+	replan_from_timers(conf, zone);
+	if (trctx.send_notify) {
+		zone_schedule_notify(zone, 1);
+	}
+
+	return ret;
+}
diff --git a/src/knot/events/handlers/update.c b/src/knot/events/handlers/update.c
new file mode 100644
index 0000000..f337eb5
--- /dev/null
+++ b/src/knot/events/handlers/update.c
@@ -0,0 +1,433 @@
+/* Copyright (C) 2022 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+
+#include "knot/events/handlers.h"
+#include "knot/nameserver/log.h"
+#include "knot/nameserver/process_query.h"
+#include "knot/query/capture.h"
+#include "knot/query/requestor.h"
+#include "knot/updates/ddns.h"
+#include "knot/zone/digest.h"
+#include "knot/zone/zone.h"
+#include "libdnssec/random.h"
+#include "libknot/libknot.h"
+#include "contrib/net.h"
+#include "contrib/time.h"
+
+/*! \brief Log a DDNS message attributed to the query QNAME and remote address. */
+#define UPDATE_LOG(priority, qdata, fmt...) \
+	ns_log(priority, knot_pkt_qname(qdata->query), LOG_OPERATION_UPDATE, \
+	       LOG_DIRECTION_IN, (struct sockaddr *)knotd_qdata_remote_addr(qdata), \
+	       false, fmt)
+
+/*!
+ * \brief Populate a query-data structure from a queued DDNS request.
+ *
+ * Both qdata and extra are zeroed first; extra is linked into qdata and
+ * carries the zone reference.
+ */
+static void init_qdata_from_request(knotd_qdata_t *qdata,
+                                    zone_t *zone,
+                                    knot_request_t *req,
+                                    knotd_qdata_params_t *params,
+                                    knotd_qdata_extra_t *extra)
+{
+	memset(qdata, 0, sizeof(*qdata));
+	memset(extra, 0, sizeof(*extra));
+
+	qdata->params = params;
+	qdata->query = req->query;
+	qdata->sign = req->sign;
+	qdata->extra = extra;
+	qdata->extra->zone = zone;
+}
+
+/*!
+ * \brief Verify the prerequisite section of one DDNS request.
+ *
+ * On failure, logs a warning and writes the RCODE into the response wire.
+ */
+static int check_prereqs(knot_request_t *request,
+                         const zone_t *zone, zone_update_t *update,
+                         knotd_qdata_t *qdata)
+{
+	uint16_t rcode = KNOT_RCODE_NOERROR;
+	const int ret = ddns_process_prereqs(request->query, update, &rcode);
+	if (ret == KNOT_EOK) {
+		return KNOT_EOK;
+	}
+
+	UPDATE_LOG(LOG_WARNING, qdata, "prerequisites not met (%s)",
+	           knot_strerror(ret));
+	assert(rcode != KNOT_RCODE_NOERROR);
+	knot_wire_set_rcode(request->resp->wire, rcode);
+
+	return ret;
+}
+
+/*!
+ * \brief Apply one DDNS request into the shared zone update.
+ *
+ * On failure, logs a warning and writes the RCODE into the response wire.
+ */
+static int process_single_update(knot_request_t *request,
+                                 const zone_t *zone, zone_update_t *update,
+                                 knotd_qdata_t *qdata)
+{
+	uint16_t rcode = KNOT_RCODE_NOERROR;
+	const int ret = ddns_process_update(zone, request->query, update, &rcode);
+	if (ret == KNOT_EOK) {
+		return KNOT_EOK;
+	}
+
+	UPDATE_LOG(LOG_WARNING, qdata, "failed to apply (%s)",
+	           knot_strerror(ret));
+	assert(rcode != KNOT_RCODE_NOERROR);
+	knot_wire_set_rcode(request->resp->wire, rcode);
+
+	return ret;
+}
+
+/*! \brief Set the given RCODE on all responses that are still NOERROR. */
+static void set_rcodes(list_t *requests, const uint16_t rcode)
+{
+	ptrnode_t *item;
+	WALK_LIST(item, *requests) {
+		knot_request_t *req = item->d;
+		uint8_t *wire = req->resp->wire;
+		// Keep any RCODE already recorded for this request.
+		if (knot_wire_get_rcode(wire) == KNOT_RCODE_NOERROR) {
+			knot_wire_set_rcode(wire, rcode);
+		}
+	}
+}
+
+/*!
+ * \brief Fold all queued DDNS requests into one zone update.
+ *
+ * A failed prerequisite skips only that request; a failed apply aborts
+ * the whole batch.
+ */
+static int process_bulk(zone_t *zone, list_t *requests, zone_update_t *up)
+{
+	ptrnode_t *item;
+	WALK_LIST(item, *requests) {
+		knot_request_t *req = item->d;
+
+		// Per-request qdata is needed only for log attribution.
+		knotd_qdata_params_t params = {
+			.remote = &req->remote
+		};
+		knotd_qdata_t qdata;
+		knotd_qdata_extra_t extra;
+		init_qdata_from_request(&qdata, zone, req, &params, &extra);
+
+		if (check_prereqs(req, zone, up, &qdata) != KNOT_EOK) {
+			continue; // Skip updates with failed prereqs.
+		}
+
+		int ret = process_single_update(req, zone, up, &qdata);
+		if (ret != KNOT_EOK) {
+			return ret;
+		}
+	}
+
+	return KNOT_EOK;
+}
+
+/*!
+ * \brief Process all DDNS requests into a single committed zone update.
+ *
+ * Pipeline: init update -> apply requests -> verify digest -> sign (or
+ * bump SOA + regenerate ZONEMD) -> commit. Any failure clears the update
+ * and stamps SERVFAIL (or REFUSED on zone-size overflow) on all responses.
+ */
+static int process_normal(conf_t *conf, zone_t *zone, list_t *requests)
+{
+	assert(requests);
+
+	// Init zone update structure
+	zone_update_t up;
+	int ret = zone_update_init(&up, zone, UPDATE_INCREMENTAL | UPDATE_NO_CHSET);
+	if (ret != KNOT_EOK) {
+		set_rcodes(requests, KNOT_RCODE_SERVFAIL);
+		return ret;
+	}
+
+	// Process all updates.
+	ret = process_bulk(zone, requests, &up);
+	if (ret == KNOT_EOK) {
+		ret = zone_update_verify_digest(conf, &up);
+	}
+	if (ret != KNOT_EOK) {
+		zone_update_clear(&up);
+		set_rcodes(requests, KNOT_RCODE_SERVFAIL);
+		return ret;
+	}
+
+	// Sign update.
+	conf_val_t val = conf_zone_get(conf, C_DNSSEC_SIGNING, zone->name);
+	bool dnssec_enable = conf_bool(&val);
+	val = conf_zone_get(conf, C_ZONEMD_GENERATE, zone->name);
+	unsigned digest_alg = conf_opt(&val);
+	if (dnssec_enable) {
+		ret = knot_dnssec_sign_update(&up, conf);
+	} else if (digest_alg != ZONE_DIGEST_NONE) {
+		// Without signing, the SOA serial must still change so the
+		// regenerated ZONEMD refers to a new version.
+		if (zone_update_to(&up) == NULL) {
+			ret = zone_update_increment_soa(&up, conf);
+		}
+		if (ret == KNOT_EOK) {
+			ret = zone_update_add_digest(&up, digest_alg, false);
+		}
+	}
+	if (ret != KNOT_EOK) {
+		zone_update_clear(&up);
+		set_rcodes(requests, KNOT_RCODE_SERVFAIL);
+		return ret;
+	}
+
+	// Apply changes.
+	ret = zone_update_commit(conf, &up);
+	if (ret != KNOT_EOK) {
+		zone_update_clear(&up);
+		if (ret == KNOT_EZONESIZE) {
+			// Zone grew past the configured limit: client's fault.
+			set_rcodes(requests, KNOT_RCODE_REFUSED);
+		} else {
+			set_rcodes(requests, KNOT_RCODE_SERVFAIL);
+		}
+		return ret;
+	}
+
+	return KNOT_EOK;
+}
+
+/*!
+ * \brief Process queued DDNS requests locally and log the outcome.
+ *
+ * Schedules a NOTIFY only when the zone serial actually changed.
+ */
+static void process_requests(conf_t *conf, zone_t *zone, list_t *requests)
+{
+	assert(zone);
+	assert(requests);
+
+	// Remember where we started from, for logging and change detection.
+	struct timespec started = time_now();
+	const uint32_t serial_before = zone_contents_serial(zone->contents);
+
+	int ret = process_normal(conf, zone, requests);
+	if (ret != KNOT_EOK) {
+		log_zone_error(zone->name, "DDNS, processing failed (%s)",
+		               knot_strerror(ret));
+		return;
+	}
+
+	const uint32_t serial_after = zone_contents_serial(zone->contents);
+	if (serial_after == serial_before) {
+		log_zone_info(zone->name, "DDNS, finished, no changes to the zone were made");
+		return;
+	}
+
+	struct timespec finished = time_now();
+	log_zone_info(zone->name, "DDNS, finished, serial %u -> %u, "
+	              "%.02f seconds", serial_before, serial_after,
+	              time_diff_ms(&started, &finished) / 1000.0);
+
+	zone_schedule_notify(zone, 1);
+}
+
+/*!
+ * \brief Forward one DDNS request to a remote master, capturing the reply.
+ *
+ * The query is duplicated with a fresh message ID (and re-appended TSIG),
+ * then sent via the requestor with a capture layer that stores the master's
+ * response into request->resp.
+ *
+ * \return KNOT_EOK on success, error code otherwise.
+ */
+static int remote_forward(conf_t *conf, knot_request_t *request, conf_remote_t *remote)
+{
+	/* Copy request and assign new ID. */
+	knot_pkt_t *query = knot_pkt_new(NULL, request->query->max_size, NULL);
+	if (query == NULL) {
+		// Fix: check the allocation instead of passing NULL onward.
+		return KNOT_ENOMEM;
+	}
+	int ret = knot_pkt_copy(query, request->query);
+	if (ret != KNOT_EOK) {
+		knot_pkt_free(query);
+		return ret;
+	}
+	knot_wire_set_id(query->wire, dnssec_random_uint16_t());
+	knot_tsig_append(query->wire, &query->size, query->max_size, query->tsig_rr);
+
+	/* Prepare packet capture layer. */
+	const knot_layer_api_t *capture = query_capture_api();
+	struct capture_param capture_param = {
+		.sink = request->resp
+	};
+
+	/* Create requestor instance. */
+	knot_requestor_t re;
+	ret = knot_requestor_init(&re, capture, &capture_param, NULL);
+	if (ret != KNOT_EOK) {
+		knot_pkt_free(query);
+		return ret;
+	}
+
+	/* Create a request. */
+	const struct sockaddr_storage *dst = &remote->addr;
+	const struct sockaddr_storage *src = &remote->via;
+	knot_request_flag_t flags = conf->cache.srv_tcp_fastopen ? KNOT_REQUEST_TFO : 0;
+	knot_request_t *req = knot_request_make(re.mm, dst, src, query, NULL, flags);
+	if (req == NULL) {
+		knot_requestor_clear(&re);
+		knot_pkt_free(query);
+		return KNOT_ENOMEM;
+	}
+
+	/* Execute the request. */
+	int timeout = conf->cache.srv_tcp_remote_io_timeout;
+	ret = knot_requestor_exec(&re, req, timeout);
+
+	knot_request_free(req, re.mm);
+	knot_requestor_clear(&re);
+
+	return ret;
+}
+
+/*!
+ * \brief Forward a DDNS request to the zone's DDNS master (or first master).
+ *
+ * Tries each configured address of the chosen remote until one succeeds,
+ * restores the original message ID/TSIG on the response, and sets SERVFAIL
+ * if every attempt failed.
+ */
+static void forward_request(conf_t *conf, zone_t *zone, knot_request_t *request)
+{
+	/* Read the ddns master or the first master. */
+	conf_val_t remote = conf_zone_get(conf, C_DDNS_MASTER, zone->name);
+	if (remote.code != KNOT_EOK) {
+		remote = conf_zone_get(conf, C_MASTER, zone->name);
+	}
+
+	/* Get the number of remote addresses. */
+	conf_val_t addr = conf_id_get(conf, C_RMT, C_ADDR, &remote);
+	size_t addr_count = conf_val_count(&addr);
+	assert(addr_count > 0);
+
+	/* Try all remote addresses to forward the request to. */
+	int ret = KNOT_EOK;
+	for (size_t i = 0; i < addr_count; i++) {
+		conf_remote_t master = conf_remote(conf, &remote, i);
+
+		ret = remote_forward(conf, request, &master);
+		if (ret == KNOT_EOK) {
+			break;
+		}
+	}
+
+	/* Restore message ID and TSIG. */
+	knot_wire_set_id(request->resp->wire, knot_wire_get_id(request->query->wire));
+	knot_tsig_append(request->resp->wire, &request->resp->size,
+	                 request->resp->max_size, request->resp->tsig_rr);
+
+	/* Set RCODE if forwarding failed. */
+	if (ret != KNOT_EOK) {
+		knot_wire_set_rcode(request->resp->wire, KNOT_RCODE_SERVFAIL);
+		log_zone_error(zone->name, "DDNS, failed to forward updates to the master (%s)",
+		               knot_strerror(ret));
+	} else {
+		log_zone_info(zone->name, "DDNS, updates forwarded to the master");
+	}
+}
+
+/*! \brief Forward every queued DDNS request to the zone's master. */
+static void forward_requests(conf_t *conf, zone_t *zone, list_t *requests)
+{
+	assert(zone);
+	assert(requests);
+
+	ptrnode_t *item;
+	WALK_LIST(item, *requests) {
+		forward_request(conf, zone, (knot_request_t *)item->d);
+	}
+}
+
+/*!
+ * \brief Send the prepared DDNS response back to the client.
+ *
+ * Locally processed responses (non-slave zones) get TSIG-signed first;
+ * forwarded responses already carry the master's TSIG. Send errors are
+ * deliberately ignored (best effort).
+ */
+static void send_update_response(conf_t *conf, zone_t *zone, knot_request_t *req)
+{
+	if (req->resp == NULL) {
+		return;
+	}
+
+	if (!zone_is_slave(conf, zone)) {
+		// Sign the response with TSIG where applicable
+		knotd_qdata_t qdata;
+		knotd_qdata_extra_t extra;
+		init_qdata_from_request(&qdata, zone, req, NULL, &extra);
+
+		(void)process_query_sign_response(req->resp, &qdata);
+	}
+
+	if (net_is_stream(req->fd)) {
+		net_dns_tcp_send(req->fd, req->resp->wire, req->resp->size,
+		                 conf->cache.srv_tcp_remote_io_timeout, NULL);
+	} else {
+		net_dgram_send(req->fd, req->resp->wire, req->resp->size,
+		               &req->remote);
+	}
+}
+
+/*! \brief Release one queued DDNS request: socket, packets, TSIG secret. */
+static void free_request(knot_request_t *req)
+{
+	close(req->fd);
+	knot_pkt_free(req->query);
+	knot_pkt_free(req->resp);
+	dnssec_binary_free(&req->sign.tsig_key.secret);
+	free(req);
+}
+
+/*! \brief Send all responses, then free the requests and the list itself. */
+static void send_update_responses(conf_t *conf, zone_t *zone, list_t *updates)
+{
+	ptrnode_t *item, *next;
+	WALK_LIST_DELSAFE(item, next, *updates) {
+		knot_request_t *req = item->d;
+		send_update_response(conf, zone, req);
+		free_request(req);
+	}
+	ptrlist_free(updates, NULL);
+}
+
+/*!
+ * \brief Allocate and initialize a response packet for each queued request.
+ *
+ * \return KNOT_EOK, or KNOT_ENOMEM on the first failed allocation (already
+ *         initialized responses are kept for the caller to send/clean up).
+ */
+static int init_update_responses(list_t *updates)
+{
+	ptrnode_t *item, *next;
+	WALK_LIST_DELSAFE(item, next, *updates) {
+		knot_request_t *req = item->d;
+
+		req->resp = knot_pkt_new(NULL, KNOT_WIRE_MAX_PKTSIZE, NULL);
+		if (req->resp == NULL) {
+			return KNOT_ENOMEM;
+		}
+
+		assert(req->query);
+		knot_pkt_init_response(req->resp, req->query);
+	}
+
+	return KNOT_EOK;
+}
+
+/*!
+ * \brief Atomically take over the zone's pending DDNS queue.
+ *
+ * The whole queue is moved into \a updates under the ddns_lock, leaving
+ * the zone's queue empty.
+ *
+ * \return Number of dequeued requests (0 if the queue was already empty,
+ *         e.g. after losing a race during reload).
+ */
+static size_t update_dequeue(zone_t *zone, list_t *updates)
+{
+	assert(zone);
+	assert(updates);
+
+	pthread_mutex_lock(&zone->ddns_lock);
+
+	if (EMPTY_LIST(zone->ddns_queue)) {
+		/* Lost race during reload. */
+		pthread_mutex_unlock(&zone->ddns_lock);
+		return 0;
+	}
+
+	// Shallow-copy the list head; re-init the source so it stays valid.
+	*updates = zone->ddns_queue;
+	size_t update_count = zone->ddns_queue_size;
+	init_list(&zone->ddns_queue);
+	zone->ddns_queue_size = 0;
+
+	pthread_mutex_unlock(&zone->ddns_lock);
+
+	return update_count;
+}
+
+/*!
+ * \brief Zone event handler: process or forward all pending DDNS updates.
+ *
+ * Slave zones forward the requests to the master; otherwise they are
+ * applied locally. Responses are always sent and the queue freed.
+ */
+int event_update(conf_t *conf, zone_t *zone)
+{
+	assert(zone);
+
+	/* Get list of pending updates. */
+	list_t updates;
+	size_t update_count = update_dequeue(zone, &updates);
+	if (update_count == 0) {
+		return KNOT_EOK;
+	}
+
+	/* Init updates responses. */
+	int ret = init_update_responses(&updates);
+	if (ret != KNOT_EOK) {
+		/* Send what responses we can. */
+		set_rcodes(&updates, KNOT_RCODE_SERVFAIL);
+		send_update_responses(conf, zone, &updates);
+		return ret;
+	}
+
+	/* Process update list - forward if zone has master, or execute.
+	   RCODEs are set. */
+	if (zone_is_slave(conf, zone)) {
+		log_zone_info(zone->name,
+		              "DDNS, forwarding %zu updates", update_count);
+		forward_requests(conf, zone, &updates);
+	} else {
+		log_zone_info(zone->name,
+		              "DDNS, processing %zu updates", update_count);
+		process_requests(conf, zone, &updates);
+	}
+
+	/* Send responses. */
+	send_update_responses(conf, zone, &updates);
+
+	return KNOT_EOK;
+}
diff --git a/src/knot/events/replan.c b/src/knot/events/replan.c
new file mode 100644
index 0000000..ed03fe1
--- /dev/null
+++ b/src/knot/events/replan.c
@@ -0,0 +1,210 @@
+/* Copyright (C) 2023 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#include <assert.h>
+#include <time.h>
+
+#include "knot/dnssec/kasp/kasp_db.h"
+#include "knot/events/replan.h"
+
+#define TIME_CANCEL 0
+#define TIME_IGNORE (-1)
+
+/*!
+ * \brief Move DDNS queue from old zone to new zone and replan if necessary.
+ *
+ * New zone will contain references from the old zone. New zone will free
+ * the data.
+ */
+static void replan_ddns(zone_t *zone, zone_t *old_zone)
+{
+	if (old_zone->ddns_queue_size == 0) {
+		return;
+	}
+
+	// Re-reference each queued request in the new zone, then drop the
+	// old list skeleton — the request data itself is shared.
+	ptrnode_t *item;
+	WALK_LIST(item, old_zone->ddns_queue) {
+		ptrlist_add(&zone->ddns_queue, item->d, NULL);
+	}
+	zone->ddns_queue_size = old_zone->ddns_queue_size;
+	ptrlist_free(&old_zone->ddns_queue, NULL);
+
+	zone_events_schedule_now(zone, ZONE_EVENT_UPDATE);
+}
+
+/*!
+ * \brief Replan events that are already planned for the old zone.
+ *
+ * \notice Preserves notifailed.
+ */
+static void replan_from_zone(zone_t *zone, zone_t *old_zone)
+{
+	assert(zone);
+	assert(old_zone);
+
+	replan_ddns(zone, old_zone);
+
+	// Events carried over verbatim; terminated by ZONE_EVENT_INVALID.
+	const zone_event_type_t types[] = {
+		ZONE_EVENT_REFRESH,
+		ZONE_EVENT_FLUSH,
+		ZONE_EVENT_BACKUP,
+		ZONE_EVENT_NOTIFY,
+		ZONE_EVENT_UFREEZE,
+		ZONE_EVENT_UTHAW,
+		ZONE_EVENT_INVALID
+	};
+
+	for (const zone_event_type_t *type = types; *type != ZONE_EVENT_INVALID; type++) {
+		time_t when = zone_events_get_time(old_zone, *type);
+		// Only reschedule events that were actually planned (> 0).
+		if (when > 0) {
+			zone_events_schedule_at(zone, *type, when);
+		}
+	}
+}
+
+/*!
+ * \brief Replan DNSSEC if automatic signing enabled.
+ *
+ * This is required as the configuration could have changed.
+ */
+static void replan_dnssec(conf_t *conf, zone_t *zone)
+{
+	assert(conf);
+	assert(zone);
+
+	conf_val_t signing = conf_zone_get(conf, C_DNSSEC_SIGNING, zone->name);
+	if (!conf_bool(&signing)) {
+		return; // Automatic signing disabled, nothing to replan.
+	}
+
+	zone_events_schedule_now(zone, ZONE_EVENT_DNSSEC);
+}
+
+/*!
+ * \brief Replan events that depend on zone timers (REFRESH, EXPIRE, FLUSH, RESALT, PARENT DS QUERY).
+ */
+void replan_from_timers(conf_t *conf, zone_t *zone)
+{
+	assert(conf);
+	assert(zone);
+
+	time_t now = time(NULL);
+
+	// Sentinels: TIME_CANCEL unschedules the event, TIME_IGNORE keeps
+	// whatever is currently planned.
+	time_t refresh = TIME_CANCEL;
+	if (zone_is_slave(conf, zone)) {
+		refresh = zone->timers.next_refresh;
+		if (zone->contents == NULL && zone->timers.last_refresh_ok) { // zone disappeared w/o expiry
+			refresh = now;
+		}
+		assert(refresh > 0);
+	}
+
+	time_t expire_pre = TIME_IGNORE;
+	time_t expire = TIME_IGNORE;
+	if (zone_is_slave(conf, zone) && zone->contents != NULL) {
+		// Cancel first, then re-plan, so a stale expire never survives.
+		expire_pre = TIME_CANCEL;
+		expire = zone->timers.next_expire;
+	}
+
+	time_t flush = TIME_IGNORE;
+	if (!zone_is_slave(conf, zone) || zone->contents != NULL) {
+		conf_val_t val = conf_zone_get(conf, C_ZONEFILE_SYNC, zone->name);
+		int64_t sync_timeout = conf_int(&val);
+		if (sync_timeout > 0) {
+			flush = zone->timers.last_flush + sync_timeout;
+		}
+	}
+
+	time_t resalt = TIME_IGNORE;
+	time_t ds_check = TIME_CANCEL;
+	time_t ds_push = TIME_CANCEL;
+	conf_val_t val = conf_zone_get(conf, C_DNSSEC_SIGNING, zone->name);
+	if (conf_bool(&val)) {
+		conf_val_t policy = conf_zone_get(conf, C_DNSSEC_POLICY, zone->name);
+		conf_id_fix_default(&policy);
+		val = conf_id_get(conf, C_POLICY, C_NSEC3, &policy);
+		if (conf_bool(&val)) {
+			knot_time_t last_resalt = 0;
+			if (knot_lmdb_open(zone_kaspdb(zone)) == KNOT_EOK) {
+				(void)kasp_db_load_nsec3salt(zone_kaspdb(zone), zone->name, NULL, &last_resalt);
+			}
+			// No recorded salt means resalt immediately; otherwise
+			// plan from the last resalt plus configured lifetime.
+			if (last_resalt == 0) {
+				resalt = now;
+			} else {
+				val = conf_id_get(conf, C_POLICY, C_NSEC3_SALT_LIFETIME, &policy);
+				if (conf_int(&val) > 0) {
+					resalt = last_resalt + conf_int(&val);
+				}
+			}
+		}
+
+		// Stored zero means "not planned": keep existing schedule.
+		ds_check = zone->timers.next_ds_check;
+		if (ds_check == 0) {
+			ds_check = TIME_IGNORE;
+		}
+		ds_push = zone->timers.next_ds_push;
+		if (ds_push == 0) {
+			ds_push = TIME_IGNORE;
+		}
+	}
+
+	zone_events_schedule_at(zone,
+	                        ZONE_EVENT_REFRESH, refresh,
+	                        ZONE_EVENT_EXPIRE, expire_pre,
+	                        ZONE_EVENT_EXPIRE, expire,
+	                        ZONE_EVENT_FLUSH, flush,
+	                        ZONE_EVENT_DNSSEC, resalt,
+	                        ZONE_EVENT_DS_CHECK, ds_check,
+	                        ZONE_EVENT_DS_PUSH, ds_push);
+}
+
+/*! \brief Plan the first load of a newly added zone. */
+void replan_load_new(zone_t *zone, bool gen_catalog)
+{
+	if (!gen_catalog) {
+		/* Enqueue directly, make first load waitable,
+		 * other events will cascade from load. */
+		zone_events_enqueue(zone, ZONE_EVENT_LOAD);
+		return;
+	}
+
+	/* Catalog generation must wait until the zonedb
+	 * is fully created. */
+	zone_events_schedule_now(zone, ZONE_EVENT_LOAD);
+}
+
+/*! \brief Bootstrapping slave zone: only timer-based events apply yet. */
+void replan_load_bootstrap(conf_t *conf, zone_t *zone)
+{
+	replan_from_timers(conf, zone);
+}
+
+/*! \brief Replan events for an unchanged zone surviving a reload. */
+void replan_load_current(conf_t *conf, zone_t *zone, zone_t *old_zone)
+{
+	replan_from_zone(zone, old_zone);
+
+	if (zone->contents == NULL && !zone_expired(zone)) {
+		// No contents and not expired: (re)load has to happen first.
+		zone_events_schedule_now(zone, ZONE_EVENT_LOAD);
+		return;
+	}
+
+	replan_from_timers(conf, zone);
+	replan_dnssec(conf, zone);
+}
+
+/*! \brief Replan events for a zone whose configuration/content changed. */
+void replan_load_updated(zone_t *zone, zone_t *old_zone)
+{
+	zone_notifailed_clear(zone);
+	replan_from_zone(zone, old_zone);
+
+	// Other events will cascade from the load event.
+	zone_events_schedule_now(zone, ZONE_EVENT_LOAD);
+}
diff --git a/src/knot/events/replan.h b/src/knot/events/replan.h
new file mode 100644
index 0000000..62ebeb2
--- /dev/null
+++ b/src/knot/events/replan.h
@@ -0,0 +1,35 @@
+/* Copyright (C) 2023 CZ.NIC, z.s.p.o. <knot-dns@labs.nic.cz>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#pragma once
+
+#include "knot/conf/conf.h"
+#include "knot/zone/zone.h"
+
+/*!
+ * \brief Replan timer dependent refresh, expire, and flush.
+ */
+void replan_from_timers(conf_t *conf, zone_t *zone);
+
+/*!
+ * \defgroup replan_load Replan timers after zone load or reload.
+ * @{
+ */
+void replan_load_new(zone_t *zone, bool gen_catalog);
+void replan_load_bootstrap(conf_t *conf, zone_t *zone);
+void replan_load_current(conf_t *conf, zone_t *zone, zone_t *old_zone);
+void replan_load_updated(zone_t *zone, zone_t *old_zone);
+/*! @} */