summaryrefslogtreecommitdiffstats
path: root/mgmtd
diff options
context:
space:
mode:
Diffstat (limited to 'mgmtd')
-rw-r--r--mgmtd/.gitignore1
-rw-r--r--mgmtd/Makefile10
-rw-r--r--mgmtd/mgmt.c77
-rw-r--r--mgmtd/mgmt.h110
-rw-r--r--mgmtd/mgmt_be_adapter.c935
-rw-r--r--mgmtd/mgmt_be_adapter.h193
-rw-r--r--mgmtd/mgmt_defines.h58
-rw-r--r--mgmtd/mgmt_ds.c546
-rw-r--r--mgmtd/mgmt_ds.h336
-rw-r--r--mgmtd/mgmt_fe_adapter.c1402
-rw-r--r--mgmtd/mgmt_fe_adapter.h152
-rw-r--r--mgmtd/mgmt_history.c379
-rw-r--r--mgmtd/mgmt_history.h97
-rw-r--r--mgmtd/mgmt_main.c287
-rw-r--r--mgmtd/mgmt_memory.c33
-rw-r--r--mgmtd/mgmt_memory.h29
-rw-r--r--mgmtd/mgmt_txn.c2644
-rw-r--r--mgmtd/mgmt_txn.h243
-rw-r--r--mgmtd/mgmt_vty.c507
-rw-r--r--mgmtd/subdir.am68
20 files changed, 8107 insertions, 0 deletions
diff --git a/mgmtd/.gitignore b/mgmtd/.gitignore
new file mode 100644
index 0000000..7ce107e
--- /dev/null
+++ b/mgmtd/.gitignore
@@ -0,0 +1 @@
+mgmtd
diff --git a/mgmtd/Makefile b/mgmtd/Makefile
new file mode 100644
index 0000000..d69ec5f
--- /dev/null
+++ b/mgmtd/Makefile
@@ -0,0 +1,10 @@
+all: ALWAYS
+ @$(MAKE) -s -C .. mgmtd/mgmtd
+%: ALWAYS
+ @$(MAKE) -s -C .. mgmtd/$@
+
+Makefile:
+ #nothing
+ALWAYS:
+.PHONY: ALWAYS makefiles
+.SUFFIXES:
diff --git a/mgmtd/mgmt.c b/mgmtd/mgmt.c
new file mode 100644
index 0000000..77c4473
--- /dev/null
+++ b/mgmtd/mgmt.c
@@ -0,0 +1,77 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * FRR Management Daemon (MGMTD) program
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar
+ */
+
+#include <zebra.h>
+#include "debug.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_memory.h"
+
+struct debug mgmt_debug_be = {.desc = "Management backend adapter"};
+struct debug mgmt_debug_ds = {.desc = "Management datastore"};
+struct debug mgmt_debug_fe = {.desc = "Management frontend adapter"};
+struct debug mgmt_debug_txn = {.desc = "Management transaction"};
+
+/* MGMTD process wide configuration. */
+static struct mgmt_master mgmt_master;
+
+/* MGMTD process wide configuration pointer to export. */
+struct mgmt_master *mm;
+
+void mgmt_master_init(struct event_loop *master, const int buffer_size)
+{
+ memset(&mgmt_master, 0, sizeof(struct mgmt_master));
+
+ mm = &mgmt_master;
+ mm->master = master;
+ mm->terminating = false;
+ mm->socket_buffer = buffer_size;
+ mm->perf_stats_en = true;
+}
+
+void mgmt_init(void)
+{
+
+ /* Initialize datastores */
+ mgmt_ds_init(mm);
+
+ /* Initialize history */
+ mgmt_history_init();
+
+ /* Initialize MGMTD Transaction module */
+ mgmt_txn_init(mm, mm->master);
+
+ /* Initialize the MGMTD Frontend Adapter Module */
+ mgmt_fe_adapter_init(mm->master);
+
+ /* Initialize the CLI frontend client */
+ vty_init_mgmt_fe();
+
+ /* MGMTD VTY commands installation. */
+ mgmt_vty_init();
+
+ /*
+ * Initialize the MGMTD Backend Adapter Module
+ *
+ * We do this after the FE stuff so that we always read our config file
+ * prior to any BE connection.
+ */
+ mgmt_be_adapter_init(mm->master);
+}
+
+void mgmt_terminate(void)
+{
+ mgmt_fe_adapter_destroy();
+ mgmt_be_adapter_destroy();
+ mgmt_txn_destroy();
+ mgmt_history_destroy();
+ mgmt_ds_destroy();
+}
diff --git a/mgmtd/mgmt.h b/mgmtd/mgmt.h
new file mode 100644
index 0000000..d89d76f
--- /dev/null
+++ b/mgmtd/mgmt.h
@@ -0,0 +1,110 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD message definition header.
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_H
+#define _FRR_MGMTD_H
+
+#include "debug.h"
+#include "vrf.h"
+#include "defaults.h"
+#include "stream.h"
+
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_defines.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_txn.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_VTY_PORT 2623
+#define MGMTD_SOCKET_BUF_SIZE 65535
+#define MGMTD_MAX_COMMIT_LIST 10
+
+extern struct debug mgmt_debug_be;
+extern struct debug mgmt_debug_ds;
+extern struct debug mgmt_debug_fe;
+extern struct debug mgmt_debug_txn;
+
+#define MGMT_DEBUG_BE_CHECK() DEBUG_MODE_CHECK(&mgmt_debug_be, DEBUG_MODE_ALL)
+#define MGMT_DEBUG_DS_CHECK() DEBUG_MODE_CHECK(&mgmt_debug_ds, DEBUG_MODE_ALL)
+#define MGMT_DEBUG_FE_CHECK() DEBUG_MODE_CHECK(&mgmt_debug_fe, DEBUG_MODE_ALL)
+#define MGMT_DEBUG_TXN_CHECK() DEBUG_MODE_CHECK(&mgmt_debug_txn, DEBUG_MODE_ALL)
+
+struct mgmt_txn_ctx;
+
+/*
+ * MGMTD master for system wide configurations and variables.
+ */
+struct mgmt_master {
+ struct event_loop *master;
+
+ /* How big should we set the socket buffer size */
+ uint32_t socket_buffer;
+
+ /* The single instance of config transaction allowed at any time */
+ struct mgmt_txns_head txn_list;
+
+ /* Map of Transactions and its ID */
+ struct hash *txn_hash;
+ uint64_t next_txn_id;
+
+ /* The single instance of config transaction allowed at any time */
+ struct mgmt_txn_ctx *cfg_txn;
+
+ /* Datastores */
+ struct mgmt_ds_ctx *running_ds;
+ struct mgmt_ds_ctx *candidate_ds;
+ struct mgmt_ds_ctx *oper_ds;
+
+ bool terminating; /* global flag that sigint terminate seen */
+ bool perf_stats_en; /* to enable performance stats measurement */
+
+ /* List of commit infos */
+ struct mgmt_cmt_infos_head cmts; /* List of last 10 commits executed. */
+};
+
+extern struct mgmt_master *mm;
+
+/* Inline functions */
+static inline unsigned long timeval_elapsed(struct timeval a, struct timeval b)
+{
+ return (((a.tv_sec - b.tv_sec) * TIMER_SECOND_MICRO)
+ + (a.tv_usec - b.tv_usec));
+}
+
+/*
+ * Remove trailing separator from a string.
+ *
+ * str
+ * A null terminated string.
+ *
+ * sep
+ * Trailing character that needs to be removed.
+ */
+static inline void mgmt_remove_trailing_separator(char *str, char sep)
+{
+ size_t len;
+
+ len = strlen(str);
+ if (len && str[len - 1] == sep)
+ str[len - 1] = '\0';
+}
+
+/* Prototypes. */
+extern void mgmt_terminate(void);
+extern void mgmt_reset(void);
+extern time_t mgmt_clock(void);
+
+extern int mgmt_config_write(struct vty *vty);
+extern struct vty *mgmt_vty_read_config(const char *config_file,
+ char *config_default_dir);
+extern void mgmt_master_init(struct event_loop *master, const int buffer_size);
+
+extern void mgmt_init(void);
+extern void mgmt_vty_init(void);
+
+#endif /* _FRR_MGMTD_H */
diff --git a/mgmtd/mgmt_be_adapter.c b/mgmtd/mgmt_be_adapter.c
new file mode 100644
index 0000000..399fdaf
--- /dev/null
+++ b/mgmtd/mgmt_be_adapter.c
@@ -0,0 +1,935 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#include <zebra.h>
+#include "darr.h"
+#include "frrevent.h"
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmt_be_client.h"
+#include "mgmtd/mgmt_be_adapter.h"
+
+#define MGMTD_BE_ADAPTER_DBG(fmt, ...) \
+ DEBUGD(&mgmt_debug_be, "BE-ADAPTER: %s: " fmt, __func__, ##__VA_ARGS__)
+#define MGMTD_BE_ADAPTER_ERR(fmt, ...) \
+ zlog_err("BE-ADAPTER: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+
+#define FOREACH_ADAPTER_IN_LIST(adapter) \
+ frr_each_safe (mgmt_be_adapters, &mgmt_be_adapters, (adapter))
+
+/*
+ * Mapping of YANG XPath regular expressions to
+ * their corresponding backend clients.
+ */
+struct mgmt_be_xpath_map {
+ char *xpath_regexp;
+ uint subscr_info[MGMTD_BE_CLIENT_ID_MAX];
+};
+
+struct mgmt_be_client_xpath {
+ const char *xpath;
+ uint subscribed;
+};
+
+struct mgmt_be_client_xpath_map {
+ struct mgmt_be_client_xpath *xpaths;
+ uint nxpaths;
+};
+
+struct mgmt_be_get_adapter_config_params {
+ struct mgmt_be_client_adapter *adapter;
+ struct nb_config_cbs *cfg_chgs;
+ uint32_t seq;
+};
+
+/*
+ * Each client gets their own map, but also union all the strings into the
+ * above map as well.
+ */
+#if HAVE_STATICD
+static struct mgmt_be_client_xpath staticd_xpaths[] = {
+ {
+ .xpath = "/frr-vrf:lib/*",
+ .subscribed = MGMT_SUBSCR_VALIDATE_CFG | MGMT_SUBSCR_NOTIFY_CFG,
+ },
+ {
+ .xpath = "/frr-interface:lib/*",
+ .subscribed = MGMT_SUBSCR_VALIDATE_CFG | MGMT_SUBSCR_NOTIFY_CFG,
+ },
+ {
+ .xpath =
+ "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-staticd:staticd/*",
+ .subscribed = MGMT_SUBSCR_VALIDATE_CFG | MGMT_SUBSCR_NOTIFY_CFG,
+ },
+};
+#endif
+
+static struct mgmt_be_client_xpath_map
+ mgmt_client_xpaths[MGMTD_BE_CLIENT_ID_MAX] = {
+#ifdef HAVE_STATICD
+ [MGMTD_BE_CLIENT_ID_STATICD] = {staticd_xpaths,
+ array_size(staticd_xpaths)},
+#endif
+};
+
+/*
+ * We would like to have a better ADT than one with O(n) comparisons
+ *
+ * Perhaps it's possible to sort this array in a way that allows binary search
+ * to find the start, then walk until no possible match can follow? Intuition
+ * says this probably involves exact match/no-match on a stem in the map array
+ * or something like that.
+ */
+static struct mgmt_be_xpath_map *mgmt_xpath_map;
+
+static struct event_loop *mgmt_loop;
+static struct msg_server mgmt_be_server = {.fd = -1};
+
+static struct mgmt_be_adapters_head mgmt_be_adapters;
+
+static struct mgmt_be_client_adapter
+ *mgmt_be_adapters_by_id[MGMTD_BE_CLIENT_ID_MAX];
+
+/* Forward declarations */
+static void
+mgmt_be_adapter_sched_init_event(struct mgmt_be_client_adapter *adapter);
+
+static uint mgmt_be_get_subscr_for_xpath_and_client(
+ const char *xpath, enum mgmt_be_client_id client_id, uint subscr_mask);
+
+static struct mgmt_be_client_adapter *
+mgmt_be_find_adapter_by_fd(int conn_fd)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ if (adapter->conn->fd == conn_fd)
+ return adapter;
+ }
+
+ return NULL;
+}
+
+static struct mgmt_be_client_adapter *
+mgmt_be_find_adapter_by_name(const char *name)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ if (!strncmp(adapter->name, name, sizeof(adapter->name)))
+ return adapter;
+ }
+
+ return NULL;
+}
+
+static void mgmt_register_client_xpath(enum mgmt_be_client_id id,
+ const char *xpath, uint subscribed)
+{
+ struct mgmt_be_xpath_map *map;
+
+ darr_foreach_p (mgmt_xpath_map, map)
+ if (!strcmp(xpath, map->xpath_regexp)) {
+ map->subscr_info[id] = subscribed;
+ return;
+ }
+ /* we didn't find a matching entry */
+ map = darr_append(mgmt_xpath_map);
+ map->xpath_regexp = XSTRDUP(MTYPE_MGMTD_XPATH, xpath);
+ map->subscr_info[id] = subscribed;
+}
+
+/*
+ * Load the initial mapping from static init map
+ */
+static void mgmt_be_xpath_map_init(void)
+{
+ struct mgmt_be_client_xpath *init, *end;
+ enum mgmt_be_client_id id;
+
+ MGMTD_BE_ADAPTER_DBG("Init XPath Maps");
+
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ init = mgmt_client_xpaths[id].xpaths;
+ end = init + mgmt_client_xpaths[id].nxpaths;
+ for (; init < end; init++) {
+ MGMTD_BE_ADAPTER_DBG(" - XPATH: '%s'", init->xpath);
+ mgmt_register_client_xpath(id, init->xpath,
+ init->subscribed);
+ }
+ }
+
+ MGMTD_BE_ADAPTER_DBG("Total XPath Maps: %u", darr_len(mgmt_xpath_map));
+}
+
+static void mgmt_be_xpath_map_cleanup(void)
+{
+ struct mgmt_be_xpath_map *map;
+
+ darr_foreach_p (mgmt_xpath_map, map)
+ XFREE(MTYPE_MGMTD_XPATH, map->xpath_regexp);
+ darr_free(mgmt_xpath_map);
+}
+
+static int mgmt_be_eval_regexp_match(const char *xpath_regexp,
+ const char *xpath)
+{
+ int match_len = 0, re_indx = 0, xp_indx = 0;
+ int rexp_len, xpath_len;
+ bool match = true, re_wild = false, xp_wild = false;
+ bool delim = false, enter_wild_match = false;
+ char wild_delim = 0;
+
+ rexp_len = strlen(xpath_regexp);
+ xpath_len = strlen(xpath);
+
+ /*
+ * Remove the trailing wildcard from the regexp and Xpath.
+ */
+ if (rexp_len && xpath_regexp[rexp_len-1] == '*')
+ rexp_len--;
+ if (xpath_len && xpath[xpath_len-1] == '*')
+ xpath_len--;
+
+ if (!rexp_len || !xpath_len)
+ return 0;
+
+ for (re_indx = 0, xp_indx = 0;
+ match && re_indx < rexp_len && xp_indx < xpath_len;) {
+ match = (xpath_regexp[re_indx] == xpath[xp_indx]);
+
+ /*
+ * Check if we need to enter wildcard matching.
+ */
+ if (!enter_wild_match && !match &&
+ (xpath_regexp[re_indx] == '*'
+ || xpath[xp_indx] == '*')) {
+ /*
+ * Found wildcard
+ */
+ enter_wild_match =
+ (xpath_regexp[re_indx-1] == '/'
+ || xpath_regexp[re_indx-1] == '\''
+ || xpath[xp_indx-1] == '/'
+ || xpath[xp_indx-1] == '\'');
+ if (enter_wild_match) {
+ if (xpath_regexp[re_indx] == '*') {
+ /*
+ * Begin RE wildcard match.
+ */
+ re_wild = true;
+ wild_delim = xpath_regexp[re_indx-1];
+ } else if (xpath[xp_indx] == '*') {
+ /*
+ * Begin XP wildcard match.
+ */
+ xp_wild = true;
+ wild_delim = xpath[xp_indx-1];
+ }
+ }
+ }
+
+ /*
+ * Check if we need to exit wildcard matching.
+ */
+ if (enter_wild_match) {
+ if (re_wild && xpath[xp_indx] == wild_delim) {
+ /*
+ * End RE wildcard matching.
+ */
+ re_wild = false;
+ if (re_indx < rexp_len-1)
+ re_indx++;
+ enter_wild_match = false;
+ } else if (xp_wild
+ && xpath_regexp[re_indx] == wild_delim) {
+ /*
+ * End XP wildcard matching.
+ */
+ xp_wild = false;
+ if (xp_indx < xpath_len-1)
+ xp_indx++;
+ enter_wild_match = false;
+ }
+ }
+
+ match = (xp_wild || re_wild
+ || xpath_regexp[re_indx] == xpath[xp_indx]);
+
+ /*
+ * Check if we found a delimiter in both the Xpaths
+ */
+ if ((xpath_regexp[re_indx] == '/'
+ && xpath[xp_indx] == '/')
+ || (xpath_regexp[re_indx] == ']'
+ && xpath[xp_indx] == ']')
+ || (xpath_regexp[re_indx] == '['
+ && xpath[xp_indx] == '[')) {
+ /*
+ * Increment the match count if we have a
+ * new delimiter.
+ */
+ if (match && re_indx && xp_indx && !delim)
+ match_len++;
+ delim = true;
+ } else {
+ delim = false;
+ }
+
+ /*
+ * Proceed to the next character in the RE/XP string as
+ * necessary.
+ */
+ if (!re_wild)
+ re_indx++;
+ if (!xp_wild)
+ xp_indx++;
+ }
+
+ /*
+ * If we finished matching and the last token was a full match
+ * increment the match count appropriately.
+ */
+ if (match && !delim &&
+ (xpath_regexp[re_indx] == '/'
+ || xpath_regexp[re_indx] == ']'))
+ match_len++;
+
+ return match_len;
+}
+
+static void mgmt_be_adapter_delete(struct mgmt_be_client_adapter *adapter)
+{
+ MGMTD_BE_ADAPTER_DBG("deleting client adapter '%s'", adapter->name);
+
+ /*
+ * Notify about disconnect for appropriate cleanup
+ */
+ mgmt_txn_notify_be_adapter_conn(adapter, false);
+ if (adapter->id < MGMTD_BE_CLIENT_ID_MAX) {
+ mgmt_be_adapters_by_id[adapter->id] = NULL;
+ adapter->id = MGMTD_BE_CLIENT_ID_MAX;
+ }
+
+ assert(adapter->refcount == 1);
+ mgmt_be_adapter_unlock(&adapter);
+}
+
+static int mgmt_be_adapter_notify_disconnect(struct msg_conn *conn)
+{
+ struct mgmt_be_client_adapter *adapter = conn->user;
+
+ MGMTD_BE_ADAPTER_DBG("notify disconnect for client adapter '%s'",
+ adapter->name);
+
+ mgmt_be_adapter_delete(adapter);
+
+ return 0;
+}
+
+static void
+mgmt_be_adapter_cleanup_old_conn(struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_be_client_adapter *old;
+
+ FOREACH_ADAPTER_IN_LIST (old) {
+ if (old != adapter &&
+ !strncmp(adapter->name, old->name, sizeof(adapter->name))) {
+ /*
+ * We have a Zombie lingering around
+ */
+ MGMTD_BE_ADAPTER_DBG(
+ "Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)!",
+ adapter->name, adapter->conn->fd,
+ old->conn->fd);
+ /* this will/should delete old */
+ msg_conn_disconnect(old->conn, false);
+ }
+ }
+}
+
+
+static int mgmt_be_adapter_send_msg(struct mgmt_be_client_adapter *adapter,
+ Mgmtd__BeMessage *be_msg)
+{
+ return msg_conn_send_msg(
+ adapter->conn, MGMT_MSG_VERSION_PROTOBUF, be_msg,
+ mgmtd__be_message__get_packed_size(be_msg),
+ (size_t(*)(void *, void *))mgmtd__be_message__pack, false);
+}
+
+static int mgmt_be_send_subscr_reply(struct mgmt_be_client_adapter *adapter,
+ bool success)
+{
+ Mgmtd__BeMessage be_msg;
+ Mgmtd__BeSubscribeReply reply;
+
+ mgmtd__be_subscribe_reply__init(&reply);
+ reply.success = success;
+
+ mgmtd__be_message__init(&be_msg);
+ be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY;
+ be_msg.subscr_reply = &reply;
+
+	MGMTD_BE_ADAPTER_DBG("Sending SUBSCR_REPLY client: %s success: %u",
+			     adapter->name, success);
+
+ return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+static int
+mgmt_be_adapter_handle_msg(struct mgmt_be_client_adapter *adapter,
+ Mgmtd__BeMessage *be_msg)
+{
+ /*
+ * protobuf-c adds a max size enum with an internal, and changing by
+ * version, name; cast to an int to avoid unhandled enum warnings
+ */
+ switch ((int)be_msg->message_case) {
+ case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REQ:
+ MGMTD_BE_ADAPTER_DBG(
+ "Got SUBSCR_REQ from '%s' to %sregister %zu xpaths",
+ be_msg->subscr_req->client_name,
+ !be_msg->subscr_req->subscribe_xpaths &&
+ be_msg->subscr_req->n_xpath_reg
+ ? "de"
+ : "",
+ be_msg->subscr_req->n_xpath_reg);
+
+ if (strlen(be_msg->subscr_req->client_name)) {
+ strlcpy(adapter->name, be_msg->subscr_req->client_name,
+ sizeof(adapter->name));
+ adapter->id = mgmt_be_client_name2id(adapter->name);
+ if (adapter->id >= MGMTD_BE_CLIENT_ID_MAX) {
+ MGMTD_BE_ADAPTER_ERR(
+ "Unable to resolve adapter '%s' to a valid ID. Disconnecting!",
+ adapter->name);
+ /* this will/should delete old */
+ msg_conn_disconnect(adapter->conn, false);
+ zlog_err("XXX different from original code");
+ break;
+ }
+ mgmt_be_adapters_by_id[adapter->id] = adapter;
+ mgmt_be_adapter_cleanup_old_conn(adapter);
+
+ /* schedule INIT sequence now that it is registered */
+ mgmt_be_adapter_sched_init_event(adapter);
+ }
+
+ if (be_msg->subscr_req->n_xpath_reg)
+ /* we aren't handling dynamic xpaths yet */
+ mgmt_be_send_subscr_reply(adapter, false);
+ else
+ mgmt_be_send_subscr_reply(adapter, true);
+ break;
+ case MGMTD__BE_MESSAGE__MESSAGE_TXN_REPLY:
+ MGMTD_BE_ADAPTER_DBG(
+ "Got %s TXN_REPLY from '%s' txn-id %" PRIx64
+ " with '%s'",
+ be_msg->txn_reply->create ? "Create" : "Delete",
+ adapter->name, be_msg->txn_reply->txn_id,
+ be_msg->txn_reply->success ? "success" : "failure");
+ /*
+ * Forward the TXN_REPLY to txn module.
+ */
+ mgmt_txn_notify_be_txn_reply(
+ be_msg->txn_reply->txn_id,
+ be_msg->txn_reply->create,
+ be_msg->txn_reply->success, adapter);
+ break;
+ case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REPLY:
+ MGMTD_BE_ADAPTER_DBG(
+ "Got CFGDATA_REPLY from '%s' txn-id %" PRIx64
+ " batch-id %" PRIu64 " err:'%s'",
+ adapter->name, be_msg->cfg_data_reply->txn_id,
+ be_msg->cfg_data_reply->batch_id,
+ be_msg->cfg_data_reply->error_if_any
+ ? be_msg->cfg_data_reply->error_if_any
+ : "None");
+ /*
+ * Forward the CGFData-create reply to txn module.
+ */
+ mgmt_txn_notify_be_cfgdata_reply(
+ be_msg->cfg_data_reply->txn_id,
+ be_msg->cfg_data_reply->batch_id,
+ be_msg->cfg_data_reply->success,
+ be_msg->cfg_data_reply->error_if_any, adapter);
+ break;
+ case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REPLY:
+ MGMTD_BE_ADAPTER_DBG(
+ "Got %s CFG_APPLY_REPLY from '%s' txn-id %" PRIx64
+ " for %zu batches id %" PRIu64 "-%" PRIu64 " err:'%s'",
+ be_msg->cfg_apply_reply->success ? "successful"
+ : "failed",
+ adapter->name, be_msg->cfg_apply_reply->txn_id,
+ be_msg->cfg_apply_reply->n_batch_ids,
+ be_msg->cfg_apply_reply->batch_ids[0],
+ be_msg->cfg_apply_reply->batch_ids
+ [be_msg->cfg_apply_reply->n_batch_ids - 1],
+ be_msg->cfg_apply_reply->error_if_any
+ ? be_msg->cfg_apply_reply->error_if_any
+ : "None");
+ /*
+ * Forward the CGFData-apply reply to txn module.
+ */
+ mgmt_txn_notify_be_cfg_apply_reply(
+ be_msg->cfg_apply_reply->txn_id,
+ be_msg->cfg_apply_reply->success,
+ (uint64_t *)be_msg->cfg_apply_reply->batch_ids,
+ be_msg->cfg_apply_reply->n_batch_ids,
+ be_msg->cfg_apply_reply->error_if_any, adapter);
+ break;
+ case MGMTD__BE_MESSAGE__MESSAGE_GET_REPLY:
+ /*
+ * TODO: Add handling code in future.
+ */
+ break;
+ /*
+ * NOTE: The following messages are always sent from MGMTD to
+ * Backend clients only and/or need not be handled on MGMTd.
+ */
+ case MGMTD__BE_MESSAGE__MESSAGE_SUBSCR_REPLY:
+ case MGMTD__BE_MESSAGE__MESSAGE_GET_REQ:
+ case MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ:
+ case MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ:
+ case MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ:
+ case MGMTD__BE_MESSAGE__MESSAGE__NOT_SET:
+ default:
+ /*
+ * A 'default' case is being added contrary to the
+ * FRR code guidelines to take care of build
+ * failures on certain build systems (courtesy of
+ * the proto-c package).
+ */
+ break;
+ }
+
+ return 0;
+}
+
+int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, bool create)
+{
+ Mgmtd__BeMessage be_msg;
+ Mgmtd__BeTxnReq txn_req;
+
+ mgmtd__be_txn_req__init(&txn_req);
+ txn_req.create = create;
+ txn_req.txn_id = txn_id;
+
+ mgmtd__be_message__init(&be_msg);
+ be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_TXN_REQ;
+ be_msg.txn_req = &txn_req;
+
+ MGMTD_BE_ADAPTER_DBG("Sending TXN_REQ to '%s' txn-id: %" PRIu64,
+ adapter->name, txn_id);
+
+ return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t batch_id,
+ Mgmtd__YangCfgDataReq **cfgdata_reqs,
+ size_t num_reqs, bool end_of_data)
+{
+ Mgmtd__BeMessage be_msg;
+ Mgmtd__BeCfgDataCreateReq cfgdata_req;
+
+ mgmtd__be_cfg_data_create_req__init(&cfgdata_req);
+ cfgdata_req.batch_id = batch_id;
+ cfgdata_req.txn_id = txn_id;
+ cfgdata_req.data_req = cfgdata_reqs;
+ cfgdata_req.n_data_req = num_reqs;
+ cfgdata_req.end_of_data = end_of_data;
+
+ mgmtd__be_message__init(&be_msg);
+ be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_DATA_REQ;
+ be_msg.cfg_data_req = &cfgdata_req;
+
+ MGMTD_BE_ADAPTER_DBG(
+ "Sending CFGDATA_CREATE_REQ to '%s' txn-id: %" PRIu64
+ " batch-id: %" PRIu64,
+ adapter->name, txn_id, batch_id);
+
+ return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id)
+{
+ Mgmtd__BeMessage be_msg;
+ Mgmtd__BeCfgDataApplyReq apply_req;
+
+ mgmtd__be_cfg_data_apply_req__init(&apply_req);
+ apply_req.txn_id = txn_id;
+
+ mgmtd__be_message__init(&be_msg);
+ be_msg.message_case = MGMTD__BE_MESSAGE__MESSAGE_CFG_APPLY_REQ;
+ be_msg.cfg_apply_req = &apply_req;
+
+ MGMTD_BE_ADAPTER_DBG("Sending CFG_APPLY_REQ to '%s' txn-id: %" PRIu64,
+ adapter->name, txn_id);
+
+ return mgmt_be_adapter_send_msg(adapter, &be_msg);
+}
+
+static void mgmt_be_adapter_process_msg(uint8_t version, uint8_t *data,
+ size_t len, struct msg_conn *conn)
+{
+ struct mgmt_be_client_adapter *adapter = conn->user;
+ Mgmtd__BeMessage *be_msg = mgmtd__be_message__unpack(NULL, len, data);
+
+ if (!be_msg) {
+ MGMTD_BE_ADAPTER_DBG(
+ "Failed to decode %zu bytes for adapter: %s", len,
+ adapter->name);
+ return;
+ }
+ MGMTD_BE_ADAPTER_DBG("Decoded %zu bytes of message: %u for adapter: %s",
+ len, be_msg->message_case, adapter->name);
+ (void)mgmt_be_adapter_handle_msg(adapter, be_msg);
+ mgmtd__be_message__free_unpacked(be_msg, NULL);
+}
+
+static void mgmt_be_iter_and_get_cfg(const char *xpath, struct lyd_node *node,
+ struct nb_node *nb_node, void *ctx)
+{
+ struct mgmt_be_get_adapter_config_params *parms = ctx;
+ struct mgmt_be_client_adapter *adapter = parms->adapter;
+ uint subscr;
+
+ subscr = mgmt_be_get_subscr_for_xpath_and_client(
+ xpath, adapter->id, MGMT_SUBSCR_NOTIFY_CFG);
+ if (subscr)
+ nb_config_diff_created(node, &parms->seq, parms->cfg_chgs);
+}
+
+/*
+ * Initialize a BE client over a new connection
+ */
+static void mgmt_be_adapter_conn_init(struct event *thread)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ adapter = (struct mgmt_be_client_adapter *)EVENT_ARG(thread);
+ assert(adapter && adapter->conn->fd >= 0);
+
+ /*
+ * Check first if the current session can run a CONFIG
+ * transaction or not. Reschedule if a CONFIG transaction
+ * from another session is already in progress.
+ */
+ if (mgmt_config_txn_in_progress() != MGMTD_SESSION_ID_NONE) {
+ zlog_err("XXX txn in progress, retry init");
+ mgmt_be_adapter_sched_init_event(adapter);
+ return;
+ }
+
+ /*
+ * Notify TXN module to create a CONFIG transaction and
+ * download the CONFIGs identified for this new client.
+ * If the TXN module fails to initiate the CONFIG transaction
+ * disconnect from the client forcing a reconnect later.
+ * That should also take care of destroying the adapter.
+ */
+ if (mgmt_txn_notify_be_adapter_conn(adapter, true) != 0) {
+ zlog_err("XXX notify be adapter conn fail");
+ msg_conn_disconnect(adapter->conn, false);
+ adapter = NULL;
+ }
+}
+
+/*
+ * Schedule the initialization of the BE client connection.
+ */
+static void
+mgmt_be_adapter_sched_init_event(struct mgmt_be_client_adapter *adapter)
+{
+ event_add_timer_msec(mgmt_loop, mgmt_be_adapter_conn_init, adapter,
+ MGMTD_BE_CONN_INIT_DELAY_MSEC,
+ &adapter->conn_init_ev);
+}
+
+void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter)
+{
+ adapter->refcount++;
+}
+
+extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter)
+{
+ struct mgmt_be_client_adapter *a = *adapter;
+ assert(a && a->refcount);
+
+ if (!--a->refcount) {
+ mgmt_be_adapters_del(&mgmt_be_adapters, a);
+ EVENT_OFF(a->conn_init_ev);
+ msg_server_conn_delete(a->conn);
+ XFREE(MTYPE_MGMTD_BE_ADPATER, a);
+ }
+
+ *adapter = NULL;
+}
+
+/*
+ * Initialize the BE adapter module
+ */
+void mgmt_be_adapter_init(struct event_loop *tm)
+{
+ assert(!mgmt_loop);
+ mgmt_loop = tm;
+
+ mgmt_be_adapters_init(&mgmt_be_adapters);
+ mgmt_be_xpath_map_init();
+
+ if (msg_server_init(&mgmt_be_server, MGMTD_BE_SERVER_PATH, tm,
+ mgmt_be_create_adapter, "backend",
+ &mgmt_debug_be)) {
+ zlog_err("cannot initialize backend server");
+ exit(1);
+ }
+}
+
+/*
+ * Destroy the BE adapter module
+ */
+void mgmt_be_adapter_destroy(void)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ msg_server_cleanup(&mgmt_be_server);
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ mgmt_be_adapter_delete(adapter);
+ }
+ mgmt_be_xpath_map_cleanup();
+}
+
+/*
+ * The server accepted a new connection
+ */
+struct msg_conn *mgmt_be_create_adapter(int conn_fd, union sockunion *from)
+{
+ struct mgmt_be_client_adapter *adapter = NULL;
+
+ assert(!mgmt_be_find_adapter_by_fd(conn_fd));
+
+ adapter = XCALLOC(MTYPE_MGMTD_BE_ADPATER,
+ sizeof(struct mgmt_be_client_adapter));
+ adapter->id = MGMTD_BE_CLIENT_ID_MAX;
+ snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+ conn_fd);
+
+ mgmt_be_adapter_lock(adapter);
+ mgmt_be_adapters_add_tail(&mgmt_be_adapters, adapter);
+ RB_INIT(nb_config_cbs, &adapter->cfg_chgs);
+
+ adapter->conn = msg_server_conn_create(
+ mgmt_loop, conn_fd, mgmt_be_adapter_notify_disconnect,
+ mgmt_be_adapter_process_msg, MGMTD_BE_MAX_NUM_MSG_PROC,
+ MGMTD_BE_MAX_NUM_MSG_WRITE, MGMTD_BE_MSG_MAX_LEN, adapter,
+ "BE-adapter");
+
+ MGMTD_BE_ADAPTER_DBG("Added new MGMTD Backend adapter '%s'",
+ adapter->name);
+
+ return adapter->conn;
+}
+
+struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id)
+{
+ return (id < MGMTD_BE_CLIENT_ID_MAX ? mgmt_be_adapters_by_id[id]
+ : NULL);
+}
+
+struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_name(const char *name)
+{
+ return mgmt_be_find_adapter_by_name(name);
+}
+
+int mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
+ struct nb_config_cbs **cfg_chgs)
+{
+ struct mgmt_be_get_adapter_config_params parms;
+ struct nb_config *cfg_root = mgmt_ds_get_nb_config(mm->running_ds);
+
+ assert(cfg_chgs);
+
+ /*
+ * TODO: we should consider making this an assertable condition and
+ * guaranteeing it be true when this function is called. B/c what is
+ * going to happen if there are some changes being sent, and we don't
+ * gather a new snapshot, what new changes that came after the previous
+ * snapshot will then be lost?
+ */
+ if (RB_EMPTY(nb_config_cbs, &adapter->cfg_chgs)) {
+ parms.adapter = adapter;
+ parms.cfg_chgs = &adapter->cfg_chgs;
+ parms.seq = 0;
+
+ mgmt_ds_iter_data(MGMTD_DS_RUNNING, cfg_root, "",
+ mgmt_be_iter_and_get_cfg, (void *)&parms);
+ }
+
+ *cfg_chgs = &adapter->cfg_chgs;
+ return 0;
+}
+
+void mgmt_be_get_subscr_info_for_xpath(
+ const char *xpath, struct mgmt_be_client_subscr_info *subscr_info)
+{
+ struct mgmt_be_xpath_map *map;
+ enum mgmt_be_client_id id;
+
+ memset(subscr_info, 0, sizeof(*subscr_info));
+
+ MGMTD_BE_ADAPTER_DBG("XPATH: '%s'", xpath);
+ darr_foreach_p (mgmt_xpath_map, map) {
+ if (!mgmt_be_eval_regexp_match(map->xpath_regexp, xpath))
+ continue;
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ subscr_info->xpath_subscr[id] |= map->subscr_info[id];
+ }
+ }
+
+ if (DEBUG_MODE_CHECK(&mgmt_debug_be, DEBUG_MODE_ALL)) {
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ if (!subscr_info->xpath_subscr[id])
+ continue;
+			MGMTD_BE_ADAPTER_DBG("Client: %s: subscribed: 0x%x",
+ mgmt_be_client_id2name(id),
+ subscr_info->xpath_subscr[id]);
+ }
+ }
+}
+
+/**
+ * Return the subscription info bits for a given `xpath` for a given
+ * `client_id`.
+ *
+ * Args:
+ * xpath - the xpath to check for subscription information.
+ * client_id - the BE client being checked for.
+ * subscr_mask - The subscr bits the caller is interested in seeing
+ * if set.
+ *
+ * Returns:
+ * The subscription info bits.
+ */
+static uint mgmt_be_get_subscr_for_xpath_and_client(
+ const char *xpath, enum mgmt_be_client_id client_id, uint subscr_mask)
+{
+ struct mgmt_be_client_xpath_map *map;
+ uint subscr = 0;
+ uint i;
+
+ assert(client_id < MGMTD_BE_CLIENT_ID_MAX);
+
+ MGMTD_BE_ADAPTER_DBG("Checking client: %s for xpath: '%s'",
+ mgmt_be_client_id2name(client_id), xpath);
+
+ map = &mgmt_client_xpaths[client_id];
+ for (i = 0; i < map->nxpaths; i++) {
+ if (!mgmt_be_eval_regexp_match(map->xpaths[i].xpath, xpath))
+ continue;
+ MGMTD_BE_ADAPTER_DBG("xpath: %s: matched: %s",
+ map->xpaths[i].xpath, xpath);
+ subscr |= map->xpaths[i].subscribed;
+ if ((subscr & subscr_mask) == subscr_mask)
+ break;
+ }
+ MGMTD_BE_ADAPTER_DBG("client: %s: subscribed: 0x%x",
+ mgmt_be_client_id2name(client_id), subscr);
+ return subscr;
+}
+
+void mgmt_be_adapter_status_write(struct vty *vty)
+{
+ struct mgmt_be_client_adapter *adapter;
+
+ vty_out(vty, "MGMTD Backend Adapters\n");
+
+ FOREACH_ADAPTER_IN_LIST (adapter) {
+ vty_out(vty, " Client: \t\t\t%s\n", adapter->name);
+ vty_out(vty, " Conn-FD: \t\t\t%d\n", adapter->conn->fd);
+ vty_out(vty, " Client-Id: \t\t\t%d\n", adapter->id);
+ vty_out(vty, " Ref-Count: \t\t\t%u\n", adapter->refcount);
+ vty_out(vty, " Msg-Recvd: \t\t\t%" PRIu64 "\n",
+ adapter->conn->mstate.nrxm);
+ vty_out(vty, " Bytes-Recvd: \t\t%" PRIu64 "\n",
+ adapter->conn->mstate.nrxb);
+ vty_out(vty, " Msg-Sent: \t\t\t%" PRIu64 "\n",
+ adapter->conn->mstate.ntxm);
+ vty_out(vty, " Bytes-Sent: \t\t%" PRIu64 "\n",
+ adapter->conn->mstate.ntxb);
+ }
+ vty_out(vty, " Total: %d\n",
+ (int)mgmt_be_adapters_count(&mgmt_be_adapters));
+}
+
+void mgmt_be_xpath_register_write(struct vty *vty)
+{
+ struct mgmt_be_xpath_map *map;
+ enum mgmt_be_client_id id;
+ struct mgmt_be_client_adapter *adapter;
+ uint info;
+
+ vty_out(vty, "MGMTD Backend XPath Registry\n");
+
+ darr_foreach_p (mgmt_xpath_map, map) {
+ vty_out(vty, " - XPATH: '%s'\n", map->xpath_regexp);
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ info = map->subscr_info[id];
+ if (!info)
+ continue;
+ vty_out(vty,
+ " -- Client: '%s'\tValidate:%d, Notify:%d, Own:%d\n",
+ mgmt_be_client_id2name(id),
+ (info & MGMT_SUBSCR_VALIDATE_CFG) != 0,
+ (info & MGMT_SUBSCR_NOTIFY_CFG) != 0,
+ (info & MGMT_SUBSCR_OPER_OWN) != 0);
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (adapter)
+ vty_out(vty, " -- Adapter: %p\n", adapter);
+ }
+ }
+
+ vty_out(vty, "Total XPath Registries: %u\n", darr_len(mgmt_xpath_map));
+}
+
+void mgmt_be_xpath_subscr_info_write(struct vty *vty, const char *xpath)
+{
+ struct mgmt_be_client_subscr_info subscr;
+ enum mgmt_be_client_id id;
+ struct mgmt_be_client_adapter *adapter;
+ uint info;
+
+ mgmt_be_get_subscr_info_for_xpath(xpath, &subscr);
+
+ vty_out(vty, "XPath: '%s'\n", xpath);
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ info = subscr.xpath_subscr[id];
+ if (!info)
+ continue;
+ vty_out(vty,
+ " -- Client: '%s'\tValidate:%d, Notify:%d, Own:%d\n",
+ mgmt_be_client_id2name(id),
+ (info & MGMT_SUBSCR_VALIDATE_CFG) != 0,
+ (info & MGMT_SUBSCR_NOTIFY_CFG) != 0,
+ (info & MGMT_SUBSCR_OPER_OWN) != 0);
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (adapter)
+ vty_out(vty, " -- Adapter: %p\n", adapter);
+ }
+}
diff --git a/mgmtd/mgmt_be_adapter.h b/mgmtd/mgmt_be_adapter.h
new file mode 100644
index 0000000..ca8f55c
--- /dev/null
+++ b/mgmtd/mgmt_be_adapter.h
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Backend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#ifndef _FRR_MGMTD_BE_ADAPTER_H_
+#define _FRR_MGMTD_BE_ADAPTER_H_
+
+#include "mgmt_be_client.h"
+#include "mgmt_msg.h"
+#include "mgmtd/mgmt_defines.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_BE_CONN_INIT_DELAY_MSEC 50
+
+#define MGMTD_FIND_ADAPTER_BY_INDEX(adapter_index) \
+ mgmt_adaptr_ref[adapter_index]
+
+enum mgmt_be_req_type {
+ MGMTD_BE_REQ_NONE = 0,
+ MGMTD_BE_REQ_CFG_VALIDATE,
+ MGMTD_BE_REQ_CFG_APPLY,
+ MGMTD_BE_REQ_DATA_GET_ELEM,
+ MGMTD_BE_REQ_DATA_GET_NEXT
+};
+
+struct mgmt_be_cfgreq {
+ Mgmtd__YangCfgDataReq **cfgdata_reqs;
+ size_t num_reqs;
+};
+
+struct mgmt_be_datareq {
+ Mgmtd__YangGetDataReq **getdata_reqs;
+ size_t num_reqs;
+};
+
+PREDECL_LIST(mgmt_be_adapters);
+PREDECL_LIST(mgmt_txn_badapters);
+
+/* Per-connection state for one backend client attached to mgmtd. */
+struct mgmt_be_client_adapter {
+	struct msg_conn *conn;		/* underlying message connection */
+
+	struct event *conn_init_ev;	/* delayed connection-init event */
+
+	enum mgmt_be_client_id id;
+	uint32_t flags;			/* MGMTD_BE_ADAPTER_FLAGS_xxx */
+	char name[MGMTD_CLIENT_NAME_MAX_LEN];
+	uint8_t num_xpath_reg;		/* entries used in xpath_reg[] */
+	char xpath_reg[MGMTD_MAX_NUM_XPATH_REG][MGMTD_MAX_XPATH_LEN];
+
+	int refcount;
+
+	/*
+	 * List of config items that should be sent to the
+	 * backend during re/connect. This is temporarily
+	 * created and then freed-up as soon as the initial
+	 * config items have been applied onto the backend.
+	 */
+	struct nb_config_cbs cfg_chgs;
+
+	struct mgmt_be_adapters_item list_linkage;
+};
+
+#define MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED (1U << 0)
+
+DECLARE_LIST(mgmt_be_adapters, struct mgmt_be_client_adapter, list_linkage);
+
+/*
+ * MGMT_SUBSCR_xxx - flags for subscription types for xpaths registrations
+ *
+ * MGMT_SUBSCR_VALIDATE_CFG :: the client should be asked to validate config
+ * MGMT_SUBSCR_NOTIFY_CFG :: the client should be notified of config changes
+ * MGMT_SUBSCR_OPER_OWN :: the client owns the given operational state
+ */
+#define MGMT_SUBSCR_VALIDATE_CFG 0x1
+#define MGMT_SUBSCR_NOTIFY_CFG 0x2
+#define MGMT_SUBSCR_OPER_OWN 0x4
+#define MGMT_SUBSCR_ALL 0x7
+
+struct mgmt_be_client_subscr_info {
+ uint xpath_subscr[MGMTD_BE_CLIENT_ID_MAX];
+};
+
+/* Initialise backend adapter module. */
+extern void mgmt_be_adapter_init(struct event_loop *tm);
+
+/* Destroy the backend adapter module. */
+extern void mgmt_be_adapter_destroy(void);
+
+/* Acquire lock for backend adapter. */
+extern void mgmt_be_adapter_lock(struct mgmt_be_client_adapter *adapter);
+
+/* Remove lock from backend adapter. */
+extern void mgmt_be_adapter_unlock(struct mgmt_be_client_adapter **adapter);
+
+/* Create backend adapter. */
+extern struct msg_conn *mgmt_be_create_adapter(int conn_fd,
+ union sockunion *su);
+
+/* Fetch backend adapter given an adapter name. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_name(const char *name);
+
+/* Fetch backend adapter given a client ID. */
+extern struct mgmt_be_client_adapter *
+mgmt_be_get_adapter_by_id(enum mgmt_be_client_id id);
+
+/* Fetch backend adapter config. */
+extern int mgmt_be_get_adapter_config(struct mgmt_be_client_adapter *adapter,
+ struct nb_config_cbs **cfg_chgs);
+
+/* Create/destroy a transaction. */
+extern int mgmt_be_send_txn_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, bool create);
+
+/*
+ * Send config data create request to backend client.
+ *
+ * adapter
+ * Backend adapter information.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * batch_id
+ * Request batch ID.
+ *
+ * cfgdata_reqs
+ * An array of pointer to Mgmtd__YangCfgDataReq.
+ *
+ * num_reqs
+ * Length of the cfgdata_reqs array.
+ *
+ * end_of_data
+ * TRUE if the data from last batch, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_be_send_cfgdata_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id, uint64_t batch_id,
+ Mgmtd__YangCfgDataReq **cfgdata_reqs,
+ size_t num_reqs, bool end_of_data);
+
+/*
+ * Send config apply request to backend client.
+ *
+ * adapter
+ * Backend adapter information.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_be_send_cfgapply_req(struct mgmt_be_client_adapter *adapter,
+ uint64_t txn_id);
+
+/*
+ * Dump backend adapter status to vty.
+ */
+extern void mgmt_be_adapter_status_write(struct vty *vty);
+
+/*
+ * Dump xpath registry for each backend client to vty.
+ */
+extern void mgmt_be_xpath_register_write(struct vty *vty);
+
+/**
+ * Lookup the clients which are subscribed to a given `xpath`
+ * and the way they are subscribed.
+ *
+ * Args:
+ * xpath - the xpath to check for subscription information.
+ * subscr_info - An array of uint indexed by client id
+ *               each element holds the subscription info
+ * for that client.
+ */
+extern void mgmt_be_get_subscr_info_for_xpath(
+ const char *xpath, struct mgmt_be_client_subscr_info *subscr_info);
+
+/*
+ * Dump backend client information for a given xpath to vty.
+ */
+extern void mgmt_be_xpath_subscr_info_write(struct vty *vty,
+ const char *xpath);
+
+#endif /* _FRR_MGMTD_BE_ADAPTER_H_ */
diff --git a/mgmtd/mgmt_defines.h b/mgmtd/mgmt_defines.h
new file mode 100644
index 0000000..40fa670
--- /dev/null
+++ b/mgmtd/mgmt_defines.h
@@ -0,0 +1,58 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD public defines.
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_DEFINES_H
+#define _FRR_MGMTD_DEFINES_H
+
+#include "yang.h"
+
+#define MGMTD_CLIENT_NAME_MAX_LEN 32
+
+#define MGMTD_MAX_XPATH_LEN XPATH_MAXLEN
+
+#define MGMTD_MAX_YANG_VALUE_LEN YANG_VALUE_MAXLEN
+
+#define MGMTD_MAX_NUM_XPATH_REG 128
+
+#define MGMTD_MAX_NUM_DATA_REQ_IN_BATCH 32
+#define MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH 8
+
+enum mgmt_result {
+ MGMTD_SUCCESS = 0,
+ MGMTD_INVALID_PARAM,
+ MGMTD_INTERNAL_ERROR,
+ MGMTD_NO_CFG_CHANGES,
+ MGMTD_DS_LOCK_FAILED,
+ MGMTD_DS_UNLOCK_FAILED,
+ MGMTD_UNKNOWN_FAILURE
+};
+
+enum mgmt_fe_event {
+ MGMTD_FE_SERVER = 1,
+ MGMTD_FE_CONN_READ,
+ MGMTD_FE_CONN_WRITE,
+ MGMTD_FE_PROC_MSG
+};
+
+enum mgmt_be_event {
+ MGMTD_BE_SERVER = 1,
+ MGMTD_BE_CONN_INIT,
+ MGMTD_BE_CONN_READ,
+ MGMTD_BE_CONN_WRITE,
+ MGMTD_BE_PROC_MSG,
+ MGMTD_BE_SCHED_CFG_PREPARE,
+ MGMTD_BE_RESCHED_CFG_PREPARE,
+ MGMTD_BE_SCHED_CFG_APPLY,
+ MGMTD_BE_RESCHED_CFG_APPLY,
+};
+
+#define MGMTD_TXN_ID_NONE 0
+
+#define MGMTD_TXN_BATCH_ID_NONE 0
+
+#endif /* _FRR_MGMTD_DEFINES_H */
diff --git a/mgmtd/mgmt_ds.c b/mgmtd/mgmt_ds.c
new file mode 100644
index 0000000..a0e610c
--- /dev/null
+++ b/mgmtd/mgmt_ds.c
@@ -0,0 +1,546 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Datastores
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "md5.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+#include "mgmtd/mgmt_txn.h"
+#include "libyang/libyang.h"
+
+#define MGMTD_DS_DBG(fmt, ...) \
+ DEBUGD(&mgmt_debug_ds, "DS: %s: " fmt, __func__, ##__VA_ARGS__)
+#define MGMTD_DS_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+
+struct mgmt_ds_ctx {
+ Mgmtd__DatastoreId ds_id;
+
+ bool locked;
+ uint64_t vty_session_id; /* Owner of the lock or 0 */
+
+ bool config_ds;
+
+ union {
+ struct nb_config *cfg_root;
+ struct lyd_node *dnode_root;
+ } root;
+};
+
+const char *mgmt_ds_names[MGMTD_DS_MAX_ID + 1] = {
+ MGMTD_DS_NAME_NONE, /* MGMTD_DS_NONE */
+ MGMTD_DS_NAME_RUNNING, /* MGMTD_DS_RUNNING */
+ MGMTD_DS_NAME_CANDIDATE, /* MGMTD_DS_CANDIDATE */
+ MGMTD_DS_NAME_OPERATIONAL, /* MGMTD_DS_OPERATIONAL */
+ "Unknown/Invalid", /* MGMTD_DS_ID_MAX */
+};
+
+static struct mgmt_master *mgmt_ds_mm;
+static struct mgmt_ds_ctx running, candidate, oper;
+
+/* Print (a subtree of) a datastore in the given format to a libyang output handle */
+/*
+ * Print a datastore (sub)tree in 'format' to the libyang output
+ * handle 'out'.  An empty 'base_xpath' dumps the whole datastore.
+ * Returns 0 on success, -1 if the base node cannot be found.
+ */
+static int mgmt_ds_dump_in_memory(struct mgmt_ds_ctx *ds_ctx,
+				  const char *base_xpath, LYD_FORMAT format,
+				  struct ly_out *out)
+{
+	struct lyd_node *root;
+	uint32_t options = 0;
+
+	if (base_xpath[0] == '\0')
+		root = ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+					 : ds_ctx->root.dnode_root;
+	else
+		root = yang_dnode_get(ds_ctx->config_ds
+					      ? ds_ctx->root.cfg_root->dnode
+					      : ds_ctx->root.dnode_root,
+				      base_xpath);
+	if (!root)
+		return -1;
+
+	/* Trim defaults for config, show them explicitly for oper state. */
+	options = ds_ctx->config_ds ? LYD_PRINT_WD_TRIM :
+		LYD_PRINT_WD_EXPLICIT;
+
+	/* Whole datastore: print all siblings; subtree: print one tree. */
+	if (base_xpath[0] == '\0')
+		lyd_print_all(out, root, format, options);
+	else
+		lyd_print_tree(out, root, format, options);
+
+	return 0;
+}
+
+/*
+ * Replace the contents of 'dst' with a copy of 'src'.  Both must be
+ * config datastores, or both operational.  When copying out of the
+ * candidate DS its scratch change-list is emptied afterwards.
+ * Returns 0 on success, -1 on invalid arguments.
+ */
+static int mgmt_ds_replace_dst_with_src_ds(struct mgmt_ds_ctx *src,
+					   struct mgmt_ds_ctx *dst)
+{
+	if (!src || !dst)
+		return -1;
+
+	MGMTD_DS_DBG("Replacing %s with %s", mgmt_ds_id2name(dst->ds_id),
+		     mgmt_ds_id2name(src->ds_id));
+
+	if (src->config_ds && dst->config_ds)
+		nb_config_replace(dst->root.cfg_root, src->root.cfg_root, true);
+	else {
+		assert(!src->config_ds && !dst->config_ds);
+		if (dst->root.dnode_root)
+			yang_dnode_free(dst->root.dnode_root);
+		dst->root.dnode_root = yang_dnode_dup(src->root.dnode_root);
+	}
+
+	if (src->ds_id == MGMTD_DS_CANDIDATE) {
+		/*
+		 * Drop the changes in scratch-buffer.
+		 */
+		MGMTD_DS_DBG("Emptying Candidate Scratch buffer!");
+		nb_config_diff_del_changes(&src->root.cfg_root->cfg_chgs);
+	}
+
+	return 0;
+}
+
+/*
+ * Merge the contents of 'src' into 'dst' (both config, or both oper).
+ * When merging out of the candidate DS its scratch change-list is
+ * emptied afterwards.  Returns 0 on success, non-zero on failure.
+ */
+static int mgmt_ds_merge_src_with_dst_ds(struct mgmt_ds_ctx *src,
+					 struct mgmt_ds_ctx *dst)
+{
+	int ret;
+
+	if (!src || !dst)
+		return -1;
+
+	MGMTD_DS_DBG("Merging DS %d with %d", dst->ds_id, src->ds_id);
+	if (src->config_ds && dst->config_ds)
+		ret = nb_config_merge(dst->root.cfg_root, src->root.cfg_root,
+				      true);
+	else {
+		assert(!src->config_ds && !dst->config_ds);
+		ret = lyd_merge_siblings(&dst->root.dnode_root,
+					 src->root.dnode_root, 0);
+	}
+	if (ret != 0) {
+		MGMTD_DS_ERR("merge failed with err: %d", ret);
+		return ret;
+	}
+
+	if (src->ds_id == MGMTD_DS_CANDIDATE) {
+		/*
+		 * Drop the changes in scratch-buffer.
+		 */
+		MGMTD_DS_DBG("Emptying Candidate Scratch buffer!");
+		nb_config_diff_del_changes(&src->root.cfg_root->cfg_chgs);
+	}
+
+	return 0;
+}
+
+/*
+ * Strictly parse the JSON config file at 'filepath' into a fresh
+ * libyang data tree (*dnode).  On parse failure any partially-built
+ * tree is freed and -1 is returned; 0 on success.
+ */
+static int mgmt_ds_load_cfg_from_file(const char *filepath,
+				      struct lyd_node **dnode)
+{
+	LY_ERR ret;
+
+	*dnode = NULL;
+	ret = lyd_parse_data_path(ly_native_ctx, filepath, LYD_JSON,
+				  LYD_PARSE_STRICT, 0, dnode);
+
+	if (ret != LY_SUCCESS) {
+		/* libyang may hand back a partial tree even on error. */
+		if (*dnode)
+			yang_dnode_free(*dnode);
+		return -1;
+	}
+
+	return 0;
+}
+
+/* Reset the candidate DS: free its current tree and install a new,
+ * empty one (defaults enabled). */
+void mgmt_ds_reset_candidate(void)
+{
+	struct lyd_node *dnode = mm->candidate_ds->root.cfg_root->dnode;
+
+	if (dnode)
+		yang_dnode_free(dnode);
+
+	dnode = yang_dnode_new(ly_native_ctx, true);
+	mm->candidate_ds->root.cfg_root->dnode = dnode;
+}
+
+
+/*
+ * One-time datastore initialization: bind the running DS to the lib's
+ * global running_config, clone it into the candidate DS, and create an
+ * empty operational tree.  Must be called exactly once, after frr_init().
+ */
+int mgmt_ds_init(struct mgmt_master *mm)
+{
+	if (mgmt_ds_mm || mm->running_ds || mm->candidate_ds || mm->oper_ds)
+		assert(!"MGMTD: Call ds_init only once!");
+
+	/* Use Running DS from NB module??? */
+	if (!running_config)
+		assert(!"MGMTD: Call ds_init after frr_init only!");
+
+	running.root.cfg_root = running_config;
+	running.config_ds = true;
+	running.ds_id = MGMTD_DS_RUNNING;
+
+	/* Candidate starts out as a full copy of running. */
+	candidate.root.cfg_root = nb_config_dup(running.root.cfg_root);
+	candidate.config_ds = true;
+	candidate.ds_id = MGMTD_DS_CANDIDATE;
+
+	/*
+	 * Redirect lib/vty candidate-config datastore to the global candidate
+	 * config Ds on the MGMTD process.
+	 */
+	vty_mgmt_candidate_config = candidate.root.cfg_root;
+
+	oper.root.dnode_root = yang_dnode_new(ly_native_ctx, true);
+	oper.config_ds = false;
+	oper.ds_id = MGMTD_DS_OPERATIONAL;
+
+	mm->running_ds = &running;
+	mm->candidate_ds = &candidate;
+	mm->oper_ds = &oper;
+	mgmt_ds_mm = mm;
+
+	return 0;
+}
+
+/*
+ * Free the candidate and operational trees.  The running tree is the
+ * lib-owned running_config and is deliberately not freed here.
+ */
+void mgmt_ds_destroy(void)
+{
+	nb_config_free(candidate.root.cfg_root);
+	candidate.root.cfg_root = NULL;
+
+	yang_dnode_free(oper.root.dnode_root);
+	oper.root.dnode_root = NULL;
+}
+
+/*
+ * Map a datastore ID to its context.  Returns NULL for IDs that have
+ * no backing datastore (NONE, STARTUP and the protobuf sentinel).
+ */
+struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
+					  Mgmtd__DatastoreId ds_id)
+{
+	switch (ds_id) {
+	case MGMTD_DS_CANDIDATE:
+		return mm->candidate_ds;
+	case MGMTD_DS_RUNNING:
+		return mm->running_ds;
+	case MGMTD_DS_OPERATIONAL:
+		return mm->oper_ds;
+	case MGMTD_DS_NONE:
+	case MGMTD__DATASTORE_ID__STARTUP_DS:
+	case _MGMTD__DATASTORE_ID_IS_INT_SIZE:
+		/* Use NULL, not the integer literal 0, for pointers. */
+		return NULL;
+	}
+
+	return NULL;
+}
+
+/* True if 'ds_ctx' holds configuration state; false for oper or NULL. */
+bool mgmt_ds_is_config(struct mgmt_ds_ctx *ds_ctx)
+{
+	if (!ds_ctx)
+		return false;
+
+	return ds_ctx->config_ds;
+}
+
+/* True only if the DS is locked AND the lock is owned by 'session_id'. */
+bool mgmt_ds_is_locked(struct mgmt_ds_ctx *ds_ctx, uint64_t session_id)
+{
+	assert(ds_ctx);
+	return (ds_ctx->locked && ds_ctx->vty_session_id == session_id);
+}
+
+/*
+ * Take the datastore write lock on behalf of 'session_id'.
+ * Returns 0 on success, EBUSY if the lock is already held.
+ */
+int mgmt_ds_lock(struct mgmt_ds_ctx *ds_ctx, uint64_t session_id)
+{
+	assert(ds_ctx);
+
+	if (ds_ctx->locked)
+		return EBUSY;
+
+	ds_ctx->locked = true;
+	ds_ctx->vty_session_id = session_id;
+	return 0;
+}
+
+/*
+ * Release the datastore lock, warning (but not failing) if the DS was
+ * not actually locked.  vty_session_id is left untouched so the last
+ * owner remains visible for diagnostics.
+ */
+void mgmt_ds_unlock(struct mgmt_ds_ctx *ds_ctx)
+{
+	assert(ds_ctx);
+	if (!ds_ctx->locked)
+		zlog_warn(
+			"%s: WARNING: unlock on unlocked in DS:%s last session-id %" PRIu64,
+			__func__, mgmt_ds_id2name(ds_ctx->ds_id),
+			ds_ctx->vty_session_id);
+	ds_ctx->locked = 0;
+}
+
+/*
+ * Copy (replace) the destination DS with the source DS; optionally
+ * record a new commit-history entry when the target is running.
+ * Returns 0 on success, -1 on failure.
+ */
+int mgmt_ds_copy_dss(struct mgmt_ds_ctx *src_ds_ctx,
+		     struct mgmt_ds_ctx *dst_ds_ctx, bool updt_cmt_rec)
+{
+	if (mgmt_ds_replace_dst_with_src_ds(src_ds_ctx, dst_ds_ctx) != 0)
+		return -1;
+
+	/* Only commits into running are recorded in history. */
+	if (updt_cmt_rec && dst_ds_ctx->ds_id == MGMTD_DS_RUNNING)
+		mgmt_history_new_record(dst_ds_ctx);
+
+	return 0;
+}
+
+/*
+ * Dump the complete datastore as JSON into 'file_name'.
+ * Returns 0 on success, -1 on failure (file could not be opened, or
+ * the in-memory dump failed).
+ */
+int mgmt_ds_dump_ds_to_file(char *file_name, struct mgmt_ds_ctx *ds_ctx)
+{
+	struct ly_out *out;
+	int ret;
+
+	/*
+	 * Previously a failure to open the output file fell through and
+	 * returned 0 (success); report it as an error instead.
+	 */
+	if (ly_out_new_filepath(file_name, &out) != LY_SUCCESS)
+		return -1;
+
+	ret = mgmt_ds_dump_in_memory(ds_ctx, "", LYD_JSON, out);
+	ly_out_free(out, NULL, 0);
+
+	return ret;
+}
+
+/* Return the northbound config tree, or NULL for oper/NULL datastores. */
+struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx)
+{
+	if (!ds_ctx)
+		return NULL;
+
+	return ds_ctx->config_ds ? ds_ctx->root.cfg_root : NULL;
+}
+
+/*
+ * Depth-first walk of the data tree rooted at 'base_xpath' (or at
+ * 'base_dnode' if the caller already resolved it), invoking
+ * 'mgmt_ds_node_iter_fn' on every visited node.  Returns -1 if the
+ * base xpath resolves to nothing, otherwise the first non-zero value
+ * propagated from recursion (0 on full completion).
+ */
+static int mgmt_walk_ds_nodes(
+	struct nb_config *root, const char *base_xpath,
+	struct lyd_node *base_dnode,
+	void (*mgmt_ds_node_iter_fn)(const char *xpath, struct lyd_node *node,
+				     struct nb_node *nb_node, void *ctx),
+	void *ctx)
+{
+	/* this is 1k per recursion... */
+	char xpath[MGMTD_MAX_XPATH_LEN];
+	struct lyd_node *dnode;
+	struct nb_node *nbnode;
+	int ret = 0;
+
+	assert(mgmt_ds_node_iter_fn);
+
+	MGMTD_DS_DBG(" -- START: base xpath: '%s'", base_xpath);
+
+	if (!base_dnode)
+		/*
+		 * This function only returns the first node of a possible set
+		 * of matches issuing a warning if more than 1 matches
+		 */
+		base_dnode = yang_dnode_get(root->dnode, base_xpath);
+	if (!base_dnode)
+		return -1;
+
+	MGMTD_DS_DBG("   search base schema: '%s'",
+		     lysc_path(base_dnode->schema, LYSC_PATH_LOG, xpath,
+			       sizeof(xpath)));
+
+	nbnode = (struct nb_node *)base_dnode->schema->priv;
+	(*mgmt_ds_node_iter_fn)(base_xpath, base_dnode, nbnode, ctx);
+
+	/*
+	 * If the base_xpath points to a leaf node we can skip the tree walk.
+	 */
+	if (base_dnode->schema->nodetype & LYD_NODE_TERM)
+		return 0;
+
+	/*
+	 * at this point the xpath matched this container node (or some parent
+	 * and we're wildcard descending now) so by walking it's children we
+	 * continue to change the meaning of an xpath regex to rather be a
+	 * prefix matching path
+	 */
+
+	LY_LIST_FOR (lyd_child(base_dnode), dnode) {
+		assert(dnode->schema && dnode->schema->priv);
+
+		/* Recurse with the child's canonical data path. */
+		(void)lyd_path(dnode, LYD_PATH_STD, xpath, sizeof(xpath));
+
+		MGMTD_DS_DBG("    -- Child xpath: %s", xpath);
+
+		ret = mgmt_walk_ds_nodes(root, xpath, dnode,
+					 mgmt_ds_node_iter_fn, ctx);
+		if (ret != 0)
+			break;
+	}
+
+	MGMTD_DS_DBG(" -- END: base xpath: '%s'", base_xpath);
+
+	return ret;
+}
+
+/* Look up a data node by xpath in config or oper tree; NULL if absent. */
+struct lyd_node *mgmt_ds_find_data_node_by_xpath(struct mgmt_ds_ctx *ds_ctx,
+						 const char *xpath)
+{
+	if (!ds_ctx)
+		return NULL;
+
+	return yang_dnode_get(ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+						: ds_ctx->root.dnode_root,
+			      xpath);
+}
+
+/*
+ * Delete the data node at 'xpath' (and any northbound-declared
+ * dependent node) from the datastore.  Returns 0 on success,
+ * NB_ERR_NOT_FOUND if the node does not exist (callers may choose to
+ * ignore that), -1 on bad arguments.
+ */
+int mgmt_ds_delete_data_nodes(struct mgmt_ds_ctx *ds_ctx, const char *xpath)
+{
+	struct nb_node *nb_node;
+	struct lyd_node *dnode, *dep_dnode;
+	char dep_xpath[XPATH_MAXLEN];
+
+	if (!ds_ctx)
+		return -1;
+
+	nb_node = nb_node_find(xpath);
+
+	dnode = yang_dnode_get(ds_ctx->config_ds
+				       ? ds_ctx->root.cfg_root->dnode
+				       : ds_ctx->root.dnode_root,
+			       xpath);
+
+	if (!dnode)
+		/*
+		 * Return a special error code so the caller can choose
+		 * whether to ignore it or not.
+		 */
+		return NB_ERR_NOT_FOUND;
+	/* destroy dependent node first, if the nb_node declares one */
+	if (nb_node && nb_node->dep_cbs.get_dependant_xpath) {
+		nb_node->dep_cbs.get_dependant_xpath(dnode, dep_xpath);
+
+		dep_dnode = yang_dnode_get(
+			ds_ctx->config_ds ? ds_ctx->root.cfg_root->dnode
+					  : ds_ctx->root.dnode_root,
+			dep_xpath);
+		if (dep_dnode)
+			lyd_free_tree(dep_dnode);
+	}
+	lyd_free_tree(dnode);
+
+	return 0;
+}
+
+/*
+ * Load a JSON config file into 'dst', either merging with or replacing
+ * the existing contents.  A temporary stack-local DS context wraps the
+ * parsed tree so the shared merge/replace helpers can be reused.
+ * Returns 0 on success, -1 on parse failure or bad arguments.
+ */
+int mgmt_ds_load_config_from_file(struct mgmt_ds_ctx *dst,
+				  const char *file_path, bool merge)
+{
+	struct lyd_node *iter;
+	struct mgmt_ds_ctx parsed;
+
+	if (!dst)
+		return -1;
+
+	if (mgmt_ds_load_cfg_from_file(file_path, &iter) != 0) {
+		MGMTD_DS_ERR("Failed to load config from the file %s",
+			     file_path);
+		return -1;
+	}
+
+	/* Wrap the parsed tree in a throw-away config DS context. */
+	parsed.root.cfg_root = nb_config_new(iter);
+	parsed.config_ds = true;
+	parsed.ds_id = dst->ds_id;
+
+	if (merge)
+		mgmt_ds_merge_src_with_dst_ds(&parsed, dst);
+	else
+		mgmt_ds_replace_dst_with_src_ds(&parsed, dst);
+
+	nb_config_free(parsed.root.cfg_root);
+
+	return 0;
+}
+
+/*
+ * Iterate over datastore data under 'base_xpath', calling
+ * 'mgmt_ds_node_iter_fn' for every node.  An empty base xpath walks
+ * all top-level siblings of the tree.  Returns 0/-1, or the result of
+ * the (last) walk.
+ */
+int mgmt_ds_iter_data(Mgmtd__DatastoreId ds_id, struct nb_config *root,
+		      const char *base_xpath,
+		      void (*mgmt_ds_node_iter_fn)(const char *xpath,
+						   struct lyd_node *node,
+						   struct nb_node *nb_node,
+						   void *ctx),
+		      void *ctx)
+{
+	int ret = 0;
+	char xpath[MGMTD_MAX_XPATH_LEN];
+	struct lyd_node *base_dnode = NULL;
+	struct lyd_node *node;
+
+	if (!root)
+		return -1;
+
+	strlcpy(xpath, base_xpath, sizeof(xpath));
+	mgmt_remove_trailing_separator(xpath, '/');
+
+	/*
+	 * mgmt_ds_iter_data is the only user of mgmt_walk_ds_nodes other than
+	 * mgmt_walk_ds_nodes itself, so we can modify the API if we would like.
+	 * Oper-state should be kept in mind though for the prefix walk
+	 */
+
+	MGMTD_DS_DBG(" -- START DS walk for DSid: %d", ds_id);
+
+	/* If the base_xpath is empty then crawl the siblings */
+	if (xpath[0] == 0) {
+		base_dnode = root->dnode;
+
+		/* get first top-level sibling */
+		while (base_dnode->parent)
+			base_dnode = lyd_parent(base_dnode);
+
+		while (base_dnode->prev->next)
+			base_dnode = base_dnode->prev;
+
+		/*
+		 * NOTE(review): 'ret' is overwritten each iteration, so a
+		 * failure on a non-final sibling is silently dropped —
+		 * confirm this is intended.
+		 */
+		LY_LIST_FOR (base_dnode, node) {
+			ret = mgmt_walk_ds_nodes(root, xpath, node,
+						 mgmt_ds_node_iter_fn, ctx);
+		}
+	} else
+		ret = mgmt_walk_ds_nodes(root, xpath, base_dnode,
+					 mgmt_ds_node_iter_fn, ctx);
+
+	return ret;
+}
+
+/*
+ * Print a datastore (sub)tree in 'format'.  If 'f' is non-NULL the
+ * output goes to that file; otherwise it is rendered into a memory
+ * buffer and written to the vty.  An optional 'xpath' restricts the
+ * dump to a subtree (a trailing '/' is stripped first).
+ */
+void mgmt_ds_dump_tree(struct vty *vty, struct mgmt_ds_ctx *ds_ctx,
+		       const char *xpath, FILE *f, LYD_FORMAT format)
+{
+	struct ly_out *out;
+	/*
+	 * Must be initialized: if the dump below fails (or prints
+	 * nothing), libyang never sets this pointer and the vty_out()
+	 * at the end would read an uninitialized value.
+	 */
+	char *str = NULL;
+	char base_xpath[MGMTD_MAX_XPATH_LEN] = {0};
+	LY_ERR err;
+
+	if (!ds_ctx) {
+		vty_out(vty, "    >>>>> Datastore Not Initialized!\n");
+		return;
+	}
+
+	if (xpath) {
+		strlcpy(base_xpath, xpath, MGMTD_MAX_XPATH_LEN);
+		mgmt_remove_trailing_separator(base_xpath, '/');
+	}
+
+	/* Previously the return values of ly_out_new_*() were ignored. */
+	if (f)
+		err = ly_out_new_file(f, &out);
+	else
+		err = ly_out_new_memory(&str, 0, &out);
+	if (err != LY_SUCCESS)
+		return;
+
+	mgmt_ds_dump_in_memory(ds_ctx, base_xpath, format, out);
+
+	if (!f)
+		vty_out(vty, "%s\n", str ? str : "");
+
+	ly_out_free(out, NULL, 0);
+}
+
+/* Print name, handle pointer and config-flag for one datastore. */
+void mgmt_ds_status_write_one(struct vty *vty, struct mgmt_ds_ctx *ds_ctx)
+{
+	if (!ds_ctx) {
+		vty_out(vty, "    >>>>> Datastore Not Initialized!\n");
+		return;
+	}
+
+	vty_out(vty, "  DS: %s\n", mgmt_ds_id2name(ds_ctx->ds_id));
+	vty_out(vty, "    DS-Hndl: \t\t\t%p\n", ds_ctx);
+	vty_out(vty, "    Config: \t\t\t%s\n",
+		ds_ctx->config_ds ? "True" : "False");
+}
+
+/* Print status of all three datastores (running/candidate/oper). */
+void mgmt_ds_status_write(struct vty *vty)
+{
+	vty_out(vty, "MGMTD Datastores\n");
+
+	mgmt_ds_status_write_one(vty, mgmt_ds_mm->running_ds);
+
+	mgmt_ds_status_write_one(vty, mgmt_ds_mm->candidate_ds);
+
+	mgmt_ds_status_write_one(vty, mgmt_ds_mm->oper_ds);
+}
diff --git a/mgmtd/mgmt_ds.h b/mgmtd/mgmt_ds.h
new file mode 100644
index 0000000..1cf4816
--- /dev/null
+++ b/mgmtd/mgmt_ds.h
@@ -0,0 +1,336 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Datastores
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_DS_H_
+#define _FRR_MGMTD_DS_H_
+
+#include "mgmt_fe_client.h"
+#include "northbound.h"
+
+#include "mgmtd/mgmt_defines.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+#define MGMTD_MAX_NUM_DSNODES_PER_BATCH 128
+
+#define MGMTD_DS_NAME_MAX_LEN 32
+#define MGMTD_DS_NAME_NONE "none"
+#define MGMTD_DS_NAME_RUNNING "running"
+#define MGMTD_DS_NAME_CANDIDATE "candidate"
+#define MGMTD_DS_NAME_OPERATIONAL "operational"
+
+#define FOREACH_MGMTD_DS_ID(id) \
+ for ((id) = MGMTD_DS_NONE; (id) < MGMTD_DS_MAX_ID; (id)++)
+
+#define MGMTD_MAX_COMMIT_LIST 10
+
+#define MGMTD_COMMIT_FILE_PATH DAEMON_DB_DIR "/commit-%s.json"
+#define MGMTD_COMMIT_INDEX_FILE_NAME DAEMON_DB_DIR "/commit-index.dat"
+
+extern struct nb_config *running_config;
+
+struct mgmt_ds_ctx;
+
+/***************************************************************
+ * Global data exported
+ ***************************************************************/
+
+extern const char *mgmt_ds_names[MGMTD_DS_MAX_ID + 1];
+
+/*
+ * Convert datastore ID to datastore name.
+ *
+ * id
+ * Datastore ID.
+ *
+ * Returns:
+ * Datastore name.
+ */
+static inline const char *mgmt_ds_id2name(Mgmtd__DatastoreId id)
+{
+	/* Out-of-range IDs map to the trailing "Unknown/Invalid" entry. */
+	if (id > MGMTD_DS_MAX_ID)
+		id = MGMTD_DS_MAX_ID;
+	return mgmt_ds_names[id];
+}
+
+/*
+ * Convert datastore name to datastore ID.
+ *
+ * id
+ * Datastore name.
+ *
+ * Returns:
+ * Datastore ID.
+ */
+static inline Mgmtd__DatastoreId mgmt_ds_name2id(const char *name)
+{
+	Mgmtd__DatastoreId id;
+
+	FOREACH_MGMTD_DS_ID (id) {
+		if (!strncmp(mgmt_ds_names[id], name, MGMTD_DS_NAME_MAX_LEN))
+			return id;
+	}
+
+	/* No table entry matched. */
+	return MGMTD_DS_NONE;
+}
+
+/*
+ * Convert datastore name to datastore ID.
+ *
+ * String-literal variant of the mgmt_ds_name2id() function above.
+ */
+static inline Mgmtd__DatastoreId mgmt_get_ds_id_by_name(const char *ds_name)
+{
+	/* sizeof("...") includes the NUL, so these are exact matches. */
+	if (!strncmp(ds_name, "candidate", sizeof("candidate")))
+		return MGMTD_DS_CANDIDATE;
+	else if (!strncmp(ds_name, "running", sizeof("running")))
+		return MGMTD_DS_RUNNING;
+	else if (!strncmp(ds_name, "operational", sizeof("operational")))
+		return MGMTD_DS_OPERATIONAL;
+	return MGMTD_DS_NONE;
+}
+
+/*
+ * Appends trail wildcard '/' '*' to a given xpath.
+ *
+ * xpath
+ * YANG xpath.
+ *
+ * path_len
+ * xpath length.
+ */
+static inline void mgmt_xpath_append_trail_wildcard(char *xpath,
+						    size_t *xpath_len)
+{
+	if (!xpath || !xpath_len)
+		return;
+
+	/* A zero xpath_len means "compute it for me". */
+	if (!*xpath_len)
+		*xpath_len = strlen(xpath);
+
+	/*
+	 * NOTE(review): assumes the buffer behind 'xpath' is at least
+	 * MGMTD_MAX_XPATH_LEN bytes — confirm at callers.
+	 */
+	if (*xpath_len > 2 && *xpath_len < MGMTD_MAX_XPATH_LEN - 2) {
+		if (xpath[*xpath_len - 1] == '/') {
+			/* Ends in '/': just append '*'. */
+			xpath[*xpath_len] = '*';
+			xpath[*xpath_len + 1] = 0;
+			(*xpath_len)++;
+		} else if (xpath[*xpath_len - 1] != '*') {
+			/* Otherwise append "/*" (unless already wildcarded). */
+			xpath[*xpath_len] = '/';
+			xpath[*xpath_len + 1] = '*';
+			xpath[*xpath_len + 2] = 0;
+			(*xpath_len) += 2;
+		}
+	}
+}
+
+/*
+ * Removes trail wildcard '/' '*' from a given xpath.
+ *
+ * xpath
+ * YANG xpath.
+ *
+ * path_len
+ * xpath length.
+ */
+static inline void mgmt_xpath_remove_trail_wildcard(char *xpath,
+						    size_t *xpath_len)
+{
+	if (!xpath || !xpath_len)
+		return;
+
+	/* A zero xpath_len means "compute it for me". */
+	if (!*xpath_len)
+		*xpath_len = strlen(xpath);
+
+	/* Strip a literal trailing "/*" if present. */
+	if (*xpath_len > 2 && xpath[*xpath_len - 2] == '/'
+	    && xpath[*xpath_len - 1] == '*') {
+		xpath[*xpath_len - 2] = 0;
+		(*xpath_len) -= 2;
+	}
+}
+
+/* Initialise datastore */
+extern int mgmt_ds_init(struct mgmt_master *cm);
+
+/* Destroy datastore */
+extern void mgmt_ds_destroy(void);
+
+/*
+ * Get datastore handler by ID
+ *
+ * mm
+ * Management master structure.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * Returns:
+ * Datastore context (Holds info about ID, lock, root node etc).
+ */
+extern struct mgmt_ds_ctx *mgmt_ds_get_ctx_by_id(struct mgmt_master *mm,
+ Mgmtd__DatastoreId ds_id);
+
+/*
+ * Check if a given datastore is config ds
+ */
+extern bool mgmt_ds_is_config(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Check if a given datastore is locked by a given session
+ */
+extern bool mgmt_ds_is_locked(struct mgmt_ds_ctx *ds_ctx, uint64_t session_id);
+
+/*
+ * Acquire write lock to a ds given a ds_handle
+ */
+extern int mgmt_ds_lock(struct mgmt_ds_ctx *ds_ctx, uint64_t session_id);
+
+/*
+ * Remove a lock from ds given a ds_handle
+ */
+extern void mgmt_ds_unlock(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Copy from source to destination datastore.
+ *
+ * src_ds
+ * Source datastore handle (ds to be copied from).
+ *
+ * dst_ds
+ * Destination datastore handle (ds to be copied to).
+ *
+ * update_cmd_rec
+ * TRUE if need to update commit record, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_copy_dss(struct mgmt_ds_ctx *src_ds_ctx,
+ struct mgmt_ds_ctx *dst_ds_ctx,
+ bool update_cmt_rec);
+
+/*
+ * Fetch northbound configuration for a given datastore context.
+ */
+extern struct nb_config *mgmt_ds_get_nb_config(struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Find YANG data node given a datastore handle YANG xpath.
+ */
+extern struct lyd_node *
+mgmt_ds_find_data_node_by_xpath(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath);
+
+/*
+ * Delete YANG data node given a datastore handle and YANG xpath.
+ */
+extern int mgmt_ds_delete_data_nodes(struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath);
+
+/*
+ * Iterate over datastore data.
+ *
+ * ds_id
+ * Datastore ID..
+ *
+ * root
+ * The root of the tree to iterate over.
+ *
+ * base_xpath
+ * Base YANG xpath from where needs to be iterated.
+ *
+ * iter_fn
+ * function that will be called during each iteration.
+ *
+ * ctx
+ * User defined opaque value normally used to pass
+ * reference to some user private context that will
+ * be passed to the iterator function provided in
+ * 'iter_fn'.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_iter_data(
+ Mgmtd__DatastoreId ds_id, struct nb_config *root,
+ const char *base_xpath,
+ void (*mgmt_ds_node_iter_fn)(const char *xpath, struct lyd_node *node,
+ struct nb_node *nb_node, void *ctx),
+ void *ctx);
+
+/*
+ * Load config to datastore from a file.
+ *
+ * ds_ctx
+ * Datastore context.
+ *
+ * file_path
+ * File path of the configuration file.
+ *
+ * merge
+ * TRUE if you want to merge with existing config,
+ * FALSE if you want to replace with existing config
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_load_config_from_file(struct mgmt_ds_ctx *ds_ctx,
+ const char *file_path, bool merge);
+
+/*
+ * Dump the data tree to a file with JSON/XML format.
+ *
+ * vty
+ * VTY context.
+ *
+ * ds_ctx
+ * Datastore context.
+ *
+ * xpath
+ * Base YANG xpath from where data needs to be dumped.
+ *
+ * f
+ * File pointer to where data to be dumped.
+ *
+ * format
+ * JSON/XML
+ */
+extern void mgmt_ds_dump_tree(struct vty *vty, struct mgmt_ds_ctx *ds_ctx,
+ const char *xpath, FILE *f, LYD_FORMAT format);
+
+/*
+ * Dump the complete data tree to a file with JSON format.
+ *
+ * file_name
+ * File path to where data to be dumped.
+ *
+ * ds
+ * Datastore context.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_ds_dump_ds_to_file(char *file_name,
+ struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Dump information about specific datastore.
+ */
+extern void mgmt_ds_status_write_one(struct vty *vty,
+ struct mgmt_ds_ctx *ds_ctx);
+
+/*
+ * Dump information about all the datastores.
+ */
+extern void mgmt_ds_status_write(struct vty *vty);
+
+
+/*
+ * Reset the candidate DS to empty state
+ */
+void mgmt_ds_reset_candidate(void);
+
+#endif /* _FRR_MGMTD_DS_H_ */
diff --git a/mgmtd/mgmt_fe_adapter.c b/mgmtd/mgmt_fe_adapter.c
new file mode 100644
index 0000000..2b2471c
--- /dev/null
+++ b/mgmtd/mgmt_fe_adapter.c
@@ -0,0 +1,1402 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#include <zebra.h>
+#include "sockopt.h"
+#include "network.h"
+#include "libfrr.h"
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmt_pb.h"
+#include "hash.h"
+#include "jhash.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+
+/* Debug/error logging helpers for this module. */
+#define MGMTD_FE_ADAPTER_DBG(fmt, ...)                                         \
+	DEBUGD(&mgmt_debug_fe, "FE-ADAPTER: %s: " fmt, __func__, ##__VA_ARGS__)
+#define MGMTD_FE_ADAPTER_ERR(fmt, ...)                                         \
+	zlog_err("FE-ADAPTER: %s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+
+/* Deletion-safe iteration over all connected frontend adapters. */
+#define FOREACH_ADAPTER_IN_LIST(adapter)                                       \
+	frr_each_safe (mgmt_fe_adapters, &mgmt_fe_adapters, (adapter))
+
+/* Deferred per-session cleanup events (see mgmt_fe_session_register_event). */
+enum mgmt_session_event {
+	MGMTD_FE_SESSION_CFG_TXN_CLNUP = 1, /* abort the CONFIG txn */
+	MGMTD_FE_SESSION_SHOW_TXN_CLNUP,    /* destroy the SHOW txn */
+};
+
+/*
+ * State for one frontend client session.  A session is created on a
+ * SESSION_REQ and owns at most one CONFIG and one SHOW transaction at a
+ * time, plus any datastore write-locks it has taken.
+ */
+struct mgmt_fe_session_ctx {
+	struct mgmt_fe_client_adapter *adapter; /* owning connection */
+	uint64_t session_id;  /* MGMTD-allocated id (hash key) */
+	uint64_t client_id;   /* client-supplied connection id */
+	uint64_t txn_id;      /* active SHOW transaction, or NONE */
+	uint64_t cfg_txn_id;  /* active CONFIG transaction, or NONE */
+	uint8_t ds_locked[MGMTD_DS_MAX_ID]; /* per-DS write-lock flags */
+	struct event *proc_cfg_txn_clnp;    /* pending cfg-txn cleanup timer */
+	struct event *proc_show_txn_clnp;   /* pending show-txn cleanup timer */
+
+	struct mgmt_fe_sessions_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_fe_sessions, struct mgmt_fe_session_ctx, list_linkage);
+
+/* Deletion-safe iteration over all sessions of one adapter. */
+#define FOREACH_SESSION_IN_LIST(adapter, session)                              \
+	frr_each_safe (mgmt_fe_sessions, &(adapter)->fe_sessions, (session))
+
+static struct event_loop *mgmt_loop;
+static struct msg_server mgmt_fe_server = {.fd = -1};
+
+/* All connected frontend client adapters. */
+static struct mgmt_fe_adapters_head mgmt_fe_adapters;
+
+/* All sessions across adapters, keyed by session_id; id 0 never issued. */
+static struct hash *mgmt_fe_sessions;
+static uint64_t mgmt_fe_next_session_id;
+
+/* Forward declarations */
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+			       enum mgmt_session_event event);
+/*
+ * Take the write-lock on datastore 'ds_id' on behalf of 'session'.
+ * Re-locking a DS the session already holds only warns (and succeeds);
+ * failure to acquire the underlying DS lock returns -1.
+ */
+static int
+mgmt_fe_session_write_lock_ds(Mgmtd__DatastoreId ds_id,
+			      struct mgmt_ds_ctx *ds_ctx,
+			      struct mgmt_fe_session_ctx *session)
+{
+	if (session->ds_locked[ds_id]) {
+		zlog_warn("multiple lock taken by session-id: %" PRIu64
+			  " on DS:%s",
+			  session->session_id, mgmt_ds_id2name(ds_id));
+		return 0;
+	}
+
+	if (mgmt_ds_lock(ds_ctx, session->session_id)) {
+		MGMTD_FE_ADAPTER_DBG(
+			"Failed to lock the DS:%s for session-id: %" PRIu64
+			" from %s!",
+			mgmt_ds_id2name(ds_id), session->session_id,
+			session->adapter->name);
+		return -1;
+	}
+
+	session->ds_locked[ds_id] = true;
+	MGMTD_FE_ADAPTER_DBG(
+		"Write-Locked the DS:%s for session-id: %" PRIu64 " from %s",
+		mgmt_ds_id2name(ds_id), session->session_id,
+		session->adapter->name);
+
+	return 0;
+}
+
+/*
+ * Release this session's write-lock on datastore 'ds_id'.  Unlocking a
+ * DS the session does not hold only warns; the DS is unlocked anyway.
+ */
+static void mgmt_fe_session_unlock_ds(Mgmtd__DatastoreId ds_id,
+				      struct mgmt_ds_ctx *ds_ctx,
+				      struct mgmt_fe_session_ctx *session)
+{
+	if (!session->ds_locked[ds_id])
+		zlog_warn("unlock unlocked by session-id: %" PRIu64 " on DS:%s",
+			  session->session_id, mgmt_ds_id2name(ds_id));
+
+	session->ds_locked[ds_id] = false;
+	mgmt_ds_unlock(ds_ctx);
+	MGMTD_FE_ADAPTER_DBG(
+		"Unlocked DS:%s write-locked earlier by session-id: %" PRIu64
+		" from %s",
+		mgmt_ds_id2name(ds_id), session->session_id,
+		session->adapter->name);
+}
+
+/*
+ * Abort any in-flight CONFIG transaction for this session: revert the
+ * candidate DS back to running and destroy the transaction itself.
+ */
+static void
+mgmt_fe_session_cfg_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+	/*
+	 * Ensure any uncommitted changes in Candidate DS
+	 * is discarded.
+	 */
+	mgmt_ds_copy_dss(mm->running_ds, mm->candidate_ds, false);
+
+	/*
+	 * Destroy the actual transaction created earlier.
+	 */
+	if (session->cfg_txn_id != MGMTD_TXN_ID_NONE)
+		mgmt_destroy_txn(&session->cfg_txn_id);
+}
+
+/* Destroy this session's SHOW transaction, if one is active. */
+static void
+mgmt_fe_session_show_txn_cleanup(struct mgmt_fe_session_ctx *session)
+{
+	/*
+	 * Destroy the transaction created recently.
+	 */
+	if (session->txn_id != MGMTD_TXN_ID_NONE)
+		mgmt_destroy_txn(&session->txn_id);
+}
+
+/*
+ * Fold the just-finished SET_CONFIG duration (last_start..last_end) into
+ * the running min/max/avg statistics.
+ *
+ * NOTE(review): the average computation divides by set_cfg_count, so this
+ * assumes the caller incremented set_cfg_count (>= 1) before the request
+ * started — confirm against mgmt_fe_adapter_handle_msg.
+ */
+static void
+mgmt_fe_adapter_compute_set_cfg_timers(struct mgmt_setcfg_stats *setcfg_stats)
+{
+	setcfg_stats->last_exec_tm = timeval_elapsed(setcfg_stats->last_end,
+						     setcfg_stats->last_start);
+	if (setcfg_stats->last_exec_tm > setcfg_stats->max_tm)
+		setcfg_stats->max_tm = setcfg_stats->last_exec_tm;
+
+	if (setcfg_stats->last_exec_tm < setcfg_stats->min_tm)
+		setcfg_stats->min_tm = setcfg_stats->last_exec_tm;
+
+	/* Incremental running average over all set-config requests. */
+	setcfg_stats->avg_tm =
+		(((setcfg_stats->avg_tm * (setcfg_stats->set_cfg_count - 1))
+		  + setcfg_stats->last_exec_tm)
+		 / setcfg_stats->set_cfg_count);
+}
+
+/*
+ * Fold the just-finished COMMIT duration (last_start..last_end) into the
+ * running min/max statistics, tracking the batch count at each extreme.
+ */
+static void
+mgmt_fe_session_compute_commit_timers(struct mgmt_commit_stats *cmt_stats)
+{
+	cmt_stats->last_exec_tm =
+		timeval_elapsed(cmt_stats->last_end, cmt_stats->last_start);
+	if (cmt_stats->last_exec_tm > cmt_stats->max_tm) {
+		cmt_stats->max_tm = cmt_stats->last_exec_tm;
+		cmt_stats->max_batch_cnt = cmt_stats->last_batch_cnt;
+	}
+
+	if (cmt_stats->last_exec_tm < cmt_stats->min_tm) {
+		cmt_stats->min_tm = cmt_stats->last_exec_tm;
+		cmt_stats->min_batch_cnt = cmt_stats->last_batch_cnt;
+	}
+}
+
+/*
+ * Tear down one session: abort its CONFIG/SHOW transactions, release any
+ * DS write-locks it holds, unlink it from its adapter and the global
+ * session hash, and free it.  Sets *sessionp to NULL.
+ */
+static void mgmt_fe_cleanup_session(struct mgmt_fe_session_ctx **sessionp)
+{
+	Mgmtd__DatastoreId ds_id;
+	struct mgmt_ds_ctx *ds_ctx;
+	struct mgmt_fe_session_ctx *session = *sessionp;
+
+	if (session->adapter) {
+		mgmt_fe_session_cfg_txn_cleanup(session);
+		mgmt_fe_session_show_txn_cleanup(session);
+		for (ds_id = 0; ds_id < MGMTD_DS_MAX_ID; ds_id++) {
+			ds_ctx = mgmt_ds_get_ctx_by_id(mm, ds_id);
+			if (ds_ctx && session->ds_locked[ds_id])
+				mgmt_fe_session_unlock_ds(ds_id, ds_ctx,
+							  session);
+		}
+		mgmt_fe_sessions_del(&session->adapter->fe_sessions, session);
+		/* the adapter must outlive this drop of the session's ref */
+		assert(session->adapter->refcount > 1);
+		mgmt_fe_adapter_unlock(&session->adapter);
+	}
+
+	hash_release(mgmt_fe_sessions, session);
+	XFREE(MTYPE_MGMTD_FE_SESSION, session);
+	*sessionp = NULL;
+}
+
+/*
+ * Linear search of an adapter's sessions for the one created with the
+ * given client-supplied id; NULL if not found.
+ */
+static struct mgmt_fe_session_ctx *
+mgmt_fe_find_session_by_client_id(struct mgmt_fe_client_adapter *adapter,
+				  uint64_t client_id)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	FOREACH_SESSION_IN_LIST (adapter, session) {
+		if (session->client_id == client_id) {
+			MGMTD_FE_ADAPTER_DBG("Found session-id %" PRIu64
+					     " using client-id %" PRIu64,
+					     session->session_id, client_id);
+			return session;
+		}
+	}
+	MGMTD_FE_ADAPTER_DBG("Session not found using client-id %" PRIu64,
+			     client_id);
+	return NULL;
+}
+
+/* Hash a session by its 64-bit session-id, viewed as 32-bit words. */
+static unsigned int mgmt_fe_session_hash_key(const void *data)
+{
+	const struct mgmt_fe_session_ctx *session = data;
+	uint32_t nwords = sizeof(session->session_id) / sizeof(uint32_t);
+
+	return jhash2((uint32_t *)&session->session_id, nwords, 0);
+}
+
+/* Two session contexts are equal iff their session-ids match. */
+static bool mgmt_fe_session_hash_cmp(const void *d1, const void *d2)
+{
+	const struct mgmt_fe_session_ctx *a = d1;
+	const struct mgmt_fe_session_ctx *b = d2;
+
+	return a->session_id == b->session_id;
+}
+
+/*
+ * Map a session-id to its session context via the global hash.  Returns
+ * NULL for an unknown id or if the hash is not yet initialized.
+ */
+static inline struct mgmt_fe_session_ctx *
+mgmt_session_id2ctx(uint64_t session_id)
+{
+	struct mgmt_fe_session_ctx key = {0};
+	struct mgmt_fe_session_ctx *session;
+
+	if (!mgmt_fe_sessions)
+		return NULL;
+
+	key.session_id = session_id;
+	session = hash_lookup(mgmt_fe_sessions, &key);
+
+	return session;
+}
+
+/*
+ * Create a new session for 'client_id' on the given adapter.  Any
+ * existing session with the same client-id is torn down first.  Each
+ * session holds a reference on its adapter.
+ */
+static struct mgmt_fe_session_ctx *
+mgmt_fe_create_session(struct mgmt_fe_client_adapter *adapter,
+		       uint64_t client_id)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_fe_find_session_by_client_id(adapter, client_id);
+	if (session)
+		mgmt_fe_cleanup_session(&session);
+
+	session = XCALLOC(MTYPE_MGMTD_FE_SESSION,
+			  sizeof(struct mgmt_fe_session_ctx));
+	assert(session);
+	session->client_id = client_id;
+	session->adapter = adapter;
+	session->txn_id = MGMTD_TXN_ID_NONE;
+	session->cfg_txn_id = MGMTD_TXN_ID_NONE;
+	mgmt_fe_adapter_lock(adapter);
+	mgmt_fe_sessions_add_tail(&adapter->fe_sessions, session);
+	/* never hand out session-id 0 (even on counter wrap) */
+	if (!mgmt_fe_next_session_id)
+		mgmt_fe_next_session_id++;
+	session->session_id = mgmt_fe_next_session_id++;
+	hash_get(mgmt_fe_sessions, session, hash_alloc_intern);
+
+	return session;
+}
+
+/*
+ * Pack and queue a protobuf frontend message on the adapter's
+ * connection.  'short_circuit_ok' permits immediate in-process delivery
+ * on short-circuit connections.  Returns the msg_conn_send_msg() result.
+ */
+static int fe_adapter_send_msg(struct mgmt_fe_client_adapter *adapter,
+			       Mgmtd__FeMessage *fe_msg, bool short_circuit_ok)
+{
+	return msg_conn_send_msg(
+		adapter->conn, MGMT_MSG_VERSION_PROTOBUF, fe_msg,
+		mgmtd__fe_message__get_packed_size(fe_msg),
+		(size_t(*)(void *, void *))mgmtd__fe_message__pack,
+		short_circuit_ok);
+}
+
+/*
+ * Send a SESSION_REPLY for a create or destroy request.  On create the
+ * client's connection-id is echoed back so it can correlate the reply.
+ */
+static int fe_adapter_send_session_reply(struct mgmt_fe_client_adapter *adapter,
+					 struct mgmt_fe_session_ctx *session,
+					 bool create, bool success)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeSessionReply session_reply;
+
+	mgmtd__fe_session_reply__init(&session_reply);
+	session_reply.create = create;
+	if (create) {
+		session_reply.has_client_conn_id = 1;
+		session_reply.client_conn_id = session->client_id;
+	}
+	session_reply.session_id = session->session_id;
+	session_reply.success = success;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY;
+	fe_msg.session_reply = &session_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending SESSION_REPLY message to MGMTD Frontend client '%s'",
+		adapter->name);
+
+	return fe_adapter_send_msg(adapter, &fe_msg, true);
+}
+
+/*
+ * Send a LOCK_DS_REPLY for the given lock/unlock request.  The reply may
+ * be short-circuited when the connection itself is short-circuit.
+ *
+ * Fix: validate session->adapter *before* dereferencing it — the
+ * original read session->adapter->conn->is_short_circuit ahead of the
+ * assert, making the assert useless.
+ */
+static int fe_adapter_send_lockds_reply(struct mgmt_fe_session_ctx *session,
+					Mgmtd__DatastoreId ds_id,
+					uint64_t req_id, bool lock_ds,
+					bool success, const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeLockDsReply lockds_reply;
+	bool scok;
+
+	assert(session->adapter);
+	scok = session->adapter->conn->is_short_circuit;
+
+	mgmtd__fe_lock_ds_reply__init(&lockds_reply);
+	lockds_reply.session_id = session->session_id;
+	lockds_reply.ds_id = ds_id;
+	lockds_reply.req_id = req_id;
+	lockds_reply.lock = lock_ds;
+	lockds_reply.success = success;
+	if (error_if_any)
+		lockds_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY;
+	fe_msg.lockds_reply = &lockds_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending LOCK_DS_REPLY message to MGMTD Frontend client '%s' scok: %d",
+		session->adapter->name, scok);
+
+	return fe_adapter_send_msg(session->adapter, &fe_msg, scok);
+}
+
+/*
+ * Send a SETCFG_REPLY and update the adapter's set-config (and, for
+ * implicit commits, commit) timing statistics.  On implicit commit a
+ * deferred CONFIG-txn cleanup is scheduled for the session.
+ */
+static int fe_adapter_send_set_cfg_reply(struct mgmt_fe_session_ctx *session,
+					 Mgmtd__DatastoreId ds_id,
+					 uint64_t req_id, bool success,
+					 const char *error_if_any,
+					 bool implicit_commit)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeSetConfigReply setcfg_reply;
+
+	assert(session->adapter);
+
+	/* NOTE(review): truthiness test relies on MGMTD_TXN_ID_NONE == 0
+	 * — confirm against mgmt_txn.h. */
+	if (implicit_commit && session->cfg_txn_id)
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+	mgmtd__fe_set_config_reply__init(&setcfg_reply);
+	setcfg_reply.session_id = session->session_id;
+	setcfg_reply.ds_id = ds_id;
+	setcfg_reply.req_id = req_id;
+	setcfg_reply.success = success;
+	setcfg_reply.implicit_commit = implicit_commit;
+	if (error_if_any)
+		setcfg_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY;
+	fe_msg.setcfg_reply = &setcfg_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending SETCFG_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	if (implicit_commit) {
+		if (mm->perf_stats_en)
+			gettimeofday(&session->adapter->cmt_stats.last_end,
+				     NULL);
+		mgmt_fe_session_compute_commit_timers(
+			&session->adapter->cmt_stats);
+	}
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->setcfg_stats.last_end, NULL);
+	mgmt_fe_adapter_compute_set_cfg_timers(&session->adapter->setcfg_stats);
+
+	return fe_adapter_send_msg(session->adapter, &fe_msg, false);
+}
+
+/*
+ * Send a COMMIT_CONFIG_REPLY.  A completed (non-validate-only) or
+ * no-changes commit also schedules deferred cleanup of the session's
+ * CONFIG transaction, and the commit timing statistics are updated.
+ */
+static int fe_adapter_send_commit_cfg_reply(
+	struct mgmt_fe_session_ctx *session, Mgmtd__DatastoreId src_ds_id,
+	Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, enum mgmt_result result,
+	bool validate_only, const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeCommitConfigReply commcfg_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_commit_config_reply__init(&commcfg_reply);
+	commcfg_reply.session_id = session->session_id;
+	commcfg_reply.src_ds_id = src_ds_id;
+	commcfg_reply.dst_ds_id = dst_ds_id;
+	commcfg_reply.req_id = req_id;
+	/* "no changes to commit" is reported as success */
+	commcfg_reply.success =
+		(result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES)
+			? true
+			: false;
+	commcfg_reply.validate_only = validate_only;
+	if (error_if_any)
+		commcfg_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY;
+	fe_msg.commcfg_reply = &commcfg_reply;
+
+	MGMTD_FE_ADAPTER_DBG(
+		"Sending COMMIT_CONFIG_REPLY message to MGMTD Frontend client '%s'",
+		session->adapter->name);
+
+	/*
+	 * Cleanup the CONFIG transaction associated with this session.
+	 */
+	if (session->cfg_txn_id
+	    && ((result == MGMTD_SUCCESS && !validate_only)
+		|| (result == MGMTD_NO_CFG_CHANGES)))
+		mgmt_fe_session_register_event(
+			session, MGMTD_FE_SESSION_CFG_TXN_CLNUP);
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->cmt_stats.last_end, NULL);
+	mgmt_fe_session_compute_commit_timers(&session->adapter->cmt_stats);
+	return fe_adapter_send_msg(session->adapter, &fe_msg, false);
+}
+
+/*
+ * Send a GET_REPLY carrying one batch of YANG data.  On failure, or when
+ * the batch appears to be the last one (data->next_indx < 0 — presumably
+ * the end-of-data marker; verify against the txn code), a deferred SHOW
+ * transaction cleanup is scheduled.
+ */
+static int fe_adapter_send_get_reply(struct mgmt_fe_session_ctx *session,
+				     Mgmtd__DatastoreId ds_id, uint64_t req_id,
+				     bool success, Mgmtd__YangDataReply *data,
+				     const char *error_if_any)
+{
+	Mgmtd__FeMessage fe_msg;
+	Mgmtd__FeGetReply get_reply;
+
+	assert(session->adapter);
+
+	mgmtd__fe_get_reply__init(&get_reply);
+	get_reply.session_id = session->session_id;
+	get_reply.ds_id = ds_id;
+	get_reply.req_id = req_id;
+	get_reply.success = success;
+	get_reply.data = data;
+	if (error_if_any)
+		get_reply.error_if_any = (char *)error_if_any;
+
+	mgmtd__fe_message__init(&fe_msg);
+	fe_msg.message_case = MGMTD__FE_MESSAGE__MESSAGE_GET_REPLY;
+	fe_msg.get_reply = &get_reply;
+
+	MGMTD_FE_ADAPTER_DBG("Sending GET_REPLY message to MGMTD Frontend client '%s'",
+			     session->adapter->name);
+
+	/*
+	 * Cleanup the SHOW transaction associated with this session.
+	 */
+	if (session->txn_id && (!success || (data && data->next_indx < 0)))
+		mgmt_fe_session_register_event(session,
+					       MGMTD_FE_SESSION_SHOW_TXN_CLNUP);
+
+	return fe_adapter_send_msg(session->adapter, &fe_msg, false);
+}
+
+/* Timer trampoline: abort the session's CONFIG transaction. */
+static void mgmt_fe_session_cfg_txn_clnup(struct event *thread)
+{
+	struct mgmt_fe_session_ctx *session = EVENT_ARG(thread);
+
+	mgmt_fe_session_cfg_txn_cleanup(session);
+}
+
+/* Timer trampoline: destroy the session's SHOW transaction. */
+static void mgmt_fe_session_show_txn_clnup(struct event *thread)
+{
+	struct mgmt_fe_session_ctx *session = EVENT_ARG(thread);
+
+	mgmt_fe_session_show_txn_cleanup(session);
+}
+
+/*
+ * Schedule a deferred cleanup event for the session after a short delay
+ * (MGMTD_FE_MSG_PROC_DELAY_USEC).  No default case: the compiler flags
+ * any unhandled enum value.
+ */
+static void
+mgmt_fe_session_register_event(struct mgmt_fe_session_ctx *session,
+			       enum mgmt_session_event event)
+{
+	struct timeval tv = {.tv_sec = 0,
+			     .tv_usec = MGMTD_FE_MSG_PROC_DELAY_USEC};
+
+	switch (event) {
+	case MGMTD_FE_SESSION_CFG_TXN_CLNUP:
+		event_add_timer_tv(mgmt_loop, mgmt_fe_session_cfg_txn_clnup,
+				   session, &tv, &session->proc_cfg_txn_clnp);
+		break;
+	case MGMTD_FE_SESSION_SHOW_TXN_CLNUP:
+		event_add_timer_tv(mgmt_loop, mgmt_fe_session_show_txn_clnup,
+				   session, &tv, &session->proc_show_txn_clnp);
+		break;
+	}
+}
+
+/*
+ * Linear search of all adapters for the one whose connection uses
+ * 'conn_fd'; NULL if not found.
+ */
+static struct mgmt_fe_client_adapter *
+mgmt_fe_find_adapter_by_fd(int conn_fd)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		if (adapter->conn->fd == conn_fd)
+			return adapter;
+	}
+
+	return NULL;
+}
+
+/*
+ * Delete an adapter: tear down all of its sessions (each drops one
+ * adapter reference) and release the final reference, which frees it.
+ */
+static void mgmt_fe_adapter_delete(struct mgmt_fe_client_adapter *adapter)
+{
+	struct mgmt_fe_session_ctx *session;
+	MGMTD_FE_ADAPTER_DBG("deleting client adapter '%s'", adapter->name);
+
+	/* TODO: notify about client disconnect for appropriate cleanup */
+	FOREACH_SESSION_IN_LIST (adapter, session)
+		mgmt_fe_cleanup_session(&session);
+	mgmt_fe_sessions_fini(&adapter->fe_sessions);
+
+	/* only the list's own reference may remain at this point */
+	assert(adapter->refcount == 1);
+	mgmt_fe_adapter_unlock(&adapter);
+}
+
+/* msg_conn disconnect callback: delete the adapter for this connection. */
+static int mgmt_fe_adapter_notify_disconnect(struct msg_conn *conn)
+{
+	struct mgmt_fe_client_adapter *adapter = conn->user;
+
+	MGMTD_FE_ADAPTER_DBG("notify disconnect for client adapter '%s'",
+			     adapter->name);
+
+	mgmt_fe_adapter_delete(adapter);
+
+	return 0;
+}
+
+/*
+ * Purge any old connections that share the same client name with `adapter`
+ * (a client that reconnected leaves its stale connection behind).
+ */
+static void
+mgmt_fe_adapter_cleanup_old_conn(struct mgmt_fe_client_adapter *adapter)
+{
+	struct mgmt_fe_client_adapter *old;
+
+	FOREACH_ADAPTER_IN_LIST (old) {
+		if (old == adapter)
+			continue;
+		if (strncmp(adapter->name, old->name, sizeof(adapter->name)))
+			continue;
+
+		MGMTD_FE_ADAPTER_DBG(
+			"Client '%s' (FD:%d) seems to have reconnected. Removing old connection (FD:%d)",
+			adapter->name, adapter->conn->fd,
+			old->conn->fd);
+		msg_conn_disconnect(old->conn, false);
+	}
+}
+
+/*
+ * Handle a LOCKDS_REQ: lock or unlock the candidate/running DS for this
+ * session and reply with the outcome.  Note the asymmetry: a failed lock
+ * returns -1, but unlocking a DS not held by this session replies
+ * failure yet returns 0.
+ */
+static int
+mgmt_fe_session_handle_lockds_req_msg(struct mgmt_fe_session_ctx *session,
+				      Mgmtd__FeLockDsReq *lockds_req)
+{
+	struct mgmt_ds_ctx *ds_ctx;
+
+	if (lockds_req->ds_id != MGMTD_DS_CANDIDATE &&
+	    lockds_req->ds_id != MGMTD_DS_RUNNING) {
+		fe_adapter_send_lockds_reply(
+			session, lockds_req->ds_id, lockds_req->req_id,
+			lockds_req->lock, false,
+			"Lock/Unlock on DS other than candidate or running DS not supported");
+		return -1;
+	}
+
+	ds_ctx = mgmt_ds_get_ctx_by_id(mm, lockds_req->ds_id);
+	if (!ds_ctx) {
+		fe_adapter_send_lockds_reply(session, lockds_req->ds_id,
+					     lockds_req->req_id,
+					     lockds_req->lock, false,
+					     "Failed to retrieve handle for DS!");
+		return -1;
+	}
+
+	if (lockds_req->lock) {
+		if (mgmt_fe_session_write_lock_ds(lockds_req->ds_id, ds_ctx,
+						  session)) {
+			fe_adapter_send_lockds_reply(
+				session, lockds_req->ds_id, lockds_req->req_id,
+				lockds_req->lock, false,
+				"Lock already taken on DS by another session!");
+			return -1;
+		}
+	} else {
+		if (!session->ds_locked[lockds_req->ds_id]) {
+			fe_adapter_send_lockds_reply(
+				session, lockds_req->ds_id, lockds_req->req_id,
+				lockds_req->lock, false,
+				"Lock on DS was not taken by this session!");
+			return 0;
+		}
+
+		mgmt_fe_session_unlock_ds(lockds_req->ds_id, ds_ctx, session);
+	}
+
+	if (fe_adapter_send_lockds_reply(session, lockds_req->ds_id,
+					 lockds_req->req_id, lockds_req->lock,
+					 true, NULL) != 0) {
+		MGMTD_FE_ADAPTER_DBG(
+			"Failed to send LOCK_DS_REPLY for DS %u session-id: %" PRIu64
+			" from %s",
+			lockds_req->ds_id, session->session_id,
+			session->adapter->name);
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a SETCFG_REQ: validate the target datastores and lock state,
+ * create (or reuse) the session's CONFIG transaction, and queue the
+ * set-config request on it.
+ *
+ * Fix: failure of mgmt_create_txn() is now detected by comparing the
+ * returned *transaction* id against MGMTD_TXN_ID_NONE, not the
+ * session-id sentinel MGMTD_SESSION_ID_NONE used previously.
+ *
+ * TODO: this function has too many conditionals relating to complex error
+ * conditions. It needs to be simplified and these complex error conditions
+ * probably need to just disconnect the client with a suitably loud log message.
+ */
+static int
+mgmt_fe_session_handle_setcfg_req_msg(struct mgmt_fe_session_ctx *session,
+				      Mgmtd__FeSetConfigReq *setcfg_req)
+{
+	struct mgmt_ds_ctx *ds_ctx, *dst_ds_ctx = NULL;
+	bool txn_created = false;
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->setcfg_stats.last_start, NULL);
+
+	/* MGMTD currently only supports editing the candidate DS. */
+	if (setcfg_req->ds_id != MGMTD_DS_CANDIDATE) {
+		fe_adapter_send_set_cfg_reply(
+			session, setcfg_req->ds_id, setcfg_req->req_id, false,
+			"Set-Config on datastores other than Candidate DS not supported",
+			setcfg_req->implicit_commit);
+		return 0;
+	}
+	ds_ctx = mgmt_ds_get_ctx_by_id(mm, setcfg_req->ds_id);
+	assert(ds_ctx);
+
+	/* MGMTD currently only supports targetting the running DS. */
+	if (setcfg_req->implicit_commit &&
+	    setcfg_req->commit_ds_id != MGMTD_DS_RUNNING) {
+		fe_adapter_send_set_cfg_reply(
+			session, setcfg_req->ds_id, setcfg_req->req_id, false,
+			"Implicit commit on datastores other than running DS not supported",
+			setcfg_req->implicit_commit);
+		return 0;
+	}
+	dst_ds_ctx = mgmt_ds_get_ctx_by_id(mm, setcfg_req->commit_ds_id);
+	assert(dst_ds_ctx);
+
+	/* User should have write lock to change the DS */
+	if (!session->ds_locked[setcfg_req->ds_id]) {
+		fe_adapter_send_set_cfg_reply(session, setcfg_req->ds_id,
+					      setcfg_req->req_id, false,
+					      "Candidate DS is not locked",
+					      setcfg_req->implicit_commit);
+		return 0;
+	}
+
+	if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+		/* as we have the lock no-one else should have a config txn */
+		assert(mgmt_config_txn_in_progress() == MGMTD_SESSION_ID_NONE);
+
+		/* Start a CONFIG Transaction (if not started already) */
+		session->cfg_txn_id = mgmt_create_txn(session->session_id,
+						      MGMTD_TXN_TYPE_CONFIG);
+		if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+			fe_adapter_send_set_cfg_reply(
+				session, setcfg_req->ds_id, setcfg_req->req_id,
+				false,
+				"Failed to create a Configuration session!",
+				setcfg_req->implicit_commit);
+			return 0;
+		}
+		txn_created = true;
+
+		MGMTD_FE_ADAPTER_DBG("Created new Config txn-id: %" PRIu64
+				     " for session-id %" PRIu64,
+				     session->cfg_txn_id, session->session_id);
+	} else {
+		MGMTD_FE_ADAPTER_DBG("Config txn-id: %" PRIu64
+				     " for session-id: %" PRIu64
+				     " already created",
+				     session->cfg_txn_id, session->session_id);
+
+		if (setcfg_req->implicit_commit) {
+			/*
+			 * In this scenario need to skip cleanup of the txn,
+			 * so setting implicit commit to false.
+			 */
+			fe_adapter_send_set_cfg_reply(
+				session, setcfg_req->ds_id, setcfg_req->req_id,
+				false,
+				"A Configuration transaction is already in progress!",
+				false);
+			return 0;
+		}
+	}
+
+	/* Create the SETConfig request under the transaction. */
+	if (mgmt_txn_send_set_config_req(session->cfg_txn_id, setcfg_req->req_id,
+					 setcfg_req->ds_id, ds_ctx,
+					 setcfg_req->data, setcfg_req->n_data,
+					 setcfg_req->implicit_commit,
+					 setcfg_req->commit_ds_id,
+					 dst_ds_ctx) != 0) {
+		fe_adapter_send_set_cfg_reply(session, setcfg_req->ds_id,
+					      setcfg_req->req_id, false,
+					      "Request processing for SET-CONFIG failed!",
+					      setcfg_req->implicit_commit);
+
+		/* delete transaction if we just created it */
+		if (txn_created)
+			mgmt_destroy_txn(&session->cfg_txn_id);
+	}
+
+	return 0;
+}
+
+/*
+ * Handle a GET_REQ: validate the datastore against the config/oper
+ * flavor of the request, create a SHOW transaction (only one at a time
+ * per session), and queue the get request on it.  For config reads a
+ * private copy of the config root is taken to avoid locking.
+ *
+ * Fix: failure of mgmt_create_txn() is now detected by comparing the
+ * returned *transaction* id against MGMTD_TXN_ID_NONE, not the
+ * session-id sentinel MGMTD_SESSION_ID_NONE used previously.
+ */
+static int mgmt_fe_session_handle_get_req_msg(struct mgmt_fe_session_ctx *session,
+					      Mgmtd__FeGetReq *get_req)
+{
+	struct mgmt_ds_ctx *ds_ctx;
+	struct nb_config *cfg_root = NULL;
+	Mgmtd__DatastoreId ds_id = get_req->ds_id;
+	uint64_t req_id = get_req->req_id;
+	bool is_cfg = get_req->config;
+	bool ds_ok = true;
+
+	/* config reads: candidate/running; oper reads: operational only */
+	if (is_cfg && ds_id != MGMTD_DS_CANDIDATE && ds_id != MGMTD_DS_RUNNING)
+		ds_ok = false;
+	else if (!is_cfg && ds_id != MGMTD_DS_OPERATIONAL)
+		ds_ok = false;
+	if (!ds_ok) {
+		fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
+					  "get-req on unsupported datastore");
+		return 0;
+	}
+	ds_ctx = mgmt_ds_get_ctx_by_id(mm, ds_id);
+	assert(ds_ctx);
+
+	if (session->txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * Start a SHOW Transaction (if not started already)
+		 */
+		session->txn_id = mgmt_create_txn(session->session_id,
+						  MGMTD_TXN_TYPE_SHOW);
+		if (session->txn_id == MGMTD_TXN_ID_NONE) {
+			fe_adapter_send_get_reply(session, ds_id, req_id, false,
+						  NULL,
+						  "Failed to create a Show transaction!");
+			return -1;
+		}
+
+		MGMTD_FE_ADAPTER_DBG("Created new show txn-id: %" PRIu64
+				     " for session-id: %" PRIu64,
+				     session->txn_id, session->session_id);
+	} else {
+		fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
+					  "Request processing for GET failed!");
+		MGMTD_FE_ADAPTER_DBG("Transaction in progress txn-id: %" PRIu64
+				     " for session-id: %" PRIu64,
+				     session->txn_id, session->session_id);
+		return -1;
+	}
+
+	/*
+	 * Get a copy of the datastore config root, avoids locking.
+	 */
+	if (is_cfg)
+		cfg_root = nb_config_dup(mgmt_ds_get_nb_config(ds_ctx));
+
+	/*
+	 * Create a GET request under the transaction.
+	 */
+	if (mgmt_txn_send_get_req(session->txn_id, req_id, ds_id, cfg_root,
+				  get_req->data, get_req->n_data)) {
+		fe_adapter_send_get_reply(session, ds_id, req_id, false, NULL,
+					  "Request processing for GET failed!");
+
+		goto failed;
+	}
+
+	return 0;
+failed:
+	if (cfg_root)
+		nb_config_free(cfg_root);
+	/*
+	 * Destroy the transaction created recently.
+	 */
+	if (session->txn_id != MGMTD_TXN_ID_NONE)
+		mgmt_destroy_txn(&session->txn_id);
+
+	return -1;
+}
+
+
+/*
+ * Handle a COMMCFG_REQ: validate source (candidate) and destination
+ * (running) datastores and the session's locks on both, create a CONFIG
+ * transaction if none exists, and queue the commit on it.
+ *
+ * Fix: failure of mgmt_create_txn() is now detected by comparing the
+ * returned *transaction* id against MGMTD_TXN_ID_NONE, not the
+ * session-id sentinel MGMTD_SESSION_ID_NONE used previously.
+ */
+static int mgmt_fe_session_handle_commit_config_req_msg(
+	struct mgmt_fe_session_ctx *session,
+	Mgmtd__FeCommitConfigReq *commcfg_req)
+{
+	struct mgmt_ds_ctx *src_ds_ctx, *dst_ds_ctx;
+
+	if (mm->perf_stats_en)
+		gettimeofday(&session->adapter->cmt_stats.last_start, NULL);
+	session->adapter->cmt_stats.commit_cnt++;
+
+	/* Validate source and dest DS */
+	if (commcfg_req->src_ds_id != MGMTD_DS_CANDIDATE ||
+	    commcfg_req->dst_ds_id != MGMTD_DS_RUNNING) {
+		fe_adapter_send_commit_cfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"Source/Dest for commit must be candidate/running DS");
+		return 0;
+	}
+	src_ds_ctx = mgmt_ds_get_ctx_by_id(mm, commcfg_req->src_ds_id);
+	assert(src_ds_ctx);
+	dst_ds_ctx = mgmt_ds_get_ctx_by_id(mm, commcfg_req->dst_ds_id);
+	assert(dst_ds_ctx);
+
+	/* User should have lock on both source and dest DS */
+	if (!session->ds_locked[commcfg_req->dst_ds_id] ||
+	    !session->ds_locked[commcfg_req->src_ds_id]) {
+		fe_adapter_send_commit_cfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_DS_LOCK_FAILED,
+			commcfg_req->validate_only,
+			"Commit requires lock on candidate and/or running DS");
+		return 0;
+	}
+
+	if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+		/*
+		 * Start a CONFIG Transaction (if not started already)
+		 */
+		session->cfg_txn_id = mgmt_create_txn(session->session_id,
+						      MGMTD_TXN_TYPE_CONFIG);
+		if (session->cfg_txn_id == MGMTD_TXN_ID_NONE) {
+			fe_adapter_send_commit_cfg_reply(
+				session, commcfg_req->src_ds_id,
+				commcfg_req->dst_ds_id, commcfg_req->req_id,
+				MGMTD_INTERNAL_ERROR, commcfg_req->validate_only,
+				"Failed to create a Configuration session!");
+			return 0;
+		}
+		MGMTD_FE_ADAPTER_DBG("Created txn-id: %" PRIu64
+				     " for session-id %" PRIu64
+				     " for COMMIT-CFG-REQ",
+				     session->cfg_txn_id, session->session_id);
+	}
+
+	/*
+	 * Create COMMITConfig request under the transaction
+	 */
+	if (mgmt_txn_send_commit_config_req(
+		    session->cfg_txn_id, commcfg_req->req_id,
+		    commcfg_req->src_ds_id, src_ds_ctx, commcfg_req->dst_ds_id,
+		    dst_ds_ctx, commcfg_req->validate_only, commcfg_req->abort,
+		    false) != 0) {
+		fe_adapter_send_commit_cfg_reply(
+			session, commcfg_req->src_ds_id, commcfg_req->dst_ds_id,
+			commcfg_req->req_id, MGMTD_INTERNAL_ERROR,
+			commcfg_req->validate_only,
+			"Request processing for COMMIT-CONFIG failed!");
+		return 0;
+	}
+
+	return 0;
+}
+
+/*
+ * Dispatch one decoded frontend message to its handler.
+ *
+ * Fix: mgmt_session_id2ctx() returns NULL for an unknown session-id, but
+ * every request case dereferenced the result unchecked — a client sending
+ * a bogus session-id could crash mgmtd.  Each case now validates the
+ * session before use.
+ */
+static int
+mgmt_fe_adapter_handle_msg(struct mgmt_fe_client_adapter *adapter,
+			   Mgmtd__FeMessage *fe_msg)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	/*
+	 * protobuf-c adds a max size enum with an internal, and changing by
+	 * version, name; cast to an int to avoid unhandled enum warnings
+	 */
+	switch ((int)fe_msg->message_case) {
+	case MGMTD__FE_MESSAGE__MESSAGE_REGISTER_REQ:
+		MGMTD_FE_ADAPTER_DBG("Got REGISTER_REQ from '%s'",
+				     fe_msg->register_req->client_name);
+
+		if (strlen(fe_msg->register_req->client_name)) {
+			strlcpy(adapter->name,
+				fe_msg->register_req->client_name,
+				sizeof(adapter->name));
+			mgmt_fe_adapter_cleanup_old_conn(adapter);
+		}
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REQ:
+		if (fe_msg->session_req->create
+		    && fe_msg->session_req->id_case
+			       == MGMTD__FE_SESSION_REQ__ID_CLIENT_CONN_ID) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Got SESSION_REQ (create) for client-id %" PRIu64
+				" from '%s'",
+				fe_msg->session_req->client_conn_id,
+				adapter->name);
+
+			session = mgmt_fe_create_session(
+				adapter, fe_msg->session_req->client_conn_id);
+			fe_adapter_send_session_reply(adapter, session, true,
+						      session ? true : false);
+		} else if (
+			!fe_msg->session_req->create
+			&& fe_msg->session_req->id_case
+				   == MGMTD__FE_SESSION_REQ__ID_SESSION_ID) {
+			MGMTD_FE_ADAPTER_DBG(
+				"Got SESSION_REQ (destroy) for session-id %" PRIu64
+				"from '%s'",
+				fe_msg->session_req->session_id, adapter->name);
+
+			session = mgmt_session_id2ctx(
+				fe_msg->session_req->session_id);
+			if (!session) {
+				MGMTD_FE_ADAPTER_ERR(
+					"SESSION_REQ (destroy) for unknown session-id %" PRIu64
+					" from '%s'",
+					fe_msg->session_req->session_id,
+					adapter->name);
+				break;
+			}
+			fe_adapter_send_session_reply(adapter, session, false,
+						      true);
+			mgmt_fe_cleanup_session(&session);
+		}
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REQ:
+		session = mgmt_session_id2ctx(
+			fe_msg->lockds_req->session_id);
+		if (!session) {
+			MGMTD_FE_ADAPTER_ERR(
+				"LOCKDS_REQ for unknown session-id %" PRIu64
+				" from '%s'",
+				fe_msg->lockds_req->session_id, adapter->name);
+			break;
+		}
+		MGMTD_FE_ADAPTER_DBG(
+			"Got LOCKDS_REQ (%sLOCK) for DS:%s for session-id %" PRIu64
+			" from '%s'",
+			fe_msg->lockds_req->lock ? "" : "UN",
+			mgmt_ds_id2name(fe_msg->lockds_req->ds_id),
+			fe_msg->lockds_req->session_id, adapter->name);
+		mgmt_fe_session_handle_lockds_req_msg(
+			session, fe_msg->lockds_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REQ:
+		session = mgmt_session_id2ctx(
+			fe_msg->setcfg_req->session_id);
+		if (!session) {
+			MGMTD_FE_ADAPTER_ERR(
+				"SETCFG_REQ for unknown session-id %" PRIu64
+				" from '%s'",
+				fe_msg->setcfg_req->session_id, adapter->name);
+			break;
+		}
+		session->adapter->setcfg_stats.set_cfg_count++;
+		MGMTD_FE_ADAPTER_DBG(
+			"Got SETCFG_REQ (%d Xpaths, Implicit:%c) on DS:%s for session-id %" PRIu64
+			" from '%s'",
+			(int)fe_msg->setcfg_req->n_data,
+			fe_msg->setcfg_req->implicit_commit ? 'T' : 'F',
+			mgmt_ds_id2name(fe_msg->setcfg_req->ds_id),
+			fe_msg->setcfg_req->session_id, adapter->name);
+
+		mgmt_fe_session_handle_setcfg_req_msg(
+			session, fe_msg->setcfg_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REQ:
+		session = mgmt_session_id2ctx(
+			fe_msg->commcfg_req->session_id);
+		if (!session) {
+			MGMTD_FE_ADAPTER_ERR(
+				"COMMCFG_REQ for unknown session-id %" PRIu64
+				" from '%s'",
+				fe_msg->commcfg_req->session_id, adapter->name);
+			break;
+		}
+		MGMTD_FE_ADAPTER_DBG(
+			"Got COMMCFG_REQ for src-DS:%s dst-DS:%s (Abort:%c) on session-id %" PRIu64
+			" from '%s'",
+			mgmt_ds_id2name(fe_msg->commcfg_req->src_ds_id),
+			mgmt_ds_id2name(fe_msg->commcfg_req->dst_ds_id),
+			fe_msg->commcfg_req->abort ? 'T' : 'F',
+			fe_msg->commcfg_req->session_id, adapter->name);
+		mgmt_fe_session_handle_commit_config_req_msg(
+			session, fe_msg->commcfg_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_GET_REQ:
+		session = mgmt_session_id2ctx(fe_msg->get_req->session_id);
+		if (!session) {
+			MGMTD_FE_ADAPTER_ERR(
+				"GET_REQ for unknown session-id %" PRIu64
+				" from '%s'",
+				fe_msg->get_req->session_id, adapter->name);
+			break;
+		}
+		MGMTD_FE_ADAPTER_DBG("Got GET_REQ (iscfg %d) for DS:%s (xpaths: %d) on session-id %" PRIu64
+				     " from '%s'",
+				     (int)fe_msg->get_req->config,
+				     mgmt_ds_id2name(fe_msg->get_req->ds_id),
+				     (int)fe_msg->get_req->n_data,
+				     fe_msg->get_req->session_id, adapter->name);
+		mgmt_fe_session_handle_get_req_msg(session, fe_msg->get_req);
+		break;
+	case MGMTD__FE_MESSAGE__MESSAGE_NOTIFY_DATA_REQ:
+	case MGMTD__FE_MESSAGE__MESSAGE_REGNOTIFY_REQ:
+		MGMTD_FE_ADAPTER_ERR(
+			"Got unhandled message of type %u from '%s'",
+			fe_msg->message_case, adapter->name);
+		/*
+		 * TODO: Add handling code in future.
+		 */
+		break;
+	/*
+	 * NOTE: The following messages are always sent from MGMTD to
+	 * Frontend clients only and/or need not be handled on MGMTd.
+	 */
+	case MGMTD__FE_MESSAGE__MESSAGE_SESSION_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_LOCKDS_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_SETCFG_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_COMMCFG_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE_GET_REPLY:
+	case MGMTD__FE_MESSAGE__MESSAGE__NOT_SET:
+	default:
+		/*
+		 * A 'default' case is being added contrary to the
+		 * FRR code guidelines to take care of build
+		 * failures on certain build systems (courtesy of
+		 * the proto-c package).
+		 */
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * msg_conn receive callback: unpack a protobuf frontend message and hand
+ * it to the dispatcher.  Undecodable messages are logged and dropped.
+ */
+static void mgmt_fe_adapter_process_msg(uint8_t version, uint8_t *data,
+					size_t len, struct msg_conn *conn)
+{
+	struct mgmt_fe_client_adapter *adapter = conn->user;
+	Mgmtd__FeMessage *fe_msg = mgmtd__fe_message__unpack(NULL, len, data);
+
+	if (!fe_msg) {
+		MGMTD_FE_ADAPTER_DBG(
+			"Failed to decode %zu bytes for adapter: %s", len,
+			adapter->name);
+		return;
+	}
+	MGMTD_FE_ADAPTER_DBG(
+		"Decoded %zu bytes of message: %u from adapter: %s", len,
+		fe_msg->message_case, adapter->name);
+	(void)mgmt_fe_adapter_handle_msg(adapter, fe_msg);
+	mgmtd__fe_message__free_unpacked(fe_msg, NULL);
+}
+
+/* Take a reference on the adapter. */
+void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter)
+{
+	adapter->refcount++;
+}
+
+/*
+ * Drop a reference on the adapter; the last reference unlinks it, closes
+ * its connection and frees it.  Always NULLs the caller's pointer.
+ *
+ * Fix: dropped the spurious 'extern' storage-class specifier — this is a
+ * definition, and no other definition in this file carries one.
+ */
+void mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter)
+{
+	struct mgmt_fe_client_adapter *a = *adapter;
+
+	assert(a && a->refcount);
+
+	if (!--a->refcount) {
+		mgmt_fe_adapters_del(&mgmt_fe_adapters, a);
+		msg_server_conn_delete(a->conn);
+		XFREE(MTYPE_MGMTD_FE_ADPATER, a);
+	}
+	*adapter = NULL;
+}
+
+/*
+ * Initialize the FE adapter module: adapter list, session hash, and the
+ * unix-socket server frontend clients connect to.  Exits on server
+ * initialization failure.
+ */
+void mgmt_fe_adapter_init(struct event_loop *tm)
+{
+	assert(!mgmt_loop);
+	mgmt_loop = tm;
+
+	mgmt_fe_adapters_init(&mgmt_fe_adapters);
+
+	assert(!mgmt_fe_sessions);
+	mgmt_fe_sessions =
+		hash_create(mgmt_fe_session_hash_key, mgmt_fe_session_hash_cmp,
+			    "MGMT Frontend Sessions");
+
+	if (msg_server_init(&mgmt_fe_server, MGMTD_FE_SERVER_PATH, tm,
+			    mgmt_fe_create_adapter, "frontend",
+			    &mgmt_debug_fe)) {
+		zlog_err("cannot initialize frontend server");
+		exit(1);
+	}
+}
+
+/*
+ * hash_clean callback used at shutdown: any session still present after
+ * all adapters were deleted is a leak/bug, so log it and abort().
+ */
+static void mgmt_fe_abort_if_session(void *data)
+{
+	struct mgmt_fe_session_ctx *session = data;
+
+	MGMTD_FE_ADAPTER_ERR("found orphaned session id %" PRIu64
+			     " client id %" PRIu64 " adapter %s",
+			     session->session_id, session->client_id,
+			     session->adapter ? session->adapter->name
+					      : "NULL");
+	abort();
+}
+
+/*
+ * Destroy the FE adapter module: stop the server, delete all adapters
+ * (which tears down their sessions), then free the session hash,
+ * aborting if any session somehow outlived its adapter.
+ */
+void mgmt_fe_adapter_destroy(void)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	msg_server_cleanup(&mgmt_fe_server);
+
+	/* Deleting the adapters will delete all the sessions */
+	FOREACH_ADAPTER_IN_LIST (adapter)
+		mgmt_fe_adapter_delete(adapter);
+
+	hash_clean_and_free(&mgmt_fe_sessions, mgmt_fe_abort_if_session);
+}
+
+/*
+ * The server accepted a new connection: allocate and register an
+ * adapter for it (unless one already exists for this fd) and return the
+ * msg_conn to the server framework.  The adapter starts with a
+ * placeholder name until the client registers itself.
+ */
+struct msg_conn *mgmt_fe_create_adapter(int conn_fd, union sockunion *from)
+{
+	struct mgmt_fe_client_adapter *adapter = NULL;
+
+	adapter = mgmt_fe_find_adapter_by_fd(conn_fd);
+	if (!adapter) {
+		adapter = XCALLOC(MTYPE_MGMTD_FE_ADPATER,
+				  sizeof(struct mgmt_fe_client_adapter));
+		snprintf(adapter->name, sizeof(adapter->name), "Unknown-FD-%d",
+			 conn_fd);
+
+		mgmt_fe_sessions_init(&adapter->fe_sessions);
+		/* Initial reference; released on disconnect. */
+		mgmt_fe_adapter_lock(adapter);
+		mgmt_fe_adapters_add_tail(&mgmt_fe_adapters, adapter);
+
+		adapter->conn = msg_server_conn_create(
+			mgmt_loop, conn_fd, mgmt_fe_adapter_notify_disconnect,
+			mgmt_fe_adapter_process_msg, MGMTD_FE_MAX_NUM_MSG_PROC,
+			MGMTD_FE_MAX_NUM_MSG_WRITE, MGMTD_FE_MSG_MAX_LEN,
+			adapter, "FE-adapter");
+
+		/* Sentinels so the first sample always becomes the minimum. */
+		adapter->setcfg_stats.min_tm = ULONG_MAX;
+		adapter->cmt_stats.min_tm = ULONG_MAX;
+		MGMTD_FE_ADAPTER_DBG("Added new MGMTD Frontend adapter '%s'",
+				     adapter->name);
+	}
+	return adapter->conn;
+}
+
+/*
+ * Send a SETCFG reply to the frontend client owning @session_id.
+ * Fails (-1) if the session is unknown or @txn_id does not match the
+ * session's current config transaction.
+ */
+int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+			       Mgmtd__DatastoreId ds_id, uint64_t req_id,
+			       enum mgmt_result result,
+			       const char *error_if_any,
+			       bool implicit_commit)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->cfg_txn_id != txn_id) {
+		if (session)
+			MGMTD_FE_ADAPTER_ERR(
+				"txn-id doesn't match, session txn-id is %" PRIu64
+				" current txnid: %" PRIu64,
+				session->cfg_txn_id, txn_id);
+		return -1;
+	}
+
+	return fe_adapter_send_set_cfg_reply(session, ds_id, req_id,
+					     result == MGMTD_SUCCESS,
+					     error_if_any, implicit_commit);
+}
+
+/*
+ * Send a COMMCFG reply to the frontend client owning @session_id.
+ * Fails (-1) if the session is unknown or @txn_id does not match the
+ * session's current config transaction.
+ */
+int mgmt_fe_send_commit_cfg_reply(uint64_t session_id, uint64_t txn_id,
+				  Mgmtd__DatastoreId src_ds_id,
+				  Mgmtd__DatastoreId dst_ds_id,
+				  uint64_t req_id, bool validate_only,
+				  enum mgmt_result result,
+				  const char *error_if_any)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->cfg_txn_id != txn_id)
+		return -1;
+
+	return fe_adapter_send_commit_cfg_reply(session, src_ds_id, dst_ds_id,
+						req_id, result, validate_only,
+						error_if_any);
+}
+
+/*
+ * Send a GET (config/data) reply to the frontend client owning
+ * @session_id.  Unlike the config replies above this checks the
+ * session's *show* transaction id (txn_id), not cfg_txn_id.
+ */
+int mgmt_fe_send_get_reply(uint64_t session_id, uint64_t txn_id,
+			   Mgmtd__DatastoreId ds_id, uint64_t req_id,
+			   enum mgmt_result result,
+			   Mgmtd__YangDataReply *data_resp,
+			   const char *error_if_any)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || session->txn_id != txn_id)
+		return -1;
+
+	return fe_adapter_send_get_reply(session, ds_id, req_id,
+					 result == MGMTD_SUCCESS, data_resp,
+					 error_if_any);
+}
+
+/*
+ * Return the set-config stats of the adapter owning @session_id, or
+ * NULL if the session (or its adapter) no longer exists.  Stats are
+ * per-adapter, shared by all of its sessions.
+ */
+struct mgmt_setcfg_stats *
+mgmt_fe_get_session_setcfg_stats(uint64_t session_id)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || !session->adapter)
+		return NULL;
+
+	return &session->adapter->setcfg_stats;
+}
+
+/*
+ * Return the commit stats of the adapter owning @session_id, or NULL if
+ * the session (or its adapter) no longer exists.  Stats are
+ * per-adapter, shared by all of its sessions.
+ */
+struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id)
+{
+	struct mgmt_fe_session_ctx *session;
+
+	session = mgmt_session_id2ctx(session_id);
+	if (!session || !session->adapter)
+		return NULL;
+
+	return &session->adapter->cmt_stats;
+}
+
+/*
+ * Dump an adapter's commit statistics to the vty.  Only emitted when
+ * performance stats collection is enabled; the inner perf_stats_en
+ * checks are redundant after the early return but kept byte-identical.
+ */
+static void
+mgmt_fe_adapter_cmt_stats_write(struct vty *vty,
+				struct mgmt_fe_client_adapter *adapter)
+{
+	char buf[MGMT_LONG_TIME_MAX_LEN];
+
+	if (!mm->perf_stats_en)
+		return;
+
+	vty_out(vty, "    Num-Commits: \t\t\t%lu\n",
+		adapter->cmt_stats.commit_cnt);
+	if (adapter->cmt_stats.commit_cnt > 0) {
+		if (mm->perf_stats_en)
+			vty_out(vty, "    Max-Commit-Duration: \t\t%lu uSecs\n",
+				adapter->cmt_stats.max_tm);
+		vty_out(vty, "    Max-Commit-Batch-Size: \t\t%lu\n",
+			adapter->cmt_stats.max_batch_cnt);
+		if (mm->perf_stats_en)
+			vty_out(vty, "    Min-Commit-Duration: \t\t%lu uSecs\n",
+				adapter->cmt_stats.min_tm);
+		vty_out(vty, "    Min-Commit-Batch-Size: \t\t%lu\n",
+			adapter->cmt_stats.min_batch_cnt);
+		if (mm->perf_stats_en)
+			vty_out(vty,
+				"    Last-Commit-Duration: \t\t%lu uSecs\n",
+				adapter->cmt_stats.last_exec_tm);
+		vty_out(vty, "    Last-Commit-Batch-Size: \t\t%lu\n",
+			adapter->cmt_stats.last_batch_cnt);
+		vty_out(vty, "    Last-Commit-CfgData-Reqs: \t\t%lu\n",
+			adapter->cmt_stats.last_num_cfgdata_reqs);
+		vty_out(vty, "    Last-Commit-CfgApply-Reqs: \t\t%lu\n",
+			adapter->cmt_stats.last_num_apply_reqs);
+		if (mm->perf_stats_en) {
+			vty_out(vty, "    Last-Commit-Details:\n");
+			vty_out(vty, "      Commit Start: \t\t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.last_start, buf,
+					sizeof(buf)));
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+			vty_out(vty, "        Config-Validate Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.validate_start, buf,
+					sizeof(buf)));
+#endif
+			vty_out(vty, "        Prep-Config Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.prep_cfg_start, buf,
+					sizeof(buf)));
+			vty_out(vty, "        Txn-Create Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.txn_create_start,
+					buf, sizeof(buf)));
+			vty_out(vty,
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+				"        Send-Config Start: \t\t%s\n",
+#else
+				"        Send-Config-Validate Start: \t%s\n",
+#endif
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.send_cfg_start, buf,
+					sizeof(buf)));
+			vty_out(vty, "        Apply-Config Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.apply_cfg_start,
+					buf, sizeof(buf)));
+			vty_out(vty, "        Apply-Config End: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.apply_cfg_end, buf,
+					sizeof(buf)));
+			vty_out(vty, "        Txn-Delete Start: \t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.txn_del_start, buf,
+					sizeof(buf)));
+			vty_out(vty, "      Commit End: \t\t\t%s\n",
+				mgmt_realtime_to_string(
+					&adapter->cmt_stats.last_end, buf,
+					sizeof(buf)));
+		}
+	}
+}
+
+/*
+ * Dump an adapter's set-config statistics to the vty.  Only emitted
+ * when performance stats collection is enabled.
+ */
+static void
+mgmt_fe_adapter_setcfg_stats_write(struct vty *vty,
+				   struct mgmt_fe_client_adapter *adapter)
+{
+	char buf[MGMT_LONG_TIME_MAX_LEN];
+
+	if (!mm->perf_stats_en)
+		return;
+
+	vty_out(vty, "    Num-Set-Cfg: \t\t\t%lu\n",
+		adapter->setcfg_stats.set_cfg_count);
+	if (mm->perf_stats_en && adapter->setcfg_stats.set_cfg_count > 0) {
+		vty_out(vty, "    Max-Set-Cfg-Duration: \t\t%lu uSec\n",
+			adapter->setcfg_stats.max_tm);
+		vty_out(vty, "    Min-Set-Cfg-Duration: \t\t%lu uSec\n",
+			adapter->setcfg_stats.min_tm);
+		vty_out(vty, "    Avg-Set-Cfg-Duration: \t\t%lu uSec\n",
+			adapter->setcfg_stats.avg_tm);
+		vty_out(vty, "    Last-Set-Cfg-Details:\n");
+		vty_out(vty, "      Set-Cfg Start: \t\t\t%s\n",
+			mgmt_realtime_to_string(
+				&adapter->setcfg_stats.last_start, buf,
+				sizeof(buf)));
+		vty_out(vty, "      Set-Cfg End: \t\t\t%s\n",
+			mgmt_realtime_to_string(&adapter->setcfg_stats.last_end,
+						buf, sizeof(buf)));
+	}
+}
+
+/*
+ * Show all frontend adapters, their sessions and datastore locks.
+ *
+ * Fixes: the `locked` flag is now reset for every session; previously
+ * it was set once and never cleared, so after the first session holding
+ * a lock every later lock-free session printed neither its locks nor
+ * the "None" placeholder.  Also fixes the "Adpaters" heading typo.
+ */
+void mgmt_fe_adapter_status_write(struct vty *vty, bool detail)
+{
+	struct mgmt_fe_client_adapter *adapter;
+	struct mgmt_fe_session_ctx *session;
+	Mgmtd__DatastoreId ds_id;
+	bool locked;
+
+	vty_out(vty, "MGMTD Frontend Adapters\n");
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		vty_out(vty, "  Client: \t\t\t\t%s\n", adapter->name);
+		vty_out(vty, "    Conn-FD: \t\t\t\t%d\n", adapter->conn->fd);
+		if (detail) {
+			mgmt_fe_adapter_setcfg_stats_write(vty, adapter);
+			mgmt_fe_adapter_cmt_stats_write(vty, adapter);
+		}
+		vty_out(vty, "    Sessions\n");
+		FOREACH_SESSION_IN_LIST (adapter, session) {
+			/* Re-evaluate lock state per session. */
+			locked = false;
+			vty_out(vty, "      Session: \t\t\t\t%p\n", session);
+			vty_out(vty, "        Client-Id: \t\t\t%" PRIu64 "\n",
+				session->client_id);
+			vty_out(vty, "        Session-Id: \t\t\t%" PRIu64 "\n",
+				session->session_id);
+			vty_out(vty, "        DS-Locks:\n");
+			FOREACH_MGMTD_DS_ID (ds_id) {
+				if (session->ds_locked[ds_id]) {
+					locked = true;
+					vty_out(vty, "          %s\n",
+						mgmt_ds_id2name(ds_id));
+				}
+			}
+			if (!locked)
+				vty_out(vty, "          None\n");
+		}
+		vty_out(vty, "    Total-Sessions: \t\t\t%d\n",
+			(int)mgmt_fe_sessions_count(&adapter->fe_sessions));
+		vty_out(vty, "    Msg-Recvd: \t\t\t\t%" PRIu64 "\n",
+			adapter->conn->mstate.nrxm);
+		vty_out(vty, "    Bytes-Recvd: \t\t\t%" PRIu64 "\n",
+			adapter->conn->mstate.nrxb);
+		vty_out(vty, "    Msg-Sent: \t\t\t\t%" PRIu64 "\n",
+			adapter->conn->mstate.ntxm);
+		vty_out(vty, "    Bytes-Sent: \t\t\t%" PRIu64 "\n",
+			adapter->conn->mstate.ntxb);
+	}
+	vty_out(vty, "  Total: %d\n",
+		(int)mgmt_fe_adapters_count(&mgmt_fe_adapters));
+}
+
+/* Enable/disable collection of performance statistics (global flag). */
+void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config)
+{
+	mm->perf_stats_en = config;
+}
+
+/*
+ * Reset the per-adapter set-config and commit statistics.
+ *
+ * Fixes: cmt_stats is a per-adapter structure, yet it was memset once
+ * per *session* inside a redundant inner loop; and zeroing min_tm
+ * destroyed the ULONG_MAX sentinel installed at adapter creation, so
+ * minimum-duration tracking never worked again after a reset.  Both
+ * stats are now cleared once per adapter with the sentinels restored.
+ */
+void mgmt_fe_adapter_reset_perf_stats(struct vty *vty)
+{
+	struct mgmt_fe_client_adapter *adapter;
+
+	FOREACH_ADAPTER_IN_LIST (adapter) {
+		memset(&adapter->setcfg_stats, 0,
+		       sizeof(adapter->setcfg_stats));
+		memset(&adapter->cmt_stats, 0, sizeof(adapter->cmt_stats));
+		/* Restore min sentinels (see mgmt_fe_create_adapter()). */
+		adapter->setcfg_stats.min_tm = ULONG_MAX;
+		adapter->cmt_stats.min_tm = ULONG_MAX;
+	}
+}
diff --git a/mgmtd/mgmt_fe_adapter.h b/mgmtd/mgmt_fe_adapter.h
new file mode 100644
index 0000000..d2991ec
--- /dev/null
+++ b/mgmtd/mgmt_fe_adapter.h
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Frontend Client Connection Adapter
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#ifndef _FRR_MGMTD_FE_ADAPTER_H_
+#define _FRR_MGMTD_FE_ADAPTER_H_
+
+#include "mgmt_fe_client.h"
+#include "mgmt_msg.h"
+#include "mgmtd/mgmt_defines.h"
+
+struct mgmt_fe_client_adapter;
+struct mgmt_master;
+
+/*
+ * Per-adapter commit-performance statistics.  Timestamps record the
+ * phases of the most recent commit; durations are in microseconds.
+ */
+struct mgmt_commit_stats {
+	struct timeval last_start;
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+	struct timeval validate_start;
+#endif
+	struct timeval prep_cfg_start;
+	struct timeval txn_create_start;
+	struct timeval send_cfg_start;
+	struct timeval apply_cfg_start;
+	struct timeval apply_cfg_end;
+	struct timeval txn_del_start;
+	struct timeval last_end;
+	unsigned long last_exec_tm;
+	unsigned long max_tm;
+	/* min_tm is seeded with ULONG_MAX so the first sample wins. */
+	unsigned long min_tm;
+	unsigned long last_batch_cnt;
+	unsigned long last_num_cfgdata_reqs;
+	unsigned long last_num_apply_reqs;
+	unsigned long max_batch_cnt;
+	unsigned long min_batch_cnt;
+	unsigned long commit_cnt;
+};
+
+/*
+ * Per-adapter SETCFG-performance statistics; durations in microseconds.
+ */
+struct mgmt_setcfg_stats {
+	struct timeval last_start;
+	struct timeval last_end;
+	unsigned long last_exec_tm;
+	unsigned long max_tm;
+	/* min_tm is seeded with ULONG_MAX so the first sample wins. */
+	unsigned long min_tm;
+	unsigned long avg_tm;
+	unsigned long set_cfg_count;
+};
+
+PREDECL_LIST(mgmt_fe_sessions);
+
+PREDECL_LIST(mgmt_fe_adapters);
+
+/*
+ * One connected frontend client: its msg_conn, registered name, the
+ * sessions it owns, a refcount guarding deletion, and its statistics.
+ */
+struct mgmt_fe_client_adapter {
+	struct msg_conn *conn;
+	char name[MGMTD_CLIENT_NAME_MAX_LEN];
+
+	/* List of sessions created and being maintained for this client. */
+	struct mgmt_fe_sessions_head fe_sessions;
+
+	int refcount;
+	struct mgmt_commit_stats cmt_stats;
+	struct mgmt_setcfg_stats setcfg_stats;
+
+	/* Linkage on the global mgmt_fe_adapters list. */
+	struct mgmt_fe_adapters_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_fe_adapters, struct mgmt_fe_client_adapter, list_linkage);
+
+/* Initialise frontend adapter module */
+extern void mgmt_fe_adapter_init(struct event_loop *tm);
+
+/* Destroy frontend adapter module */
+extern void mgmt_fe_adapter_destroy(void);
+
+/* Acquire lock for frontend adapter */
+extern void mgmt_fe_adapter_lock(struct mgmt_fe_client_adapter *adapter);
+
+/* Remove lock from frontend adapter */
+extern void
+mgmt_fe_adapter_unlock(struct mgmt_fe_client_adapter **adapter);
+
+/* Create frontend adapter */
+extern struct msg_conn *mgmt_fe_create_adapter(int conn_fd,
+ union sockunion *su);
+
+/*
+ * Send set-config reply to the frontend client.
+ *
+ * session
+ * Unique session identifier.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * req_id
+ * Config request ID.
+ *
+ * result
+ * Config request result (MGMT_*).
+ *
+ * error_if_any
+ * Buffer to store human-readable error message in case of error.
+ *
+ * implicit_commit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+/* Prototype parameter name typo fixed: implcit_commit -> implicit_commit
+ * (matches the definition in mgmt_fe_adapter.c; no ABI change). */
+extern int mgmt_fe_send_set_cfg_reply(uint64_t session_id, uint64_t txn_id,
+				      Mgmtd__DatastoreId ds_id,
+				      uint64_t req_id,
+				      enum mgmt_result result,
+				      const char *error_if_any,
+				      bool implicit_commit);
+
+/*
+ * Send commit-config reply to the frontend client.
+ */
+extern int mgmt_fe_send_commit_cfg_reply(
+ uint64_t session_id, uint64_t txn_id, Mgmtd__DatastoreId src_ds_id,
+ Mgmtd__DatastoreId dst_ds_id, uint64_t req_id, bool validate_only,
+ enum mgmt_result result, const char *error_if_any);
+
+/*
+ * Send get-config/get-data reply to the frontend client.
+ */
+extern int mgmt_fe_send_get_reply(uint64_t session_id, uint64_t txn_id,
+ Mgmtd__DatastoreId ds_id, uint64_t req_id,
+ enum mgmt_result result,
+ Mgmtd__YangDataReply *data_resp,
+ const char *error_if_any);
+
+/* Fetch frontend client session set-config stats */
+extern struct mgmt_setcfg_stats *
+mgmt_fe_get_session_setcfg_stats(uint64_t session_id);
+
+/* Fetch frontend client session commit stats */
+extern struct mgmt_commit_stats *
+mgmt_fe_get_session_commit_stats(uint64_t session_id);
+
+extern void mgmt_fe_adapter_status_write(struct vty *vty, bool detail);
+extern void mgmt_fe_adapter_perf_measurement(struct vty *vty, bool config);
+extern void mgmt_fe_adapter_reset_perf_stats(struct vty *vty);
+#endif /* _FRR_MGMTD_FE_ADAPTER_H_ */
diff --git a/mgmtd/mgmt_history.c b/mgmtd/mgmt_history.c
new file mode 100644
index 0000000..d406932
--- /dev/null
+++ b/mgmtd/mgmt_history.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ */
+
+#include <zebra.h>
+#include "md5.h"
+#include "frrevent.h"
+#include "xref.h"
+
+#include "mgmt_fe_client.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+/*
+ * One commit-history record: the short-form commit id, human-readable
+ * timestamp, and path of the JSON snapshot of the running config.
+ * NOTE: this struct is fwrite/fread'd verbatim to the on-disk index, so
+ * its layout is the index file format.
+ */
+struct mgmt_cmt_info_t {
+	struct mgmt_cmt_infos_item cmts;
+
+	char cmtid_str[MGMT_SHORT_TIME_MAX_LEN];
+	char time_str[MGMT_LONG_TIME_MAX_LEN];
+	char cmt_json_file[PATH_MAX];
+};
+
+
+DECLARE_DLIST(mgmt_cmt_infos, struct mgmt_cmt_info_t, cmts);
+
+#define FOREACH_CMT_REC(mm, cmt_info) \
+ frr_each_safe (mgmt_cmt_infos, &mm->cmts, cmt_info)
+
+/*
+ * The only instance of VTY session that has triggered an ongoing
+ * config rollback operation.
+ */
+static struct vty *rollback_vty;
+
+/* True if something exists at @path (per access(2) with F_OK). */
+static bool file_exists(const char *path)
+{
+	return access(path, F_OK) == 0;
+}
+
+/* Delete @path if it exists; a failed unlink is logged and ignored. */
+static void remove_file(const char *path)
+{
+	if (file_exists(path) && unlink(path) != 0)
+		zlog_err("Failed to remove commit history file %s: %s", path,
+			 safe_strerror(errno));
+}
+
+/*
+ * Allocate a new commit record stamped with the current wall-clock
+ * time: long form for display, short form as the commit id and JSON
+ * snapshot file name.  Caller owns the returned MTYPE_MGMTD_CMT_INFO.
+ *
+ * Fixes: removed the unused `struct tm tm` and its localtime_r() call —
+ * mgmt_time_to_string() performs its own conversion, so that code was
+ * dead.
+ */
+static struct mgmt_cmt_info_t *mgmt_history_new_cmt_info(void)
+{
+	struct mgmt_cmt_info_t *new;
+	struct timespec tv;
+
+	new = XCALLOC(MTYPE_MGMTD_CMT_INFO, sizeof(struct mgmt_cmt_info_t));
+
+	clock_gettime(CLOCK_REALTIME, &tv);
+
+	mgmt_time_to_string(&tv, true, new->time_str, sizeof(new->time_str));
+	mgmt_time_to_string(&tv, false, new->cmtid_str, sizeof(new->cmtid_str));
+	snprintf(new->cmt_json_file, sizeof(new->cmt_json_file),
+		 MGMTD_COMMIT_FILE_PATH, new->cmtid_str);
+
+	return new;
+}
+
+/*
+ * Create a new commit record at the head of the history list, trimming
+ * the oldest record(s) so the list stays within MGMTD_MAX_COMMIT_LIST.
+ *
+ * Fixes: the limit check was `== MGMTD_MAX_COMMIT_LIST`; if the list
+ * ever exceeded the limit (e.g. the constant shrank across a restart
+ * and extra records were loaded from the index) nothing was trimmed.
+ * A `>=` loop now converges from any starting size.
+ */
+static struct mgmt_cmt_info_t *mgmt_history_create_cmt_rec(void)
+{
+	struct mgmt_cmt_info_t *new = mgmt_history_new_cmt_info();
+	struct mgmt_cmt_info_t *cmt_info;
+	struct mgmt_cmt_info_t *last_cmt_info;
+
+	while (mgmt_cmt_infos_count(&mm->cmts) >= MGMTD_MAX_COMMIT_LIST) {
+		/* Oldest record is at the tail; find it by walking. */
+		last_cmt_info = NULL;
+		FOREACH_CMT_REC (mm, cmt_info)
+			last_cmt_info = cmt_info;
+		if (!last_cmt_info)
+			break;
+
+		remove_file(last_cmt_info->cmt_json_file);
+		mgmt_cmt_infos_del(&mm->cmts, last_cmt_info);
+		XFREE(MTYPE_MGMTD_CMT_INFO, last_cmt_info);
+	}
+
+	mgmt_cmt_infos_add_head(&mm->cmts, new);
+	return new;
+}
+
+/* Look up a commit record by its commit-id string; NULL if absent. */
+static struct mgmt_cmt_info_t *
+mgmt_history_find_cmt_record(const char *cmtid_str)
+{
+	struct mgmt_cmt_info_t *rec;
+
+	FOREACH_CMT_REC (mm, rec)
+		if (!strcmp(rec->cmtid_str, cmtid_str))
+			return rec;
+
+	return NULL;
+}
+
+/*
+ * Load the on-disk commit-history index into the in-memory list,
+ * skipping entries whose JSON snapshot has disappeared.
+ *
+ * Fixes: the early existence check tested MGMTD_COMMIT_FILE_PATH, which
+ * is a printf *template* containing "%s" (see
+ * mgmt_history_new_cmt_info()), not a real path — so the check always
+ * failed and the index was never read back after restart.  It now
+ * checks the actual index file, matching the fopen() below.
+ */
+static bool mgmt_history_read_cmt_record_index(void)
+{
+	FILE *fp;
+	struct mgmt_cmt_info_t cmt_info;
+	struct mgmt_cmt_info_t *new;
+	int cnt = 0;
+
+	if (!file_exists(MGMTD_COMMIT_INDEX_FILE_NAME))
+		return false;
+
+	fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "rb");
+	if (!fp) {
+		zlog_err("Failed to open commit history %s for reading: %s",
+			 MGMTD_COMMIT_INDEX_FILE_NAME, safe_strerror(errno));
+		return false;
+	}
+
+	while ((fread(&cmt_info, sizeof(cmt_info), 1, fp)) > 0) {
+		if (cnt < MGMTD_MAX_COMMIT_LIST) {
+			if (!file_exists(cmt_info.cmt_json_file)) {
+				zlog_err("Commit in index, but file %s missing",
+					 cmt_info.cmt_json_file);
+				continue;
+			}
+
+			new = XCALLOC(MTYPE_MGMTD_CMT_INFO,
+				      sizeof(struct mgmt_cmt_info_t));
+			memcpy(new, &cmt_info, sizeof(struct mgmt_cmt_info_t));
+			mgmt_cmt_infos_add_tail(&mm->cmts, new);
+		} else {
+			zlog_warn(
+				"More records found in commit history file %s than expected",
+				MGMTD_COMMIT_INDEX_FILE_NAME);
+			fclose(fp);
+			return false;
+		}
+
+		cnt++;
+	}
+
+	fclose(fp);
+	return true;
+}
+
+/*
+ * Persist the in-memory commit history to the on-disk index file.
+ *
+ * Fixes: the staging array was a magic `[10]` with no bound on the copy
+ * loop — if the list ever held more entries than that, the stack buffer
+ * overflowed.  The array is now sized by MGMTD_MAX_COMMIT_LIST and the
+ * loop stops at capacity.
+ */
+static bool mgmt_history_dump_cmt_record_index(void)
+{
+	FILE *fp;
+	int ret = 0;
+	struct mgmt_cmt_info_t *cmt_info;
+	struct mgmt_cmt_info_t cmt_info_set[MGMTD_MAX_COMMIT_LIST];
+	int cnt = 0;
+
+	fp = fopen(MGMTD_COMMIT_INDEX_FILE_NAME, "wb");
+	if (!fp) {
+		zlog_err("Failed to open commit history %s for writing: %s",
+			 MGMTD_COMMIT_INDEX_FILE_NAME, safe_strerror(errno));
+		return false;
+	}
+
+	FOREACH_CMT_REC (mm, cmt_info) {
+		/* Never write more than the staging array can hold. */
+		if (cnt == (int)array_size(cmt_info_set))
+			break;
+		memcpy(&cmt_info_set[cnt], cmt_info,
+		       sizeof(struct mgmt_cmt_info_t));
+		cnt++;
+	}
+
+	if (!cnt) {
+		fclose(fp);
+		return false;
+	}
+
+	ret = fwrite(&cmt_info_set, sizeof(struct mgmt_cmt_info_t), cnt, fp);
+	fclose(fp);
+	if (ret != cnt) {
+		zlog_err("Failed to write full commit history, removing file");
+		remove_file(MGMTD_COMMIT_INDEX_FILE_NAME);
+		return false;
+	}
+	return true;
+}
+
+/*
+ * Roll the running datastore back to the given commit record: lock
+ * candidate and running, load the commit's JSON snapshot into candidate
+ * (unless @skip_file_load — in that case @cmt_info is never
+ * dereferenced), then trigger an internal commit-apply transaction.
+ * Only one rollback may be in flight at a time (guarded by
+ * rollback_vty); the vty is blocked until the transaction completes and
+ * mgmt_history_rollback_complete() runs.  On the success path the DS
+ * locks are intentionally NOT released here — the TXN code does that
+ * (see TODO below).  Returns 0 on success, non-zero on failure.
+ */
+static int mgmt_history_rollback_to_cmt(struct vty *vty,
+					struct mgmt_cmt_info_t *cmt_info,
+					bool skip_file_load)
+{
+	struct mgmt_ds_ctx *src_ds_ctx;
+	struct mgmt_ds_ctx *dst_ds_ctx;
+	int ret = 0;
+
+	if (rollback_vty) {
+		vty_out(vty, "ERROR: Rollback already in progress!\n");
+		return -1;
+	}
+
+	src_ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+	dst_ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_RUNNING);
+	assert(src_ds_ctx);
+	assert(dst_ds_ctx);
+
+	ret = mgmt_ds_lock(src_ds_ctx, vty->mgmt_session_id);
+	if (ret != 0) {
+		vty_out(vty,
+			"Failed to lock the DS %u for rollback Reason: %s!\n",
+			MGMTD_DS_RUNNING, strerror(ret));
+		return -1;
+	}
+
+	ret = mgmt_ds_lock(dst_ds_ctx, vty->mgmt_session_id);
+	if (ret != 0) {
+		mgmt_ds_unlock(src_ds_ctx);
+		vty_out(vty,
+			"Failed to lock the DS %u for rollback Reason: %s!\n",
+			MGMTD_DS_RUNNING, strerror(ret));
+		return -1;
+	}
+
+	if (!skip_file_load) {
+		ret = mgmt_ds_load_config_from_file(
+			src_ds_ctx, cmt_info->cmt_json_file, false);
+		if (ret != 0) {
+			vty_out(vty,
+				"Error with parsing the file with error code %d\n",
+				ret);
+			goto failed_unlock;
+		}
+	}
+
+	/* Internally trigger a commit-request. */
+	ret = mgmt_txn_rollback_trigger_cfg_apply(src_ds_ctx, dst_ds_ctx);
+	if (ret != 0) {
+		vty_out(vty,
+			"Error with creating commit apply txn with error code %d\n",
+			ret);
+		goto failed_unlock;
+	}
+
+	/* Persist the (already trimmed) history before blocking. */
+	mgmt_history_dump_cmt_record_index();
+
+	/*
+	 * TODO: Cleanup: the generic TXN code currently checks for rollback
+	 * and does the unlock when it completes.
+	 */
+
+	/*
+	 * Block the rollback command from returning till the rollback
+	 * is completed. On rollback completion mgmt_history_rollback_complete()
+	 * shall be called to resume the rollback command return to VTYSH.
+	 */
+	vty->mgmt_req_pending_cmd = "ROLLBACK";
+	rollback_vty = vty;
+	return 0;
+
+failed_unlock:
+	mgmt_ds_unlock(src_ds_ctx);
+	mgmt_ds_unlock(dst_ds_ctx);
+	return ret;
+}
+
+/*
+ * Called by the TXN code when the rollback transaction finishes; wakes
+ * the blocked vty and clears the in-progress guard.
+ * NOTE(review): assumes a rollback is actually pending (rollback_vty
+ * non-NULL) — confirm callers never invoke this otherwise.
+ */
+void mgmt_history_rollback_complete(bool success)
+{
+	vty_mgmt_resume_response(rollback_vty, success);
+	rollback_vty = NULL;
+}
+
+/*
+ * Roll back to a specific commit id.  Every commit *newer* than the
+ * target is deliberately destroyed (snapshot file and record) while
+ * scanning from the head, then the rollback is triggered at the target.
+ * Returns 0/-1 per mgmt_history_rollback_to_cmt(), -1 for unknown id.
+ */
+int mgmt_history_rollback_by_id(struct vty *vty, const char *cmtid_str)
+{
+	int ret = 0;
+	struct mgmt_cmt_info_t *cmt_info;
+
+	if (!mgmt_cmt_infos_count(&mm->cmts) ||
+	    !mgmt_history_find_cmt_record(cmtid_str)) {
+		vty_out(vty, "Invalid commit Id\n");
+		return -1;
+	}
+
+	FOREACH_CMT_REC (mm, cmt_info) {
+		if (strcmp(cmt_info->cmtid_str, cmtid_str) == 0) {
+			ret = mgmt_history_rollback_to_cmt(vty, cmt_info,
+							   false);
+			return ret;
+		}
+
+		/* Newer than the target: discard record and snapshot. */
+		remove_file(cmt_info->cmt_json_file);
+		mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+		XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+	}
+
+	return 0;
+}
+
+/*
+ * Roll back the last @num_cmts commits (default 1).  The newest
+ * num_cmts records are destroyed and the rollback is triggered at the
+ * first surviving record.  Rolling back the very last commit is
+ * rejected.  If the list ends up empty, candidate is reset and the
+ * apply is triggered with skip_file_load=true — cmt_info is not
+ * dereferenced on that path (NOTE(review): relies on
+ * mgmt_history_rollback_to_cmt() never touching cmt_info when
+ * skip_file_load is set — confirm if that function changes).
+ */
+int mgmt_history_rollback_n(struct vty *vty, int num_cmts)
+{
+	int ret = 0;
+	int cnt = 0;
+	struct mgmt_cmt_info_t *cmt_info;
+	size_t cmts;
+
+	if (!num_cmts)
+		num_cmts = 1;
+
+	cmts = mgmt_cmt_infos_count(&mm->cmts);
+	if ((int)cmts < num_cmts) {
+		vty_out(vty,
+			"Number of commits found (%d) less than required to rollback\n",
+			(int)cmts);
+		return -1;
+	}
+
+	if ((int)cmts == 1 || (int)cmts == num_cmts) {
+		vty_out(vty,
+			"Number of commits found (%d), Rollback of last commit is not supported\n",
+			(int)cmts);
+		return -1;
+	}
+
+	FOREACH_CMT_REC (mm, cmt_info) {
+		if (cnt == num_cmts) {
+			ret = mgmt_history_rollback_to_cmt(vty, cmt_info,
+							   false);
+			return ret;
+		}
+
+		cnt++;
+		remove_file(cmt_info->cmt_json_file);
+		mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+		XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+	}
+
+	if (!mgmt_cmt_infos_count(&mm->cmts)) {
+		mgmt_ds_reset_candidate();
+		ret = mgmt_history_rollback_to_cmt(vty, cmt_info, true);
+	}
+
+	return ret;
+}
+
+/* Print the commit-history table (newest first) to the vty. */
+void show_mgmt_cmt_history(struct vty *vty)
+{
+	struct mgmt_cmt_info_t *rec;
+	int slot = 0;
+
+	vty_out(vty, "Last 10 commit history:\n");
+	vty_out(vty, "  Slot Commit-ID               Commit-Record-Time\n");
+	FOREACH_CMT_REC (mm, rec) {
+		vty_out(vty, "  %4d %23s %s\n", slot, rec->cmtid_str,
+			rec->time_str);
+		slot++;
+	}
+}
+
+/*
+ * Record a freshly applied commit: create a history record, snapshot
+ * the datastore to its JSON file and rewrite the on-disk index.
+ */
+void mgmt_history_new_record(struct mgmt_ds_ctx *ds_ctx)
+{
+	struct mgmt_cmt_info_t *cmt_info = mgmt_history_create_cmt_rec();
+
+	mgmt_ds_dump_ds_to_file(cmt_info->cmt_json_file, ds_ctx);
+	mgmt_history_dump_cmt_record_index();
+}
+
+/* Initialize the history list and reload records persisted on disk. */
+void mgmt_history_init(void)
+{
+	/* Create commit record for previously stored commit-apply */
+	mgmt_cmt_infos_init(&mm->cmts);
+	mgmt_history_read_cmt_record_index();
+}
+
+/*
+ * Free all in-memory history records at shutdown.  Snapshot files and
+ * the on-disk index are intentionally left in place for the next run.
+ */
+void mgmt_history_destroy(void)
+{
+	struct mgmt_cmt_info_t *cmt_info;
+
+	FOREACH_CMT_REC(mm, cmt_info) {
+		mgmt_cmt_infos_del(&mm->cmts, cmt_info);
+		XFREE(MTYPE_MGMTD_CMT_INFO, cmt_info);
+	}
+
+	mgmt_cmt_infos_fini(&mm->cmts);
+}
diff --git a/mgmtd/mgmt_history.h b/mgmtd/mgmt_history.h
new file mode 100644
index 0000000..5d9b662
--- /dev/null
+++ b/mgmtd/mgmt_history.h
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ * Copyright (c) 2023, LabN Consulting, L.L.C.
+ *
+ */
+#ifndef _FRR_MGMTD_HISTORY_H_
+#define _FRR_MGMTD_HISTORY_H_
+
+#include "vrf.h"
+
+PREDECL_DLIST(mgmt_cmt_infos);
+
+struct mgmt_ds_ctx;
+
+/*
+ * Rollback specific commit from commit history.
+ *
+ * vty
+ * VTY context.
+ *
+ * cmtid_str
+ * Specific commit id from commit history.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_history_rollback_by_id(struct vty *vty, const char *cmtid_str);
+
+/*
+ * Rollback n commits from commit history.
+ *
+ * vty
+ * VTY context.
+ *
+ * num_cmts
+ * Number of commits to be rolled back.
+ *
+ * Returns:
+ * 0 on success, -1 on failure.
+ */
+extern int mgmt_history_rollback_n(struct vty *vty, int num_cmts);
+
+extern void mgmt_history_rollback_complete(bool success);
+
+/*
+ * Show mgmt commit history.
+ */
+extern void show_mgmt_cmt_history(struct vty *vty);
+
+extern void mgmt_history_new_record(struct mgmt_ds_ctx *ds_ctx);
+
+extern void mgmt_history_destroy(void);
+extern void mgmt_history_init(void);
+
+/*
+ * 012345678901234567890123456789
+ * 2023-12-31T12:12:12,012345678
+ * 20231231121212012345678
+ */
+#define MGMT_LONG_TIME_FMT "%Y-%m-%dT%H:%M:%S"
+#define MGMT_LONG_TIME_MAX_LEN 30
+#define MGMT_SHORT_TIME_FMT "%Y%m%d%H%M%S"
+#define MGMT_SHORT_TIME_MAX_LEN 24
+
+/*
+ * Render @tv into @buffer: long form "Y-m-dTH:M:S,nnnnnnnnn" or short
+ * form "YmdHMSnnnnnnnnn" (local time).  Returns @buffer.
+ */
+static inline const char *
+mgmt_time_to_string(struct timespec *tv, bool long_fmt, char *buffer, size_t sz)
+{
+	struct tm tm;
+	size_t n;
+
+	localtime_r(&tv->tv_sec, &tm);
+
+	n = strftime(buffer, sz,
+		     long_fmt ? MGMT_LONG_TIME_FMT : MGMT_SHORT_TIME_FMT, &tm);
+	assert(n < sz);
+	snprintf(&buffer[n], sz - n, long_fmt ? ",%09lu" : "%09lu",
+		 tv->tv_nsec);
+
+	return buffer;
+}
+
+/* Long-form render of a struct timeval (usec widened to nsec). */
+static inline const char *mgmt_realtime_to_string(struct timeval *tv, char *buf,
+						  size_t sz)
+{
+	struct timespec ts;
+
+	ts.tv_sec = tv->tv_sec;
+	ts.tv_nsec = (long)tv->tv_usec * 1000;
+
+	return mgmt_time_to_string(&ts, true, buf, sz);
+}
+
+#endif /* _FRR_MGMTD_HISTORY_H_ */
diff --git a/mgmtd/mgmt_main.c b/mgmtd/mgmt_main.c
new file mode 100644
index 0000000..39362fa
--- /dev/null
+++ b/mgmtd/mgmt_main.c
@@ -0,0 +1,287 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Main routine of mgmt.
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar
+ */
+
+#include <zebra.h>
+#include "lib/version.h"
+#include "routemap.h"
+#include "filter.h"
+#include "libfrr.h"
+#include "frr_pthread.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+#include "routing_nb.h"
+
+
+/* mgmt options, we use GNU getopt library. */
+/* mgmt options, we use GNU getopt library. */
+static const struct option longopts[] = {
+	{"skip_runas", no_argument, NULL, 'S'},
+	{"no_zebra", no_argument, NULL, 'Z'},
+	{"socket_size", required_argument, NULL, 's'},
+	{0}}; /* sentinel terminator required by getopt_long */
+
+static void mgmt_exit(int);
+static void mgmt_vrf_terminate(void);
+
+/* privileges */
+static zebra_capabilities_t _caps_p[] = {ZCAP_BIND, ZCAP_NET_RAW,
+ ZCAP_NET_ADMIN, ZCAP_SYS_ADMIN};
+
+struct zebra_privs_t mgmt_privs = {
+#if defined(FRR_USER) && defined(FRR_GROUP)
+ .user = FRR_USER,
+ .group = FRR_GROUP,
+#endif
+#ifdef VTY_GROUP
+ .vty_group = VTY_GROUP,
+#endif
+ .caps_p = _caps_p,
+ .cap_num_p = array_size(_caps_p),
+ .cap_num_i = 0,
+};
+
+static struct frr_daemon_info mgmtd_di;
+char backup_config_file[256];
+
+/* SIGHUP handler. */
+/* SIGHUP handler: config reload is deliberately disabled (see below). */
+static void sighup(void)
+{
+	zlog_info("SIGHUP received, ignoring");
+
+	/* Early return — everything below is intentionally dead code. */
+	return;
+
+	/*
+	 * This is turned off for the moment. There is all
+	 * sorts of config turned off by mgmt_terminate
+	 * that is not setup properly again in mgmt_reset.
+	 * I see no easy way to do this nor do I see that
+	 * this is a desirable way to reload config
+	 * given the yang work.
+	 */
+	/* Terminate all thread. */
+	mgmt_terminate();
+
+	/*
+	 * mgmt_reset();
+	 */
+	zlog_info("MGMTD restarting!");
+
+	/*
+	 * Reload config file.
+	 * vty_read_config(NULL, mgmtd_di.config_file, config_default);
+	 */
+	/* Try to return to normal operation. */
+}
+
+/* SIGINT handler. */
+/* SIGINT/SIGTERM handler: orderly shutdown, never returns. */
+static __attribute__((__noreturn__)) void sigint(void)
+{
+	zlog_notice("Terminating on signal");
+	assert(mm->terminating == false);
+	mm->terminating = true; /* global flag that shutting down */
+
+	mgmt_terminate();
+
+	mgmt_exit(0);
+
+	/* Unreachable; mgmt_exit() does not return. */
+	exit(0);
+}
+
+/* SIGUSR1 handler: rotate the log file. */
+static void sigusr1(void)
+{
+	zlog_rotate();
+}
+
+/*
+ * Try to free up allocations we know about so that diagnostic tools such as
+ * valgrind are able to better illuminate leaks.
+ *
+ * Zebra route removal and protocol teardown are not meant to be done here.
+ * For example, "retain_mode" may be set.
+ */
+static __attribute__((__noreturn__)) void mgmt_exit(int status)
+{
+	/* it only makes sense for this to be called on a clean exit */
+	assert(status == 0);
+
+	frr_early_fini();
+
+	/* stop pthreads (if any) */
+	frr_pthread_stop_all();
+
+	mgmt_vrf_terminate();
+
+	frr_fini();
+	exit(status);
+}
+
+/* Signal dispositions registered with libfrr at startup. */
+static struct frr_signal_t mgmt_signals[] = {
+	{
+		.signal = SIGHUP,
+		.handler = &sighup,
+	},
+	{
+		.signal = SIGUSR1,
+		.handler = &sigusr1,
+	},
+	{
+		.signal = SIGINT,
+		.handler = &sigint,
+	},
+	{
+		/* SIGTERM shares the SIGINT shutdown path. */
+		.signal = SIGTERM,
+		.handler = &sigint,
+	},
+};
+
+/* VRF create hook: mgmtd only logs; no per-VRF state is kept. */
+static int mgmt_vrf_new(struct vrf *vrf)
+{
+	zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id);
+
+	return 0;
+}
+
+/* VRF delete hook: mgmtd only logs; no per-VRF state is kept. */
+static int mgmt_vrf_delete(struct vrf *vrf)
+{
+	zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id);
+
+	return 0;
+}
+
+/* VRF enable hook: mgmtd only logs; no per-VRF state is kept. */
+static int mgmt_vrf_enable(struct vrf *vrf)
+{
+	zlog_debug("VRF Enable: %s(%u)", vrf->name, vrf->vrf_id);
+
+	return 0;
+}
+
+/* VRF disable hook: mgmtd only logs; no per-VRF state is kept. */
+static int mgmt_vrf_disable(struct vrf *vrf)
+{
+	zlog_debug("VRF Disable: %s(%u)", vrf->name, vrf->vrf_id);
+
+	/* Note: This is a callback, the VRF will be deleted by the caller. */
+	return 0;
+}
+
+/* VRF config-write hook: mgmtd emits no VRF configuration of its own. */
+static int mgmt_vrf_config_write(struct vty *vty)
+{
+	return 0;
+}
+
+/* Register the VRF hooks and VRF vty commands with libfrr. */
+static void mgmt_vrf_init(void)
+{
+	vrf_init(mgmt_vrf_new, mgmt_vrf_enable, mgmt_vrf_disable,
+		 mgmt_vrf_delete);
+	vrf_cmd_init(mgmt_vrf_config_write);
+}
+
+/* Tear down VRF subsystem state at shutdown. */
+static void mgmt_vrf_terminate(void)
+{
+	vrf_terminate();
+}
+
+/*
+ * List of YANG modules to be loaded in the process context of
+ * MGMTd.
+ *
+ * NOTE: In future this will also include the YANG modules of
+ * all individual Backend clients.
+ */
+static const struct frr_yang_module_info *const mgmt_yang_modules[] = {
+	&frr_filter_info,
+	&frr_interface_info,
+	&frr_route_map_info,
+	&frr_routing_info,
+	&frr_vrf_info,
+/*
+ * YANG module info supported by backend clients get added here.
+ * NOTE: Always set .ignore_cbs true to avoid validating
+ * backend northbound callbacks during loading.
+ */
+#ifdef HAVE_STATICD
+	&(struct frr_yang_module_info){.name = "frr-staticd",
+				       .ignore_cbs = true},
+#endif
+};
+
+FRR_DAEMON_INFO(mgmtd, MGMTD, .vty_port = MGMTD_VTY_PORT,
+
+ .proghelp = "FRR Management Daemon.",
+
+ .signals = mgmt_signals, .n_signals = array_size(mgmt_signals),
+
+ .privs = &mgmt_privs, .yang_modules = mgmt_yang_modules,
+ .n_yang_modules = array_size(mgmt_yang_modules),
+
+ /* avoid libfrr trying to read our config file for us */
+ .flags = FRR_MANUAL_VTY_START);
+
+#define DEPRECATED_OPTIONS ""
+
+struct frr_daemon_info *mgmt_daemon_info = &mgmtd_di;
+
+/* Main routine of mgmt. Treatment of argument and start mgmt finite
+ * state machine is handled at here.
+ */
+int main(int argc, char **argv)
+{
+ int opt;
+ int buffer_size = MGMTD_SOCKET_BUF_SIZE;
+
+ frr_preinit(&mgmtd_di, argc, argv);
+ frr_opt_add(
+ "s:" DEPRECATED_OPTIONS, longopts,
+ " -s, --socket_size Set MGMTD peer socket send buffer size\n");
+
+ /* Command line argument treatment. */
+ while (1) {
+ opt = frr_getopt(argc, argv, 0);
+
+ if (opt && opt < 128 && strchr(DEPRECATED_OPTIONS, opt)) {
+ fprintf(stderr,
+ "The -%c option no longer exists.\nPlease refer to the manual.\n",
+ opt);
+ continue;
+ }
+
+ if (opt == EOF)
+ break;
+
+ switch (opt) {
+ case 0:
+ break;
+ case 's':
+ buffer_size = atoi(optarg);
+ break;
+ default:
+ frr_help_exit(1);
+ break;
+ }
+ }
+
+ /* MGMTD master init. */
+ mgmt_master_init(frr_init(), buffer_size);
+
+ /* VRF Initializations. */
+ mgmt_vrf_init();
+
+ /* MGMTD related initialization. */
+ mgmt_init();
+
+ snprintf(backup_config_file, sizeof(backup_config_file),
+ "%s/zebra.conf", frr_sysconfdir);
+ mgmtd_di.backup_config_file = backup_config_file;
+
+ /* this will queue a read configs event */
+ frr_config_fork();
+
+ frr_run(mm->master);
+
+ /* Not reached. */
+ return 0;
+}
diff --git a/mgmtd/mgmt_memory.c b/mgmtd/mgmt_memory.c
new file mode 100644
index 0000000..b2a0f0e
--- /dev/null
+++ b/mgmtd/mgmt_memory.c
@@ -0,0 +1,33 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * mgmt memory type definitions
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include "mgmt_memory.h"
+
+/* this file is temporary in nature; definitions should be moved to the
+ * files they're used in
+ */
+
+DEFINE_MGROUP(MGMTD, "mgmt");
+DEFINE_MTYPE(MGMTD, MGMTD, "instance");
+DEFINE_MTYPE(MGMTD, MGMTD_XPATH, "xpath regex");
+DEFINE_MTYPE(MGMTD, MGMTD_BE_ADPATER, "backend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_ADPATER, "frontend adapter");
+DEFINE_MTYPE(MGMTD, MGMTD_FE_SESSION, "frontend session");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN, "txn");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_REQ, "txn request");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_SETCFG_REQ, "txn set-config requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_COMMCFG_REQ, "txn commit-config requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REQ, "txn get-data requests");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_GETDATA_REPLY, "txn get-data replies");
+DEFINE_MTYPE(MGMTD, MGMTD_TXN_CFG_BATCH, "txn config batches");
+DEFINE_MTYPE(MGMTD, MGMTD_CMT_INFO, "commit info");
diff --git a/mgmtd/mgmt_memory.h b/mgmtd/mgmt_memory.h
new file mode 100644
index 0000000..06518e3
--- /dev/null
+++ b/mgmtd/mgmt_memory.h
@@ -0,0 +1,29 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * mgmt memory type declarations
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_MEMORY_H
+#define _FRR_MGMTD_MEMORY_H
+
+#include "memory.h"
+
+DECLARE_MGROUP(MGMTD);
+DECLARE_MTYPE(MGMTD);
+DECLARE_MTYPE(MGMTD_XPATH);
+DECLARE_MTYPE(MGMTD_BE_ADPATER);
+DECLARE_MTYPE(MGMTD_FE_ADPATER);
+DECLARE_MTYPE(MGMTD_FE_SESSION);
+DECLARE_MTYPE(MGMTD_TXN);
+DECLARE_MTYPE(MGMTD_TXN_REQ);
+DECLARE_MTYPE(MGMTD_TXN_SETCFG_REQ);
+DECLARE_MTYPE(MGMTD_TXN_COMMCFG_REQ);
+DECLARE_MTYPE(MGMTD_TXN_GETDATA_REQ);
+DECLARE_MTYPE(MGMTD_TXN_GETDATA_REPLY);
+DECLARE_MTYPE(MGMTD_TXN_CFG_BATCH);
+DECLARE_MTYPE(MGMTD_BE_ADAPTER_MSG_BUF);
+DECLARE_MTYPE(MGMTD_CMT_INFO);
+#endif /* _FRR_MGMTD_MEMORY_H */
diff --git a/mgmtd/mgmt_txn.c b/mgmtd/mgmt_txn.c
new file mode 100644
index 0000000..452f9c8
--- /dev/null
+++ b/mgmtd/mgmt_txn.c
@@ -0,0 +1,2644 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Transactions
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+#include "hash.h"
+#include "jhash.h"
+#include "libfrr.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_memory.h"
+#include "mgmtd/mgmt_txn.h"
+
+#define MGMTD_TXN_DBG(fmt, ...) \
+ DEBUGD(&mgmt_debug_txn, "TXN: %s: " fmt, __func__, ##__VA_ARGS__)
+#define MGMTD_TXN_ERR(fmt, ...) \
+ zlog_err("%s: ERROR: " fmt, __func__, ##__VA_ARGS__)
+
+#define MGMTD_TXN_LOCK(txn) mgmt_txn_lock(txn, __FILE__, __LINE__)
+#define MGMTD_TXN_UNLOCK(txn) mgmt_txn_unlock(txn, __FILE__, __LINE__)
+
+enum mgmt_txn_event {
+ MGMTD_TXN_PROC_SETCFG = 1,
+ MGMTD_TXN_PROC_COMMITCFG,
+ MGMTD_TXN_PROC_GETCFG,
+ MGMTD_TXN_PROC_GETDATA,
+ MGMTD_TXN_COMMITCFG_TIMEOUT,
+ MGMTD_TXN_CLEANUP
+};
+
+PREDECL_LIST(mgmt_txn_reqs);
+
+struct mgmt_set_cfg_req {
+ Mgmtd__DatastoreId ds_id;
+ struct mgmt_ds_ctx *ds_ctx;
+ struct nb_cfg_change cfg_changes[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ uint16_t num_cfg_changes;
+ bool implicit_commit;
+ Mgmtd__DatastoreId dst_ds_id;
+ struct mgmt_ds_ctx *dst_ds_ctx;
+ struct mgmt_setcfg_stats *setcfg_stats;
+};
+
+enum mgmt_commit_phase {
+ MGMTD_COMMIT_PHASE_PREPARE_CFG = 0,
+ MGMTD_COMMIT_PHASE_TXN_CREATE,
+ MGMTD_COMMIT_PHASE_SEND_CFG,
+ MGMTD_COMMIT_PHASE_APPLY_CFG,
+ MGMTD_COMMIT_PHASE_TXN_DELETE,
+ MGMTD_COMMIT_PHASE_MAX
+};
+
+static inline const char *mgmt_commit_phase2str(enum mgmt_commit_phase cmt_phase)
+{
+ switch (cmt_phase) {
+ case MGMTD_COMMIT_PHASE_PREPARE_CFG:
+ return "PREP-CFG";
+ case MGMTD_COMMIT_PHASE_TXN_CREATE:
+ return "CREATE-TXN";
+ case MGMTD_COMMIT_PHASE_SEND_CFG:
+ return "SEND-CFG";
+ case MGMTD_COMMIT_PHASE_APPLY_CFG:
+ return "APPLY-CFG";
+ case MGMTD_COMMIT_PHASE_TXN_DELETE:
+ return "DELETE-TXN";
+ case MGMTD_COMMIT_PHASE_MAX:
+ return "Invalid/Unknown";
+ }
+
+ return "Invalid/Unknown";
+}
+
+PREDECL_LIST(mgmt_txn_batches);
+
+struct mgmt_txn_be_cfg_batch {
+ struct mgmt_txn_ctx *txn;
+ uint64_t batch_id;
+ enum mgmt_be_client_id be_id;
+ struct mgmt_be_client_adapter *be_adapter;
+ uint xp_subscr[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangCfgDataReq cfg_data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangCfgDataReq *cfg_datap[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangData data[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ Mgmtd__YangDataValue value[MGMTD_MAX_CFG_CHANGES_IN_BATCH];
+ size_t num_cfg_data;
+ int buf_space_left;
+ enum mgmt_commit_phase comm_phase;
+ struct mgmt_txn_batches_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_txn_batches, struct mgmt_txn_be_cfg_batch, list_linkage);
+
+#define FOREACH_TXN_CFG_BATCH_IN_LIST(list, batch) \
+ frr_each_safe (mgmt_txn_batches, list, batch)
+
+struct mgmt_commit_cfg_req {
+ Mgmtd__DatastoreId src_ds_id;
+ struct mgmt_ds_ctx *src_ds_ctx;
+ Mgmtd__DatastoreId dst_ds_id;
+ struct mgmt_ds_ctx *dst_ds_ctx;
+ uint32_t nb_txn_id;
+ uint8_t validate_only : 1;
+ uint8_t abort : 1;
+ uint8_t implicit : 1;
+ uint8_t rollback : 1;
+
+ /* Track commit phases */
+ enum mgmt_commit_phase curr_phase;
+ enum mgmt_commit_phase next_phase;
+
+ /*
+ * Set of config changes to commit. This is used only
+ * when changes are NOT to be determined by comparing
+ * candidate and running DSs. This is typically used
+ * for downloading all relevant configs for a new backend
+ * client that has recently come up and connected with
+ * MGMTD.
+ */
+ struct nb_config_cbs *cfg_chgs;
+
+ /*
+ * Details on all the Backend Clients associated with
+ * this commit.
+ */
+ struct mgmt_be_client_subscr_info subscr_info;
+
+ /*
+ * List of backend batches for this commit to be validated
+ * and applied at the backend.
+ *
+ * FIXME: Need to re-think this design for the case set of
+ * validators for a given YANG data item is different from
+ * the set of notifiers for the same. We may need to have
+ * separate list of batches for VALIDATE and APPLY.
+ */
+ struct mgmt_txn_batches_head curr_batches[MGMTD_BE_CLIENT_ID_MAX];
+ struct mgmt_txn_batches_head next_batches[MGMTD_BE_CLIENT_ID_MAX];
+ /*
+ * The last batch added for any backend client. This is always on
+ * 'curr_batches'
+ */
+ struct mgmt_txn_be_cfg_batch *last_be_cfg_batch[MGMTD_BE_CLIENT_ID_MAX];
+ struct hash *batches;
+ uint64_t next_batch_id;
+
+ struct mgmt_commit_stats *cmt_stats;
+};
+
+struct mgmt_get_data_reply {
+ /* Buffer space for preparing data reply */
+ int num_reply;
+ int last_batch;
+ Mgmtd__YangDataReply data_reply;
+ Mgmtd__YangData reply_data[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ Mgmtd__YangData *reply_datap[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ Mgmtd__YangDataValue reply_value[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+ char *reply_xpathp[MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH];
+};
+
+struct mgmt_get_data_req {
+ Mgmtd__DatastoreId ds_id;
+ struct nb_config *cfg_root;
+ char *xpaths[MGMTD_MAX_NUM_DATA_REQ_IN_BATCH];
+ int num_xpaths;
+
+ /*
+ * Buffer space for preparing reply.
+ * NOTE: Should only be malloc-ed on demand to reduce
+	 * memory footprint. Freed up via mgmt_txn_req_free()
+ */
+ struct mgmt_get_data_reply *reply;
+
+ int total_reply;
+};
+
+struct mgmt_txn_req {
+ struct mgmt_txn_ctx *txn;
+ enum mgmt_txn_event req_event;
+ uint64_t req_id;
+ union {
+ struct mgmt_set_cfg_req *set_cfg;
+ struct mgmt_get_data_req *get_data;
+ struct mgmt_commit_cfg_req commit_cfg;
+ } req;
+
+ bool pending_be_proc;
+ struct mgmt_txn_reqs_item list_linkage;
+};
+
+DECLARE_LIST(mgmt_txn_reqs, struct mgmt_txn_req, list_linkage);
+
+#define FOREACH_TXN_REQ_IN_LIST(list, req) \
+ frr_each_safe (mgmt_txn_reqs, list, req)
+
+struct mgmt_txn_ctx {
+ uint64_t session_id; /* One transaction per client session */
+ uint64_t txn_id;
+ enum mgmt_txn_type type;
+
+ /* struct mgmt_master *mm; */
+
+ struct event *proc_set_cfg;
+ struct event *proc_comm_cfg;
+ struct event *proc_get_cfg;
+ struct event *proc_get_data;
+ struct event *comm_cfg_timeout;
+ struct event *clnup;
+
+ /* List of backend adapters involved in this transaction */
+ struct mgmt_txn_badapters_head be_adapters;
+
+ int refcount;
+
+ struct mgmt_txns_item list_linkage;
+
+ /*
+ * List of pending set-config requests for a given
+ * transaction/session. Just one list for requests
+ * not processed at all. There's no backend interaction
+ * involved.
+ */
+ struct mgmt_txn_reqs_head set_cfg_reqs;
+ /*
+ * List of pending get-config requests for a given
+ * transaction/session. Just one list for requests
+ * not processed at all. There's no backend interaction
+ * involved.
+ */
+ struct mgmt_txn_reqs_head get_cfg_reqs;
+ /*
+ * List of pending get-data requests for a given
+	 * transaction/session. Two lists, one for requests
+	 * not processed at all, and one for requests that
+	 * have been sent to backend for processing.
+ */
+ struct mgmt_txn_reqs_head get_data_reqs;
+ struct mgmt_txn_reqs_head pending_get_datas;
+ /*
+ * There will always be one commit-config allowed for a given
+ * transaction/session. No need to maintain lists for it.
+ */
+ struct mgmt_txn_req *commit_cfg_req;
+};
+
+DECLARE_LIST(mgmt_txns, struct mgmt_txn_ctx, list_linkage);
+
+#define FOREACH_TXN_IN_LIST(mm, txn) \
+ frr_each_safe (mgmt_txns, &(mm)->txn_list, (txn))
+
+static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
+ enum mgmt_result result,
+ const char *error_if_any);
+
+static inline const char *mgmt_txn_commit_phase_str(struct mgmt_txn_ctx *txn,
+ bool curr)
+{
+ if (!txn->commit_cfg_req)
+ return "None";
+
+ return (mgmt_commit_phase2str(
+ curr ? txn->commit_cfg_req->req.commit_cfg.curr_phase
+ : txn->commit_cfg_req->req.commit_cfg.next_phase));
+}
+
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file, int line);
+static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
+ int line);
+static int mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter);
+
+static struct event_loop *mgmt_txn_tm;
+static struct mgmt_master *mgmt_txn_mm;
+
+static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
+ enum mgmt_txn_event event);
+
+static int
+mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter);
+
+static struct mgmt_txn_be_cfg_batch *
+mgmt_txn_cfg_batch_alloc(struct mgmt_txn_ctx *txn, enum mgmt_be_client_id id,
+ struct mgmt_be_client_adapter *be_adapter)
+{
+ struct mgmt_txn_be_cfg_batch *batch;
+
+ batch = XCALLOC(MTYPE_MGMTD_TXN_CFG_BATCH,
+ sizeof(struct mgmt_txn_be_cfg_batch));
+ assert(batch);
+ batch->be_id = id;
+
+ batch->txn = txn;
+ MGMTD_TXN_LOCK(txn);
+ assert(txn->commit_cfg_req);
+ mgmt_txn_batches_add_tail(&txn->commit_cfg_req->req.commit_cfg
+ .curr_batches[id],
+ batch);
+ batch->be_adapter = be_adapter;
+ batch->buf_space_left = MGMTD_BE_CFGDATA_MAX_MSG_LEN;
+ if (be_adapter)
+ mgmt_be_adapter_lock(be_adapter);
+
+ txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = batch;
+ if (!txn->commit_cfg_req->req.commit_cfg.next_batch_id)
+ txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
+ batch->batch_id = txn->commit_cfg_req->req.commit_cfg.next_batch_id++;
+ hash_get(txn->commit_cfg_req->req.commit_cfg.batches, batch,
+ hash_alloc_intern);
+
+ return batch;
+}
+
+static void mgmt_txn_cfg_batch_free(struct mgmt_txn_be_cfg_batch **batch)
+{
+ size_t indx;
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+
+ MGMTD_TXN_DBG(" freeing batch-id: %" PRIu64 " txn-id %" PRIu64,
+ (*batch)->batch_id, (*batch)->txn->txn_id);
+
+ assert((*batch)->txn && (*batch)->txn->type == MGMTD_TXN_TYPE_CONFIG);
+
+ cmtcfg_req = &(*batch)->txn->commit_cfg_req->req.commit_cfg;
+ hash_release(cmtcfg_req->batches, *batch);
+ mgmt_txn_batches_del(&cmtcfg_req->curr_batches[(*batch)->be_id], *batch);
+ mgmt_txn_batches_del(&cmtcfg_req->next_batches[(*batch)->be_id], *batch);
+
+ if ((*batch)->be_adapter)
+ mgmt_be_adapter_unlock(&(*batch)->be_adapter);
+
+ for (indx = 0; indx < (*batch)->num_cfg_data; indx++) {
+ if ((*batch)->data[indx].xpath) {
+ free((*batch)->data[indx].xpath);
+ (*batch)->data[indx].xpath = NULL;
+ }
+ }
+
+ MGMTD_TXN_UNLOCK(&(*batch)->txn);
+
+ XFREE(MTYPE_MGMTD_TXN_CFG_BATCH, *batch);
+ *batch = NULL;
+}
+
+static unsigned int mgmt_txn_cfgbatch_hash_key(const void *data)
+{
+ const struct mgmt_txn_be_cfg_batch *batch = data;
+
+ return jhash2((uint32_t *)&batch->batch_id,
+ sizeof(batch->batch_id) / sizeof(uint32_t), 0);
+}
+
+static bool mgmt_txn_cfgbatch_hash_cmp(const void *d1, const void *d2)
+{
+ const struct mgmt_txn_be_cfg_batch *batch1 = d1;
+ const struct mgmt_txn_be_cfg_batch *batch2 = d2;
+
+ return (batch1->batch_id == batch2->batch_id);
+}
+
+static void mgmt_txn_cfgbatch_hash_free(void *data)
+{
+ struct mgmt_txn_be_cfg_batch *batch = data;
+
+ mgmt_txn_cfg_batch_free(&batch);
+}
+
+static inline struct mgmt_txn_be_cfg_batch *
+mgmt_txn_cfgbatch_id2ctx(struct mgmt_txn_ctx *txn, uint64_t batch_id)
+{
+ struct mgmt_txn_be_cfg_batch key = { 0 };
+ struct mgmt_txn_be_cfg_batch *batch;
+
+ if (!txn->commit_cfg_req)
+ return NULL;
+
+ key.batch_id = batch_id;
+ batch = hash_lookup(txn->commit_cfg_req->req.commit_cfg.batches, &key);
+
+ return batch;
+}
+
+static void mgmt_txn_cleanup_be_cfg_batches(struct mgmt_txn_ctx *txn,
+ enum mgmt_be_client_id id)
+{
+ struct mgmt_txn_be_cfg_batch *batch;
+ struct mgmt_txn_batches_head *list;
+
+ list = &txn->commit_cfg_req->req.commit_cfg.curr_batches[id];
+ FOREACH_TXN_CFG_BATCH_IN_LIST (list, batch)
+ mgmt_txn_cfg_batch_free(&batch);
+
+ mgmt_txn_batches_fini(list);
+
+ list = &txn->commit_cfg_req->req.commit_cfg.next_batches[id];
+ FOREACH_TXN_CFG_BATCH_IN_LIST (list, batch)
+ mgmt_txn_cfg_batch_free(&batch);
+
+ mgmt_txn_batches_fini(list);
+
+ txn->commit_cfg_req->req.commit_cfg.last_be_cfg_batch[id] = NULL;
+}
+
+static struct mgmt_txn_req *mgmt_txn_req_alloc(struct mgmt_txn_ctx *txn,
+ uint64_t req_id,
+ enum mgmt_txn_event req_event)
+{
+ struct mgmt_txn_req *txn_req;
+ enum mgmt_be_client_id id;
+
+ txn_req = XCALLOC(MTYPE_MGMTD_TXN_REQ, sizeof(struct mgmt_txn_req));
+ assert(txn_req);
+ txn_req->txn = txn;
+ txn_req->req_id = req_id;
+ txn_req->req_event = req_event;
+ txn_req->pending_be_proc = false;
+
+ switch (txn_req->req_event) {
+ case MGMTD_TXN_PROC_SETCFG:
+ txn_req->req.set_cfg = XCALLOC(MTYPE_MGMTD_TXN_SETCFG_REQ,
+ sizeof(struct mgmt_set_cfg_req));
+ assert(txn_req->req.set_cfg);
+ mgmt_txn_reqs_add_tail(&txn->set_cfg_reqs, txn_req);
+ MGMTD_TXN_DBG("Added a new SETCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64 ", session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
+ break;
+ case MGMTD_TXN_PROC_COMMITCFG:
+ txn->commit_cfg_req = txn_req;
+ MGMTD_TXN_DBG("Added a new COMMITCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64 " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
+
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ mgmt_txn_batches_init(
+ &txn_req->req.commit_cfg.curr_batches[id]);
+ mgmt_txn_batches_init(
+ &txn_req->req.commit_cfg.next_batches[id]);
+ }
+
+ txn_req->req.commit_cfg.batches =
+ hash_create(mgmt_txn_cfgbatch_hash_key,
+ mgmt_txn_cfgbatch_hash_cmp,
+ "MGMT Config Batches");
+ break;
+ case MGMTD_TXN_PROC_GETCFG:
+ txn_req->req.get_data =
+ XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
+ sizeof(struct mgmt_get_data_req));
+ assert(txn_req->req.get_data);
+ mgmt_txn_reqs_add_tail(&txn->get_cfg_reqs, txn_req);
+ MGMTD_TXN_DBG("Added a new GETCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64 " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
+ break;
+ case MGMTD_TXN_PROC_GETDATA:
+ txn_req->req.get_data =
+ XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REQ,
+ sizeof(struct mgmt_get_data_req));
+ assert(txn_req->req.get_data);
+ mgmt_txn_reqs_add_tail(&txn->get_data_reqs, txn_req);
+ MGMTD_TXN_DBG("Added a new GETDATA req-id: %" PRIu64
+ " txn-id: %" PRIu64 " session-id: %" PRIu64,
+ txn_req->req_id, txn->txn_id, txn->session_id);
+ break;
+ case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ case MGMTD_TXN_CLEANUP:
+ break;
+ }
+
+ MGMTD_TXN_LOCK(txn);
+
+ return txn_req;
+}
+
+static void mgmt_txn_req_free(struct mgmt_txn_req **txn_req)
+{
+ int indx;
+ struct mgmt_txn_reqs_head *req_list = NULL;
+ struct mgmt_txn_reqs_head *pending_list = NULL;
+ enum mgmt_be_client_id id;
+ struct mgmt_be_client_adapter *adapter;
+ struct mgmt_commit_cfg_req *ccreq;
+ bool cleanup;
+
+ switch ((*txn_req)->req_event) {
+ case MGMTD_TXN_PROC_SETCFG:
+ for (indx = 0; indx < (*txn_req)->req.set_cfg->num_cfg_changes;
+ indx++) {
+ if ((*txn_req)->req.set_cfg->cfg_changes[indx].value) {
+ MGMTD_TXN_DBG("Freeing value for %s at %p ==> '%s'",
+ (*txn_req)
+ ->req.set_cfg
+ ->cfg_changes[indx]
+ .xpath,
+ (*txn_req)
+ ->req.set_cfg
+ ->cfg_changes[indx]
+ .value,
+ (*txn_req)
+ ->req.set_cfg
+ ->cfg_changes[indx]
+ .value);
+ free((void *)(*txn_req)
+ ->req.set_cfg->cfg_changes[indx]
+ .value);
+ }
+ }
+ req_list = &(*txn_req)->txn->set_cfg_reqs;
+ MGMTD_TXN_DBG("Deleting SETCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
+ XFREE(MTYPE_MGMTD_TXN_SETCFG_REQ, (*txn_req)->req.set_cfg);
+ break;
+ case MGMTD_TXN_PROC_COMMITCFG:
+ MGMTD_TXN_DBG("Deleting COMMITCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
+
+ ccreq = &(*txn_req)->req.commit_cfg;
+ cleanup = (ccreq->curr_phase >= MGMTD_COMMIT_PHASE_TXN_CREATE &&
+ ccreq->curr_phase < MGMTD_COMMIT_PHASE_TXN_DELETE);
+
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ /*
+ * Send TXN_DELETE to cleanup state for this
+ * transaction on backend
+ */
+
+ /*
+ * Get rid of the batches first so we don't end up doing
+ * anything more with them
+ */
+ mgmt_txn_cleanup_be_cfg_batches((*txn_req)->txn, id);
+ if (ccreq->batches) {
+ hash_clean(ccreq->batches,
+ mgmt_txn_cfgbatch_hash_free);
+ hash_free(ccreq->batches);
+ ccreq->batches = NULL;
+ }
+
+ /*
+ * If we were in the middle of the state machine then
+ * send a txn delete message
+ */
+ adapter = mgmt_be_get_adapter_by_id(id);
+ if (adapter && cleanup &&
+ ccreq->subscr_info.xpath_subscr[id])
+ mgmt_txn_send_be_txn_delete((*txn_req)->txn,
+ adapter);
+ }
+ break;
+ case MGMTD_TXN_PROC_GETCFG:
+ for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
+ indx++) {
+ if ((*txn_req)->req.get_data->xpaths[indx])
+ free((void *)(*txn_req)
+ ->req.get_data->xpaths[indx]);
+ }
+ req_list = &(*txn_req)->txn->get_cfg_reqs;
+ MGMTD_TXN_DBG("Deleting GETCFG req-id: %" PRIu64
+ " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
+ if ((*txn_req)->req.get_data->reply)
+ XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+ (*txn_req)->req.get_data->reply);
+
+ if ((*txn_req)->req.get_data->cfg_root)
+ nb_config_free((*txn_req)->req.get_data->cfg_root);
+
+ XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
+ break;
+ case MGMTD_TXN_PROC_GETDATA:
+ for (indx = 0; indx < (*txn_req)->req.get_data->num_xpaths;
+ indx++) {
+ if ((*txn_req)->req.get_data->xpaths[indx])
+ free((void *)(*txn_req)
+ ->req.get_data->xpaths[indx]);
+ }
+ pending_list = &(*txn_req)->txn->pending_get_datas;
+ req_list = &(*txn_req)->txn->get_data_reqs;
+ MGMTD_TXN_DBG("Deleting GETDATA req-id: %" PRIu64
+ " txn-id: %" PRIu64,
+ (*txn_req)->req_id, (*txn_req)->txn->txn_id);
+ if ((*txn_req)->req.get_data->reply)
+ XFREE(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+ (*txn_req)->req.get_data->reply);
+ XFREE(MTYPE_MGMTD_TXN_GETDATA_REQ, (*txn_req)->req.get_data);
+ break;
+ case MGMTD_TXN_COMMITCFG_TIMEOUT:
+ case MGMTD_TXN_CLEANUP:
+ break;
+ }
+
+ if ((*txn_req)->pending_be_proc && pending_list) {
+ mgmt_txn_reqs_del(pending_list, *txn_req);
+ MGMTD_TXN_DBG("Removed req-id: %" PRIu64
+ " from pending-list (left:%zu)",
+ (*txn_req)->req_id,
+ mgmt_txn_reqs_count(pending_list));
+ } else if (req_list) {
+ mgmt_txn_reqs_del(req_list, *txn_req);
+ MGMTD_TXN_DBG("Removed req-id: %" PRIu64
+ " from request-list (left:%zu)",
+ (*txn_req)->req_id, mgmt_txn_reqs_count(req_list));
+ }
+
+ (*txn_req)->pending_be_proc = false;
+ MGMTD_TXN_UNLOCK(&(*txn_req)->txn);
+ XFREE(MTYPE_MGMTD_TXN_REQ, (*txn_req));
+ *txn_req = NULL;
+}
+
+static void mgmt_txn_process_set_cfg(struct event *thread)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_req *txn_req;
+ struct mgmt_ds_ctx *ds_ctx;
+ struct nb_config *nb_config;
+ char err_buf[1024];
+ bool error;
+ int num_processed = 0;
+ size_t left;
+ struct mgmt_commit_stats *cmt_stats;
+ int ret = 0;
+
+ txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+ assert(txn);
+ cmt_stats = mgmt_fe_get_session_commit_stats(txn->session_id);
+
+ MGMTD_TXN_DBG("Processing %zu SET_CONFIG requests txn-id:%" PRIu64
+ " session-id: %" PRIu64,
+ mgmt_txn_reqs_count(&txn->set_cfg_reqs), txn->txn_id,
+ txn->session_id);
+
+ FOREACH_TXN_REQ_IN_LIST (&txn->set_cfg_reqs, txn_req) {
+ assert(txn_req->req_event == MGMTD_TXN_PROC_SETCFG);
+ ds_ctx = txn_req->req.set_cfg->ds_ctx;
+ if (!ds_ctx) {
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_INTERNAL_ERROR,
+ "No such datastore!",
+ txn_req->req.set_cfg
+ ->implicit_commit);
+ goto mgmt_txn_process_set_cfg_done;
+ }
+
+ nb_config = mgmt_ds_get_nb_config(ds_ctx);
+ if (!nb_config) {
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_INTERNAL_ERROR,
+ "Unable to retrieve DS Config Tree!",
+ txn_req->req.set_cfg
+ ->implicit_commit);
+ goto mgmt_txn_process_set_cfg_done;
+ }
+
+ error = false;
+ nb_candidate_edit_config_changes(nb_config,
+ txn_req->req.set_cfg->cfg_changes,
+ (size_t)txn_req->req.set_cfg
+ ->num_cfg_changes,
+ NULL, NULL, 0, err_buf,
+ sizeof(err_buf), &error);
+ if (error) {
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_INTERNAL_ERROR, err_buf,
+ txn_req->req.set_cfg
+ ->implicit_commit);
+ goto mgmt_txn_process_set_cfg_done;
+ }
+
+ if (txn_req->req.set_cfg->implicit_commit) {
+ assert(mgmt_txn_reqs_count(&txn->set_cfg_reqs) == 1);
+ assert(txn_req->req.set_cfg->dst_ds_ctx);
+
+ /* We expect the user to have locked the DST DS */
+ if (!mgmt_ds_is_locked(txn_req->req.set_cfg->dst_ds_ctx,
+ txn->session_id)) {
+ MGMTD_TXN_ERR("DS %u not locked for implicit commit txn-id: %" PRIu64
+ " session-id: %" PRIu64 " err: %s",
+ txn_req->req.set_cfg->dst_ds_id,
+ txn->txn_id, txn->session_id,
+ strerror(ret));
+ mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_DS_LOCK_FAILED,
+ "running DS not locked for implicit commit");
+ goto mgmt_txn_process_set_cfg_done;
+ }
+
+ mgmt_txn_send_commit_config_req(txn->txn_id,
+ txn_req->req_id,
+ txn_req->req.set_cfg
+ ->ds_id,
+ txn_req->req.set_cfg
+ ->ds_ctx,
+ txn_req->req.set_cfg
+ ->dst_ds_id,
+ txn_req->req.set_cfg
+ ->dst_ds_ctx,
+ false, false, true);
+
+ if (mm->perf_stats_en)
+ gettimeofday(&cmt_stats->last_start, NULL);
+ cmt_stats->commit_cnt++;
+ } else if (mgmt_fe_send_set_cfg_reply(txn->session_id,
+ txn->txn_id,
+ txn_req->req.set_cfg->ds_id,
+ txn_req->req_id,
+ MGMTD_SUCCESS, NULL,
+ false) != 0) {
+ MGMTD_TXN_ERR("Failed to send SET_CONFIG_REPLY txn-id %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
+ }
+
+mgmt_txn_process_set_cfg_done:
+
+ /*
+ * Note: The following will remove it from the list as well.
+ */
+ mgmt_txn_req_free(&txn_req);
+
+ num_processed++;
+ if (num_processed == MGMTD_TXN_MAX_NUM_SETCFG_PROC)
+ break;
+ }
+
+ left = mgmt_txn_reqs_count(&txn->set_cfg_reqs);
+ if (left) {
+ MGMTD_TXN_DBG("Processed maximum number of Set-Config requests (%d/%d/%d). Rescheduling for rest.",
+ num_processed, MGMTD_TXN_MAX_NUM_SETCFG_PROC,
+ (int)left);
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);
+ }
+}
+
+static int mgmt_txn_send_commit_cfg_reply(struct mgmt_txn_ctx *txn,
+ enum mgmt_result result,
+ const char *error_if_any)
+{
+ bool success, create_cmt_info_rec;
+
+ if (!txn->commit_cfg_req)
+ return -1;
+
+ success = (result == MGMTD_SUCCESS || result == MGMTD_NO_CFG_CHANGES);
+
+	/* TODO: these replies should not be sent if it's a rollback
+ * b/c right now that is special cased.. that special casing should be
+ * removed; however...
+ */
+ if (!txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id &&
+ !txn->commit_cfg_req->req.commit_cfg.rollback &&
+ mgmt_fe_send_commit_cfg_reply(txn->session_id, txn->txn_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_id,
+ txn->commit_cfg_req->req_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .validate_only,
+ result, error_if_any) != 0) {
+ MGMTD_TXN_ERR("Failed to send COMMIT-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
+ }
+
+ if (txn->commit_cfg_req->req.commit_cfg.implicit && txn->session_id &&
+ !txn->commit_cfg_req->req.commit_cfg.rollback &&
+ mgmt_fe_send_set_cfg_reply(txn->session_id, txn->txn_id,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_id,
+ txn->commit_cfg_req->req_id,
+ success ? MGMTD_SUCCESS
+ : MGMTD_INTERNAL_ERROR,
+ error_if_any, true) != 0) {
+ MGMTD_TXN_ERR("Failed to send SET-CONFIG-REPLY txn-id: %" PRIu64
+ " session-id: %" PRIu64,
+ txn->txn_id, txn->session_id);
+ }
+
+ if (success) {
+ /* Stop the commit-timeout timer */
+ /* XXX why only on success? */
+ EVENT_OFF(txn->comm_cfg_timeout);
+
+ create_cmt_info_rec =
+ (result != MGMTD_NO_CFG_CHANGES &&
+ !txn->commit_cfg_req->req.commit_cfg.rollback);
+
+ /*
+ * Successful commit: Merge Src DS into Dst DS if and only if
+ * this was not a validate-only or abort request.
+ */
+ if ((txn->session_id &&
+ !txn->commit_cfg_req->req.commit_cfg.validate_only &&
+ !txn->commit_cfg_req->req.commit_cfg.abort) ||
+ txn->commit_cfg_req->req.commit_cfg.rollback) {
+ mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+ .src_ds_ctx,
+ txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_ctx,
+ create_cmt_info_rec);
+ }
+
+ /*
+ * Restore Src DS back to Dest DS only through a commit abort
+ * request.
+ */
+ if (txn->session_id && txn->commit_cfg_req->req.commit_cfg.abort)
+ mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_ctx,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_ctx,
+ false);
+ } else {
+ /*
+	 * The commit has failed. For implicit commit requests restore
+ * back the contents of the candidate DS.
+ */
+ if (txn->commit_cfg_req->req.commit_cfg.implicit)
+ mgmt_ds_copy_dss(txn->commit_cfg_req->req.commit_cfg
+ .dst_ds_ctx,
+ txn->commit_cfg_req->req.commit_cfg
+ .src_ds_ctx,
+ false);
+ }
+
+ if (txn->commit_cfg_req->req.commit_cfg.rollback) {
+ mgmt_ds_unlock(txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
+ mgmt_ds_unlock(txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx);
+ /*
+ * Resume processing the rollback command.
+ *
+ * TODO: there's no good reason to special case rollback, the
+ * rollback boolean should be passed back to the FE client and it
+ * can do the right thing.
+ */
+ mgmt_history_rollback_complete(success);
+ }
+
+ txn->commit_cfg_req->req.commit_cfg.cmt_stats = NULL;
+ mgmt_txn_req_free(&txn->commit_cfg_req);
+
+ /*
+ * The CONFIG Transaction should be destroyed from Frontend-adapter.
+ * But in case the transaction is not triggered from a front-end session
+ * we need to cleanup by itself.
+ */
+ if (!txn->session_id)
+ mgmt_txn_register_event(txn, MGMTD_TXN_CLEANUP);
+
+ return 0;
+}
+
+static void
+mgmt_move_txn_cfg_batch_to_next(struct mgmt_commit_cfg_req *cmtcfg_req,
+ struct mgmt_txn_be_cfg_batch *batch,
+ struct mgmt_txn_batches_head *src_list,
+ struct mgmt_txn_batches_head *dst_list,
+ bool update_commit_phase,
+ enum mgmt_commit_phase to_phase)
+{
+ mgmt_txn_batches_del(src_list, batch);
+
+ if (update_commit_phase) {
+ MGMTD_TXN_DBG("Move txn-id %" PRIu64 " batch-id: %" PRIu64
+ " from '%s' --> '%s'",
+ batch->txn->txn_id, batch->batch_id,
+ mgmt_commit_phase2str(batch->comm_phase),
+ mgmt_txn_commit_phase_str(batch->txn, false));
+ batch->comm_phase = to_phase;
+ }
+
+ mgmt_txn_batches_add_tail(dst_list, batch);
+}
+
+static void mgmt_move_txn_cfg_batches(struct mgmt_txn_ctx *txn,
+ struct mgmt_commit_cfg_req *cmtcfg_req,
+ struct mgmt_txn_batches_head *src_list,
+ struct mgmt_txn_batches_head *dst_list,
+ bool update_commit_phase,
+ enum mgmt_commit_phase to_phase)
+{
+ struct mgmt_txn_be_cfg_batch *batch;
+
+ FOREACH_TXN_CFG_BATCH_IN_LIST (src_list, batch) {
+ mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, batch, src_list,
+ dst_list, update_commit_phase,
+ to_phase);
+ }
+}
+
+static int
+mgmt_try_move_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+ struct mgmt_commit_cfg_req *cmtcfg_req)
+{
+ struct mgmt_txn_batches_head *curr_list, *next_list;
+ enum mgmt_be_client_id id;
+
+ MGMTD_TXN_DBG("txn-id: %" PRIu64 ", Phase(current:'%s' next:'%s')",
+ txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
+
+ /*
+	 * Check if all clients have moved to the next phase.
+ */
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ if (cmtcfg_req->subscr_info.xpath_subscr[id] &&
+ mgmt_txn_batches_count(&cmtcfg_req->curr_batches[id])) {
+ /*
+			 * There's at least one client that hasn't moved to
+ * next phase.
+ *
+ * TODO: Need to re-think this design for the case
+ * set of validators for a given YANG data item is
+ * different from the set of notifiers for the same.
+ */
+ return -1;
+ }
+ }
+
+ MGMTD_TXN_DBG("Move entire txn-id: %" PRIu64 " from '%s' to '%s'",
+ txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
+
+ /*
+	 * If we are here, it means all the clients have moved to next phase.
+ * So we can move the whole commit to next phase.
+ */
+ cmtcfg_req->curr_phase = cmtcfg_req->next_phase;
+ cmtcfg_req->next_phase++;
+ MGMTD_TXN_DBG("Move back all config batches for txn-id: %" PRIu64
+ " from next to current branch",
+ txn->txn_id);
+ FOREACH_MGMTD_BE_CLIENT_ID (id) {
+ curr_list = &cmtcfg_req->curr_batches[id];
+ next_list = &cmtcfg_req->next_batches[id];
+ mgmt_move_txn_cfg_batches(txn, cmtcfg_req, next_list, curr_list,
+ false, 0);
+ }
+
+ mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+ return 0;
+}
+
+static int
+mgmt_move_be_commit_to_next_phase(struct mgmt_txn_ctx *txn,
+ struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_commit_cfg_req *cmtcfg_req;
+ struct mgmt_txn_batches_head *curr_list, *next_list;
+
+ if (txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
+ return -1;
+
+ cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+ MGMTD_TXN_DBG("Move txn-id: %" PRIu64
+ " for '%s' Phase(current: '%s' next:'%s')",
+ txn->txn_id, adapter->name,
+ mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
+
+ MGMTD_TXN_DBG("Move all config batches for '%s' from current to next list",
+ adapter->name);
+ curr_list = &cmtcfg_req->curr_batches[adapter->id];
+ next_list = &cmtcfg_req->next_batches[adapter->id];
+ mgmt_move_txn_cfg_batches(txn, cmtcfg_req, curr_list, next_list, true,
+ cmtcfg_req->next_phase);
+
+ MGMTD_TXN_DBG("txn-id: %" PRIu64 ", Phase(current:'%s' next:'%s')",
+ txn->txn_id, mgmt_txn_commit_phase_str(txn, true),
+ mgmt_txn_commit_phase_str(txn, false));
+
+ /*
+	 * Check if all clients have moved to the next phase.
+ */
+ mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+
+ return 0;
+}
+
+/*
+ * Walk the set of config changes and pack them into per-backend-client
+ * config batches, allocating a fresh batch whenever the current one is
+ * full (by item count or by remaining buffer space).
+ *
+ * On success records the total change count in the commit stats, sets
+ * the next commit phase to TXN_CREATE, and returns 0.  Returns -1
+ * (after sending a failure/no-changes COMMIT_CONFIG_REPLY) when an
+ * xpath cannot be derived or no backend client cares about any change.
+ */
+static int mgmt_txn_create_config_batches(struct mgmt_txn_req *txn_req,
+					  struct nb_config_cbs *changes)
+{
+	struct nb_config_cb *cb, *nxt;
+	struct nb_config_change *chg;
+	struct mgmt_txn_be_cfg_batch *batch;
+	struct mgmt_be_client_subscr_info subscr_info;
+	char *xpath = NULL, *value = NULL;
+	char err_buf[1024];
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_adapter *adapter;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	bool found_validator;
+	int num_chgs = 0;
+	int xpath_len, value_len;
+
+	cmtcfg_req = &txn_req->req.commit_cfg;
+
+	RB_FOREACH_SAFE (cb, nb_config_cbs, changes, nxt) {
+		chg = (struct nb_config_change *)cb;
+
+		/*
+		 * Could have directly pointed to xpath in nb_node.
+		 * But dont want to mess with it now.
+		 * xpath = chg->cb.nb_node->xpath;
+		 */
+		xpath = lyd_path(chg->cb.dnode, LYD_PATH_STD, NULL, 0);
+		if (!xpath) {
+			(void)mgmt_txn_send_commit_cfg_reply(
+				txn_req->txn, MGMTD_INTERNAL_ERROR,
+				"Internal error! Could not get Xpath from Ds node!");
+			return -1;
+		}
+
+		/* Containers have no value; use the fixed placeholder. */
+		value = (char *)lyd_get_value(chg->cb.dnode);
+		if (!value)
+			value = (char *)MGMTD_BE_CONTAINER_NODE_VAL;
+
+		MGMTD_TXN_DBG("XPATH: %s, Value: '%s'", xpath,
+			      value ? value : "NIL");
+
+		/* Which backend clients validate/get notified for this xpath? */
+		mgmt_be_get_subscr_info_for_xpath(xpath, &subscr_info);
+
+		xpath_len = strlen(xpath) + 1;
+		value_len = strlen(value) + 1;
+		found_validator = false;
+		FOREACH_MGMTD_BE_CLIENT_ID (id) {
+			if (!(subscr_info.xpath_subscr[id] &
+			      (MGMT_SUBSCR_VALIDATE_CFG |
+			       MGMT_SUBSCR_NOTIFY_CFG)))
+				continue;
+
+			adapter = mgmt_be_get_adapter_by_id(id);
+			if (!adapter)
+				continue;
+
+			/* Start a new batch when the current one cannot fit
+			 * this change (item limit or buffer space).
+			 */
+			batch = cmtcfg_req->last_be_cfg_batch[id];
+			if (!batch ||
+			    (batch->num_cfg_data ==
+			     MGMTD_MAX_CFG_CHANGES_IN_BATCH) ||
+			    (batch->buf_space_left < (xpath_len + value_len))) {
+				/* Allocate a new config batch */
+				batch = mgmt_txn_cfg_batch_alloc(txn_req->txn,
+								 id, adapter);
+			}
+
+			batch->buf_space_left -= (xpath_len + value_len);
+			memcpy(&batch->xp_subscr[batch->num_cfg_data],
+			       &subscr_info.xpath_subscr[id],
+			       sizeof(batch->xp_subscr[0]));
+
+			mgmt_yang_cfg_data_req_init(
+				&batch->cfg_data[batch->num_cfg_data]);
+			batch->cfg_datap[batch->num_cfg_data] =
+				&batch->cfg_data[batch->num_cfg_data];
+
+			if (chg->cb.operation == NB_OP_DESTROY)
+				batch->cfg_data[batch->num_cfg_data].req_type =
+					MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA;
+			else
+				batch->cfg_data[batch->num_cfg_data].req_type =
+					MGMTD__CFG_DATA_REQ_TYPE__SET_DATA;
+
+			mgmt_yang_data_init(&batch->data[batch->num_cfg_data]);
+			batch->cfg_data[batch->num_cfg_data].data =
+				&batch->data[batch->num_cfg_data];
+			batch->data[batch->num_cfg_data].xpath = strdup(xpath);
+
+			mgmt_yang_data_value_init(
+				&batch->value[batch->num_cfg_data]);
+			batch->data[batch->num_cfg_data].value =
+				&batch->value[batch->num_cfg_data];
+			batch->value[batch->num_cfg_data].value_case =
+				MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+			batch->value[batch->num_cfg_data].encoded_str_val =
+				value;
+			/*
+			 * NOTE(review): the value pointer is handed to the
+			 * first subscribed client only; any further client in
+			 * this loop gets a NULL encoded_str_val.  Looks like a
+			 * deliberate single-owner hand-off of the borrowed
+			 * lyd value -- confirm.
+			 */
+			value = NULL;
+
+			if (subscr_info.xpath_subscr[id] &
+			    MGMT_SUBSCR_VALIDATE_CFG)
+				found_validator = true;
+
+			cmtcfg_req->subscr_info.xpath_subscr[id] |=
+				subscr_info.xpath_subscr[id];
+			MGMTD_TXN_DBG(" -- %s, batch-id: %" PRIu64 " item:%d",
+				      adapter->name, batch->batch_id,
+				      (int)batch->num_cfg_data);
+
+			batch->num_cfg_data++;
+			num_chgs++;
+		}
+
+		if (!found_validator) {
+			/* Fixed: message was missing its closing quote. */
+			snprintf(err_buf, sizeof(err_buf),
+				 "No validator module found for XPATH: '%s'",
+				 xpath);
+			MGMTD_TXN_ERR("***** %s", err_buf);
+		}
+
+		free(xpath);
+	}
+
+	cmtcfg_req->cmt_stats->last_batch_cnt = num_chgs;
+	if (!num_chgs) {
+		(void)mgmt_txn_send_commit_cfg_reply(txn_req->txn,
+						     MGMTD_NO_CFG_CHANGES,
+						     "No changes found to commit!");
+		return -1;
+	}
+
+	cmtcfg_req->next_phase = MGMTD_COMMIT_PHASE_TXN_CREATE;
+	return 0;
+}
+
+/*
+ * Prepare a COMMIT_CONFIG request (PREPARE_CFG phase): sanity-check the
+ * source/destination datastores, compute (or reuse a pre-computed) set
+ * of config changes, optionally run local YANG/code validation (when
+ * MGMTD_LOCAL_VALIDATIONS_ENABLED), and split the changes into
+ * per-backend config batches.
+ *
+ * On success moves the commit to the TXN_CREATE phase and arms the
+ * commit timeout timer; returns 0.  On any failure a COMMIT_CONFIG_REPLY
+ * carrying the error has already been sent to the frontend.
+ */
+static int mgmt_txn_prepare_config(struct mgmt_txn_ctx *txn)
+{
+	struct nb_context nb_ctx;
+	struct nb_config *nb_config;
+	struct nb_config_cbs changes;
+	struct nb_config_cbs *cfg_chgs = NULL;
+	int ret;
+	bool del_cfg_chgs = false;
+
+	ret = 0;
+	memset(&nb_ctx, 0, sizeof(nb_ctx));
+	memset(&changes, 0, sizeof(changes));
+	/* A caller-supplied change-list skips the checks and validation. */
+	if (txn->commit_cfg_req->req.commit_cfg.cfg_chgs) {
+		cfg_chgs = txn->commit_cfg_req->req.commit_cfg.cfg_chgs;
+		del_cfg_chgs = true;
+		goto mgmt_txn_prep_config_validation_done;
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.src_ds_id != MGMTD_DS_CANDIDATE) {
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INVALID_PARAM,
+			"Source DS cannot be any other than CANDIDATE!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.dst_ds_id != MGMTD_DS_RUNNING) {
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INVALID_PARAM,
+			"Destination DS cannot be any other than RUNNING!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (!txn->commit_cfg_req->req.commit_cfg.src_ds_ctx) {
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+						     "No such source datastore!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (!txn->commit_cfg_req->req.commit_cfg.dst_ds_ctx) {
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+						     "No such destination datastore!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.abort) {
+		/*
+		 * This is a commit abort request. Return back success.
+		 * That should trigger a restore of Candidate datastore to
+		 * Running.
+		 */
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	nb_config = mgmt_ds_get_nb_config(
+		txn->commit_cfg_req->req.commit_cfg.src_ds_ctx);
+	if (!nb_config) {
+		(void)mgmt_txn_send_commit_cfg_reply(
+			txn, MGMTD_INTERNAL_ERROR,
+			"Unable to retrieve Commit DS Config Tree!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	/*
+	 * Check for diffs from scratch buffer. If found empty
+	 * get the diff from Candidate DS itself.
+	 */
+	cfg_chgs = &nb_config->cfg_chgs;
+	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+		/*
+		 * This could be the case when the config is directly
+		 * loaded onto the candidate DS from a file. Get the
+		 * diff from a full comparison of the candidate and
+		 * running DSs.
+		 */
+		nb_config_diff(mgmt_ds_get_nb_config(
+				       txn->commit_cfg_req->req.commit_cfg
+				       .dst_ds_ctx),
+			       nb_config, &changes);
+		cfg_chgs = &changes;
+		del_cfg_chgs = true;
+	}
+
+	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
+		/*
+		 * This means there's no changes to commit whatsoever
+		 * is the source of the changes in config.
+		 */
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_NO_CFG_CHANGES,
+						     "No changes found to be committed!");
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+#ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED
+	if (mm->perf_stats_en)
+		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
+			     ->validate_start,
+			     NULL);
+	/*
+	 * Validate YANG contents of the source DS and get the diff
+	 * between source and destination DS contents.
+	 */
+	char err_buf[1024] = { 0 };
+	nb_ctx.client = NB_CLIENT_MGMTD_SERVER;
+	nb_ctx.user = (void *)txn;
+
+	ret = nb_candidate_validate_yang(nb_config, true, err_buf,
+					 sizeof(err_buf) - 1);
+	if (ret != NB_OK) {
+		/* Empty (or single-space) error text: use a generic message. */
+		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
+			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+						     err_buf);
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+	/*
+	 * Perform application level validations locally on the MGMTD
+	 * process by calling application specific validation routines
+	 * loaded onto MGMTD process using libraries.
+	 */
+	ret = nb_candidate_validate_code(&nb_ctx, nb_config, &changes, err_buf,
+					 sizeof(err_buf) - 1);
+	if (ret != NB_OK) {
+		/* Empty (or single-space) error text: use a generic message. */
+		if (strncmp(err_buf, " ", strlen(err_buf)) == 0)
+			strlcpy(err_buf, "Validation failed", sizeof(err_buf));
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_INVALID_PARAM,
+						     err_buf);
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	if (txn->commit_cfg_req->req.commit_cfg.validate_only) {
+		/*
+		 * This was a validate-only COMMIT request return success.
+		 */
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+		goto mgmt_txn_prepare_config_done;
+	}
+#endif /* ifdef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+
+mgmt_txn_prep_config_validation_done:
+
+	if (mm->perf_stats_en)
+		gettimeofday(&txn->commit_cfg_req->req.commit_cfg.cmt_stats
+			     ->prep_cfg_start,
+			     NULL);
+
+	/*
+	 * Iterate over the diffs and create ordered batches of config
+	 * commands to be validated.
+	 */
+	ret = mgmt_txn_create_config_batches(txn->commit_cfg_req, cfg_chgs);
+	if (ret != 0) {
+		ret = -1;
+		goto mgmt_txn_prepare_config_done;
+	}
+
+	/* Move to the Transaction Create Phase */
+	txn->commit_cfg_req->req.commit_cfg.curr_phase =
+		MGMTD_COMMIT_PHASE_TXN_CREATE;
+	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
+
+	/*
+	 * Start the COMMIT Timeout Timer to abort Txn if things get stuck at
+	 * backend.
+	 */
+	mgmt_txn_register_event(txn, MGMTD_TXN_COMMITCFG_TIMEOUT);
+mgmt_txn_prepare_config_done:
+
+	if (cfg_chgs && del_cfg_chgs)
+		nb_config_diff_del_changes(cfg_chgs);
+
+	return ret;
+}
+
+/*
+ * Send a TXN_CREATE_REQ to every backend client involved in the commit
+ * and mark their current batches as being in the TXN_CREATE phase.
+ * The commit then waits in this phase for the TXN_REPLYs.
+ *
+ * Returns 0 on success; on a send failure replies to the frontend with
+ * an internal error and returns -1.
+ */
+static int mgmt_txn_send_be_txn_create(struct mgmt_txn_ctx *txn)
+{
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_adapter *adapter;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	struct mgmt_txn_be_cfg_batch *batch;
+
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	FOREACH_MGMTD_BE_CLIENT_ID (id) {
+		/* Only clients subscribed to some changed xpath take part. */
+		if (cmtcfg_req->subscr_info.xpath_subscr[id]) {
+			adapter = mgmt_be_get_adapter_by_id(id);
+			if (mgmt_be_send_txn_req(adapter, txn->txn_id, true)) {
+				(void)mgmt_txn_send_commit_cfg_reply(
+					txn, MGMTD_INTERNAL_ERROR,
+					"Could not send TXN_CREATE to backend adapter");
+				return -1;
+			}
+
+			FOREACH_TXN_CFG_BATCH_IN_LIST (&txn->commit_cfg_req->req
+						       .commit_cfg
+						       .curr_batches[id],
+						       batch)
+				batch->comm_phase =
+					MGMTD_COMMIT_PHASE_TXN_CREATE;
+		}
+	}
+
+	txn->commit_cfg_req->req.commit_cfg.next_phase =
+		MGMTD_COMMIT_PHASE_SEND_CFG;
+
+	/*
+	 * Dont move the commit to next phase yet. Wait for the TXN_REPLY to
+	 * come back.
+	 */
+
+	MGMTD_TXN_DBG("txn-id: %" PRIu64 " session-id: %" PRIu64
+		      " Phase(Current:'%s', Next: '%s')",
+		      txn->txn_id, txn->session_id,
+		      mgmt_txn_commit_phase_str(txn, true),
+		      mgmt_txn_commit_phase_str(txn, false));
+
+	return 0;
+}
+
+/*
+ * Send every pending CFGDATA_CREATE_REQ batch for one backend adapter,
+ * moving each batch onto the next-phase list as it goes out.  The last
+ * batch is flagged so the backend knows no more config data follows.
+ *
+ * Returns 0 on success; on a send failure replies to the frontend with
+ * MGMTD_INTERNAL_ERROR and returns -1.
+ */
+static int mgmt_txn_send_be_cfg_data(struct mgmt_txn_ctx *txn,
+				     struct mgmt_be_client_adapter *adapter)
+{
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	struct mgmt_txn_be_cfg_batch *batch;
+	struct mgmt_be_cfgreq cfg_req = { 0 };
+	size_t num_batches, indx;
+
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	assert(cmtcfg_req->subscr_info.xpath_subscr[adapter->id]);
+
+	indx = 0;
+	num_batches =
+		mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]);
+	FOREACH_TXN_CFG_BATCH_IN_LIST (&cmtcfg_req->curr_batches[adapter->id],
+				       batch) {
+		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_SEND_CFG);
+
+		cfg_req.cfgdata_reqs = batch->cfg_datap;
+		cfg_req.num_reqs = batch->num_cfg_data;
+		indx++;
+		/* 'indx == num_batches' marks the final batch for this client. */
+		if (mgmt_be_send_cfgdata_req(adapter, txn->txn_id,
+					     batch->batch_id,
+					     cfg_req.cfgdata_reqs,
+					     cfg_req.num_reqs,
+					     indx == num_batches)) {
+			(void)mgmt_txn_send_commit_cfg_reply(
+				txn, MGMTD_INTERNAL_ERROR,
+				"Internal Error! Could not send config data to backend!");
+			/* Fixed: log message was missing its closing quote. */
+			MGMTD_TXN_ERR("Could not send CFGDATA_CREATE txn-id: %" PRIu64
+				      " batch-id: %" PRIu64 " to client '%s'",
+				      txn->txn_id, batch->batch_id,
+				      adapter->name);
+			return -1;
+		}
+
+		cmtcfg_req->cmt_stats->last_num_cfgdata_reqs++;
+		mgmt_move_txn_cfg_batch_to_next(
+			cmtcfg_req, batch,
+			&cmtcfg_req->curr_batches[adapter->id],
+			&cmtcfg_req->next_batches[adapter->id], true,
+			MGMTD_COMMIT_PHASE_SEND_CFG);
+	}
+
+	/*
+	 * This could be the last Backend Client to send CFGDATA_CREATE_REQ to.
+	 * Try moving the commit to next phase.
+	 */
+	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+
+	return 0;
+}
+
+/*
+ * Ask one backend adapter to delete the transaction on its side.
+ * Succeeds trivially when the adapter was never involved in this
+ * commit; otherwise forwards a TXN delete request.
+ */
+static int mgmt_txn_send_be_txn_delete(struct mgmt_txn_ctx *txn,
+				       struct mgmt_be_client_adapter *adapter)
+{
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
+	/* All batches must already have drained for this adapter. */
+	assert(!mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id]));
+
+	if (!cmtcfg_req->subscr_info.xpath_subscr[adapter->id])
+		return 0;
+
+	return mgmt_be_send_txn_req(adapter, txn->txn_id, false);
+}
+
+/*
+ * Commit timeout handler: a backend did not complete its phase within
+ * MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC.  Abort the commit by replying
+ * failure to the frontend, which then tears the transaction down.
+ */
+static void mgmt_txn_cfg_commit_timedout(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn;
+
+	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+	assert(txn);
+
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG);
+
+	/* Commit may already have completed; nothing to abort then. */
+	if (!txn->commit_cfg_req)
+		return;
+
+	MGMTD_TXN_ERR("Backend timeout txn-id: %" PRIu64 " aborting commit",
+		      txn->txn_id);
+
+	/*
+	 * Send a COMMIT_CONFIG_REPLY with failure.
+	 * NOTE: The transaction cleanup will be triggered from Front-end
+	 * adapter.
+	 */
+	mgmt_txn_send_commit_cfg_reply(
+		txn, MGMTD_INTERNAL_ERROR,
+		"Operation on the backend timed-out. Aborting commit!");
+}
+
+/*
+ * Send CFG_APPLY_REQs to all the backend client.
+ *
+ * NOTE: This is always dispatched when all CFGDATA_CREATE_REQs
+ * for all backend clients has been generated. Please see
+ * mgmt_txn_register_event() and mgmt_txn_process_commit_cfg()
+ * for details.
+ *
+ * Returns 0 on success (including the validate-only short-circuit);
+ * -1 after replying with an internal error if an adapter is missing
+ * or a send fails.
+ */
+static int mgmt_txn_send_be_cfg_apply(struct mgmt_txn_ctx *txn)
+{
+	enum mgmt_be_client_id id;
+	struct mgmt_be_client_adapter *adapter;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+	struct mgmt_txn_batches_head *batch_list;
+	struct mgmt_txn_be_cfg_batch *batch;
+
+	assert(txn->type == MGMTD_TXN_TYPE_CONFIG && txn->commit_cfg_req);
+
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	if (cmtcfg_req->validate_only) {
+		/*
+		 * If this was a validate-only COMMIT request return success.
+		 */
+		(void)mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+		return 0;
+	}
+
+	FOREACH_MGMTD_BE_CLIENT_ID (id) {
+		/* Only clients that asked for config notification get APPLY. */
+		if (cmtcfg_req->subscr_info.xpath_subscr[id] &
+		    MGMT_SUBSCR_NOTIFY_CFG) {
+			adapter = mgmt_be_get_adapter_by_id(id);
+			if (!adapter)
+				return -1;
+
+			batch_list = &cmtcfg_req->curr_batches[id];
+			if (mgmt_be_send_cfgapply_req(adapter, txn->txn_id)) {
+				(void)mgmt_txn_send_commit_cfg_reply(
+					txn, MGMTD_INTERNAL_ERROR,
+					"Could not send CFG_APPLY_REQ to backend adapter");
+				return -1;
+			}
+			cmtcfg_req->cmt_stats->last_num_apply_reqs++;
+
+			/* Adapter's config is changing; drop its synced flag. */
+			UNSET_FLAG(adapter->flags,
+				   MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+
+			FOREACH_TXN_CFG_BATCH_IN_LIST (batch_list, batch)
+				batch->comm_phase = MGMTD_COMMIT_PHASE_APPLY_CFG;
+		}
+	}
+
+	txn->commit_cfg_req->req.commit_cfg.next_phase =
+		MGMTD_COMMIT_PHASE_TXN_DELETE;
+
+	/*
+	 * Dont move the commit to next phase yet. Wait for all VALIDATE_REPLIES
+	 * to come back.
+	 */
+
+	return 0;
+}
+
+/*
+ * Commit-config state-machine driver.  Scheduled (via timer event) each
+ * time the commit needs to make progress; dispatches the work for the
+ * current phase.  Phase order:
+ *   PREPARE_CFG -> TXN_CREATE -> SEND_CFG -> APPLY_CFG -> TXN_DELETE.
+ */
+static void mgmt_txn_process_commit_cfg(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_commit_cfg_req *cmtcfg_req;
+
+	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+	assert(txn);
+
+	MGMTD_TXN_DBG("Processing COMMIT_CONFIG for txn-id: %" PRIu64
+		      " session-id: %" PRIu64 " Phase(Current:'%s', Next: '%s')",
+		      txn->txn_id, txn->session_id,
+		      mgmt_txn_commit_phase_str(txn, true),
+		      mgmt_txn_commit_phase_str(txn, false));
+
+	assert(txn->commit_cfg_req);
+	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+	switch (cmtcfg_req->curr_phase) {
+	case MGMTD_COMMIT_PHASE_PREPARE_CFG:
+		mgmt_txn_prepare_config(txn);
+		break;
+	case MGMTD_COMMIT_PHASE_TXN_CREATE:
+		if (mm->perf_stats_en)
+			gettimeofday(&cmtcfg_req->cmt_stats->txn_create_start,
+				     NULL);
+		/*
+		 * Send TXN_CREATE_REQ to all Backend now.
+		 */
+		mgmt_txn_send_be_txn_create(txn);
+		break;
+	case MGMTD_COMMIT_PHASE_SEND_CFG:
+		if (mm->perf_stats_en)
+			gettimeofday(&cmtcfg_req->cmt_stats->send_cfg_start,
+				     NULL);
+		/*
+		 * All CFGDATA_CREATE_REQ should have been sent to
+		 * Backend by now.  Nothing more to do here but log;
+		 * replies drive the move to APPLY_CFG.
+		 */
+#ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED
+		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
+		MGMTD_TXN_DBG("txn-id: %" PRIu64 " session-id: %" PRIu64
+			      " trigger sending CFG_VALIDATE_REQ to all backend clients",
+			      txn->txn_id, txn->session_id);
+#else /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+		assert(cmtcfg_req->next_phase == MGMTD_COMMIT_PHASE_APPLY_CFG);
+		MGMTD_TXN_DBG("txn-id: %" PRIu64 " session-id: %" PRIu64
+			      " trigger sending CFG_APPLY_REQ to all backend clients",
+			      txn->txn_id, txn->session_id);
+#endif /* ifndef MGMTD_LOCAL_VALIDATIONS_ENABLED */
+		break;
+	case MGMTD_COMMIT_PHASE_APPLY_CFG:
+		if (mm->perf_stats_en)
+			gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_start,
+				     NULL);
+		/*
+		 * We should have received successful CFG_VALIDATE_REPLY from
+		 * all concerned Backend Clients by now. Send out the
+		 * CFG_APPLY_REQs now.
+		 */
+		mgmt_txn_send_be_cfg_apply(txn);
+		break;
+	case MGMTD_COMMIT_PHASE_TXN_DELETE:
+		if (mm->perf_stats_en)
+			gettimeofday(&cmtcfg_req->cmt_stats->txn_del_start,
+				     NULL);
+		/*
+		 * We would have sent TXN_DELETE_REQ to all backend by now.
+		 * Send a successful CONFIG_COMMIT_REPLY back to front-end.
+		 * NOTE: This should also trigger DS merge/unlock and Txn
+		 * cleanup. Please see mgmt_fe_send_commit_cfg_reply() for
+		 * more details.
+		 */
+		EVENT_OFF(txn->comm_cfg_timeout);
+		mgmt_txn_send_commit_cfg_reply(txn, MGMTD_SUCCESS, NULL);
+		break;
+	case MGMTD_COMMIT_PHASE_MAX:
+		break;
+	}
+
+	MGMTD_TXN_DBG("txn-id:%" PRIu64 " session-id: %" PRIu64
+		      " phase updated to (current:'%s', next: '%s')",
+		      txn->txn_id, txn->session_id,
+		      mgmt_txn_commit_phase_str(txn, true),
+		      mgmt_txn_commit_phase_str(txn, false));
+}
+
+/* Point each reply_datap[] slot at its corresponding reply_data[] entry. */
+static void mgmt_init_get_data_reply(struct mgmt_get_data_reply *get_reply)
+{
+	size_t i;
+
+	for (i = 0; i < array_size(get_reply->reply_data); i++)
+		get_reply->reply_datap[i] = &get_reply->reply_data[i];
+}
+
+/*
+ * Free any xpath strings held by the previous reply batch and zero all
+ * reply buffers so the next batch starts from a clean slate.
+ */
+static void mgmt_reset_get_data_reply(struct mgmt_get_data_reply *get_reply)
+{
+	int i;
+
+	for (i = 0; i < get_reply->num_reply; i++) {
+		if (get_reply->reply_xpathp[i]) {
+			free(get_reply->reply_xpathp[i]);
+			get_reply->reply_xpathp[i] = NULL;
+		}
+		if (get_reply->reply_data[i].xpath) {
+			zlog_debug("%s free xpath %p", __func__,
+				   get_reply->reply_data[i].xpath);
+			free(get_reply->reply_data[i].xpath);
+			get_reply->reply_data[i].xpath = NULL;
+		}
+	}
+
+	get_reply->num_reply = 0;
+	memset(&get_reply->data_reply, 0, sizeof(get_reply->data_reply));
+	memset(&get_reply->reply_data, 0, sizeof(get_reply->reply_data));
+	memset(&get_reply->reply_datap, 0, sizeof(get_reply->reply_datap));
+	memset(&get_reply->reply_value, 0, sizeof(get_reply->reply_value));
+
+	/* Re-link the datap pointer array for the next batch. */
+	mgmt_init_get_data_reply(get_reply);
+}
+
+/* Reset the reply buffer attached to a get-data request, if one exists. */
+static void mgmt_reset_get_data_reply_buf(struct mgmt_get_data_req *get_data)
+{
+	struct mgmt_get_data_reply *reply = get_data->reply;
+
+	if (reply)
+		mgmt_reset_get_data_reply(reply);
+}
+
+/*
+ * Package the reply data accumulated so far for a GET_CONFIG/GET_DATA
+ * request and send it to the frontend session.  A next_indx of -1
+ * tells the frontend this was the final batch.  The reply buffer is
+ * reset for the next batch before returning.
+ */
+static void mgmt_txn_send_getcfg_reply_data(struct mgmt_txn_req *txn_req,
+					    struct mgmt_get_data_req *get_req)
+{
+	struct mgmt_get_data_reply *get_reply;
+	Mgmtd__YangDataReply *data_reply;
+
+	get_reply = get_req->reply;
+	if (!get_reply)
+		return;
+
+	data_reply = &get_reply->data_reply;
+	mgmt_yang_data_reply_init(data_reply);
+	data_reply->n_data = get_reply->num_reply;
+	data_reply->data = get_reply->reply_datap;
+	/* -1 signals "no more batches" to the frontend. */
+	data_reply->next_indx = (!get_reply->last_batch ? get_req->total_reply
+				 : -1);
+
+	MGMTD_TXN_DBG("Sending %zu Get-Config/Data replies next-index:%" PRId64,
+		      data_reply->n_data, data_reply->next_indx);
+
+	switch (txn_req->req_event) {
+	case MGMTD_TXN_PROC_GETCFG:
+		if (mgmt_fe_send_get_reply(txn_req->txn->session_id,
+					   txn_req->txn->txn_id, get_req->ds_id,
+					   txn_req->req_id, MGMTD_SUCCESS,
+					   data_reply, NULL) != 0) {
+			MGMTD_TXN_ERR("Failed to send GET-CONFIG-REPLY txn-id: %" PRIu64
+				      " session-id: %" PRIu64
+				      " req-id: %" PRIu64,
+				      txn_req->txn->txn_id,
+				      txn_req->txn->session_id, txn_req->req_id);
+		}
+		break;
+	case MGMTD_TXN_PROC_GETDATA:
+		if (mgmt_fe_send_get_reply(txn_req->txn->session_id,
+					   txn_req->txn->txn_id, get_req->ds_id,
+					   txn_req->req_id, MGMTD_SUCCESS,
+					   data_reply, NULL) != 0) {
+			MGMTD_TXN_ERR("Failed to send GET-DATA-REPLY txn-id: %" PRIu64
+				      " session-id: %" PRIu64
+				      " req-id: %" PRIu64,
+				      txn_req->txn->txn_id,
+				      txn_req->txn->session_id, txn_req->req_id);
+		}
+		break;
+	case MGMTD_TXN_PROC_SETCFG:
+	case MGMTD_TXN_PROC_COMMITCFG:
+	case MGMTD_TXN_COMMITCFG_TIMEOUT:
+	case MGMTD_TXN_CLEANUP:
+		/* These events never carry a get-data reply. */
+		MGMTD_TXN_ERR("Invalid Txn-Req-Event %u", txn_req->req_event);
+		break;
+	}
+
+	/*
+	 * Reset reply buffer for next reply.
+	 */
+	mgmt_reset_get_data_reply_buf(get_req);
+}
+
+/*
+ * mgmt_ds_iter_data() callback: append one terminal (leaf) node's
+ * xpath/value to the in-progress reply, flushing a full batch to the
+ * frontend once MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH entries have been
+ * collected.  Non-terminal nodes are skipped.
+ */
+static void mgmt_txn_iter_and_send_get_cfg_reply(const char *xpath,
+						 struct lyd_node *node,
+						 struct nb_node *nb_node,
+						 void *ctx)
+{
+	struct mgmt_txn_req *txn_req;
+	struct mgmt_get_data_req *get_req;
+	struct mgmt_get_data_reply *get_reply;
+	Mgmtd__YangData *data;
+	Mgmtd__YangDataValue *data_value;
+
+	txn_req = (struct mgmt_txn_req *)ctx;
+	if (!txn_req)
+		return;
+
+	/* Only leaf/leaf-list (terminal) nodes carry values. */
+	if (!(node->schema->nodetype & LYD_NODE_TERM))
+		return;
+
+	assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG ||
+	       txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
+
+	get_req = txn_req->req.get_data;
+	assert(get_req);
+	get_reply = get_req->reply;
+	data = &get_reply->reply_data[get_reply->num_reply];
+	data_value = &get_reply->reply_value[get_reply->num_reply];
+
+	mgmt_yang_data_init(data);
+	/* xpath is duplicated; the value string is borrowed from the node. */
+	data->xpath = strdup(xpath);
+	mgmt_yang_data_value_init(data_value);
+	data_value->value_case = MGMTD__YANG_DATA_VALUE__VALUE_ENCODED_STR_VAL;
+	data_value->encoded_str_val = (char *)lyd_get_value(node);
+	data->value = data_value;
+
+	get_reply->num_reply++;
+	get_req->total_reply++;
+	MGMTD_TXN_DBG(" [%d] XPATH: '%s', Value: '%s'", get_req->total_reply,
+		      data->xpath, data_value->encoded_str_val);
+
+	/* Batch full: flush it to the frontend now. */
+	if (get_reply->num_reply == MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH)
+		mgmt_txn_send_getcfg_reply_data(txn_req, get_req);
+}
+
+/*
+ * Serve a GET_CONFIG request directly from the given config tree (no
+ * backend round-trip), streaming reply batches to the frontend session.
+ * The txn request is always freed before returning (success or error).
+ */
+static int mgmt_txn_get_config(struct mgmt_txn_ctx *txn,
+			       struct mgmt_txn_req *txn_req,
+			       struct nb_config *root)
+{
+	int indx;
+	struct mgmt_get_data_req *get_data;
+	struct mgmt_get_data_reply *get_reply;
+
+	get_data = txn_req->req.get_data;
+
+	/* Lazily allocate the reply buffer on first use. */
+	if (!get_data->reply) {
+		get_data->reply = XCALLOC(MTYPE_MGMTD_TXN_GETDATA_REPLY,
+					  sizeof(struct mgmt_get_data_reply));
+		if (!get_data->reply) {
+			mgmt_fe_send_get_reply(
+				txn->session_id, txn->txn_id, get_data->ds_id,
+				txn_req->req_id, MGMTD_INTERNAL_ERROR, NULL,
+				"Internal error: Unable to allocate reply buffers!");
+			goto mgmt_txn_get_config_failed;
+		}
+	}
+
+	/*
+	 * Read data contents from the DS and respond back directly.
+	 * No need to go to backend for getting data.
+	 */
+	get_reply = get_data->reply;
+	for (indx = 0; indx < get_data->num_xpaths; indx++) {
+		MGMTD_TXN_DBG("Trying to get all data under '%s'",
+			      get_data->xpaths[indx]);
+		mgmt_init_get_data_reply(get_reply);
+		/*
+		 * mgmt_ds_iter_data works on path prefixes, but the user may
+		 * want to also use an xpath regexp we need to add this
+		 * functionality.
+		 */
+		if (mgmt_ds_iter_data(get_data->ds_id, root,
+				      get_data->xpaths[indx],
+				      mgmt_txn_iter_and_send_get_cfg_reply,
+				      (void *)txn_req) == -1) {
+			/* Fixed: debug message was missing its closing quote. */
+			MGMTD_TXN_DBG("Invalid Xpath '%s'",
+				      get_data->xpaths[indx]);
+			mgmt_fe_send_get_reply(txn->session_id, txn->txn_id,
+					       get_data->ds_id, txn_req->req_id,
+					       MGMTD_INTERNAL_ERROR, NULL,
+					       "Invalid xpath");
+			goto mgmt_txn_get_config_failed;
+		}
+		MGMTD_TXN_DBG("Got %d remaining data-replies for xpath '%s'",
+			      get_reply->num_reply, get_data->xpaths[indx]);
+		/* Flush whatever is left over as the final batch. */
+		get_reply->last_batch = true;
+		mgmt_txn_send_getcfg_reply_data(txn_req, get_data);
+	}
+
+mgmt_txn_get_config_failed:
+
+	/*
+	 * Delete the txn request. It will also remove it from request
+	 * list.
+	 */
+	mgmt_txn_req_free(&txn_req);
+
+	return 0;
+}
+
+/*
+ * Process up to MGMTD_TXN_MAX_NUM_GETCFG_PROC pending GET_CONFIG
+ * requests for this transaction, serving each directly from its config
+ * root.  Reschedules itself if more requests remain afterwards.
+ */
+static void mgmt_txn_process_get_cfg(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	struct nb_config *cfg_root;
+	int num_processed = 0;
+	bool error;
+
+	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+	assert(txn);
+
+	MGMTD_TXN_DBG("Processing %zu GET_CONFIG requests txn-id: %" PRIu64
+		      " session-id: %" PRIu64,
+		      mgmt_txn_reqs_count(&txn->get_cfg_reqs), txn->txn_id,
+		      txn->session_id);
+
+	FOREACH_TXN_REQ_IN_LIST (&txn->get_cfg_reqs, txn_req) {
+		error = false;
+		assert(txn_req->req_event == MGMTD_TXN_PROC_GETCFG);
+		cfg_root = txn_req->req.get_data->cfg_root;
+		assert(cfg_root);
+
+		if (mgmt_txn_get_config(txn, txn_req, cfg_root) != 0) {
+			MGMTD_TXN_ERR("Unable to retrieve config from DS %d txn-id: %" PRIu64
+				      " session-id: %" PRIu64
+				      " req-id: %" PRIu64,
+				      txn_req->req.get_data->ds_id, txn->txn_id,
+				      txn->session_id, txn_req->req_id);
+			error = true;
+		}
+
+		if (error) {
+			/*
+			 * Delete the txn request.
+			 * Note: The following will remove it from the list
+			 * as well.
+			 */
+			mgmt_txn_req_free(&txn_req);
+		}
+
+		/*
+		 * Else the transaction would have been already deleted or
+		 * moved to corresponding pending list. No need to delete it.
+		 */
+		num_processed++;
+		if (num_processed == MGMTD_TXN_MAX_NUM_GETCFG_PROC)
+			break;
+	}
+
+	/* More requests left: batch the rest into another scheduling round. */
+	if (mgmt_txn_reqs_count(&txn->get_cfg_reqs)) {
+		MGMTD_TXN_DBG("Processed maximum number of Get-Config requests (%d/%d). Rescheduling for rest.",
+			      num_processed, MGMTD_TXN_MAX_NUM_GETCFG_PROC);
+		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETCFG);
+	}
+}
+
+/*
+ * Process pending GET_DATA requests for this transaction.  Operational
+ * GET-DATA via the backend is not implemented yet, so every request is
+ * answered with an error and freed.  Reschedules itself when more than
+ * MGMTD_TXN_MAX_NUM_GETDATA_PROC requests are pending.
+ */
+static void mgmt_txn_process_get_data(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn;
+	struct mgmt_txn_req *txn_req;
+	int num_processed = 0;
+
+	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+	assert(txn);
+
+	MGMTD_TXN_DBG("Processing %zu GET_DATA requests txn-id: %" PRIu64
+		      " session-id: %" PRIu64,
+		      mgmt_txn_reqs_count(&txn->get_data_reqs), txn->txn_id,
+		      txn->session_id);
+
+	FOREACH_TXN_REQ_IN_LIST (&txn->get_data_reqs, txn_req) {
+		assert(txn_req->req_event == MGMTD_TXN_PROC_GETDATA);
+
+		/*
+		 * TODO: Trigger GET procedures for Backend
+		 * For now return back error.
+		 */
+		mgmt_fe_send_get_reply(txn->session_id, txn->txn_id,
+				       txn_req->req.get_data->ds_id,
+				       txn_req->req_id, MGMTD_INTERNAL_ERROR,
+				       NULL, "GET-DATA is not supported yet!");
+		/*
+		 * Delete the txn request.
+		 * Note: The following will remove it from the list
+		 * as well.
+		 */
+		mgmt_txn_req_free(&txn_req);
+
+		/*
+		 * Else the transaction would have been already deleted or
+		 * moved to corresponding pending list. No need to delete it.
+		 */
+		num_processed++;
+		if (num_processed == MGMTD_TXN_MAX_NUM_GETDATA_PROC)
+			break;
+	}
+
+	/* More requests left: batch the rest into another scheduling round. */
+	if (mgmt_txn_reqs_count(&txn->get_data_reqs)) {
+		MGMTD_TXN_DBG("Processed maximum number of Get-Data requests (%d/%d). Rescheduling for rest.",
+			      num_processed, MGMTD_TXN_MAX_NUM_GETDATA_PROC);
+		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_GETDATA);
+	}
+}
+
+/*
+ * Find the transaction of the given type owned by session_id, or NULL
+ * when the session has no such transaction.
+ */
+static struct mgmt_txn_ctx *
+mgmt_fe_find_txn_by_session_id(struct mgmt_master *cm, uint64_t session_id,
+			       enum mgmt_txn_type type)
+{
+	struct mgmt_txn_ctx *txn;
+
+	FOREACH_TXN_IN_LIST (cm, txn) {
+		if (txn->type != type)
+			continue;
+		if (txn->session_id == session_id)
+			return txn;
+	}
+
+	return NULL;
+}
+
+/*
+ * Create a new transaction for a session, or return an existing one.
+ * At most one CONFIG transaction may exist at a time: it is returned
+ * only to its owning session (NULL for anyone else).  A session also
+ * reuses its existing transaction of the same type.  Newly created
+ * transactions get a fresh non-zero txn-id, are added to the global
+ * list and hash, and hold one lock reference.
+ */
+static struct mgmt_txn_ctx *mgmt_txn_create_new(uint64_t session_id,
+						enum mgmt_txn_type type)
+{
+	struct mgmt_txn_ctx *txn = NULL;
+
+	/*
+	 * For 'CONFIG' transaction check if one is already created
+	 * or not.
+	 */
+	if (type == MGMTD_TXN_TYPE_CONFIG && mgmt_txn_mm->cfg_txn) {
+		if (mgmt_config_txn_in_progress() == session_id)
+			txn = mgmt_txn_mm->cfg_txn;
+		goto mgmt_create_txn_done;
+	}
+
+	txn = mgmt_fe_find_txn_by_session_id(mgmt_txn_mm, session_id, type);
+	if (!txn) {
+		txn = XCALLOC(MTYPE_MGMTD_TXN, sizeof(struct mgmt_txn_ctx));
+		assert(txn);
+
+		txn->session_id = session_id;
+		txn->type = type;
+		mgmt_txns_add_tail(&mgmt_txn_mm->txn_list, txn);
+		mgmt_txn_reqs_init(&txn->set_cfg_reqs);
+		mgmt_txn_reqs_init(&txn->get_cfg_reqs);
+		mgmt_txn_reqs_init(&txn->get_data_reqs);
+		mgmt_txn_reqs_init(&txn->pending_get_datas);
+		txn->commit_cfg_req = NULL;
+		txn->refcount = 0;
+		/* Skip id 0 so a valid txn-id is never MGMTD_TXN_ID_NONE. */
+		if (!mgmt_txn_mm->next_txn_id)
+			mgmt_txn_mm->next_txn_id++;
+		txn->txn_id = mgmt_txn_mm->next_txn_id++;
+		hash_get(mgmt_txn_mm->txn_hash, txn, hash_alloc_intern);
+
+		MGMTD_TXN_DBG("Added new '%s' txn-id: %" PRIu64,
+			      mgmt_txn_type2str(type), txn->txn_id);
+
+		if (type == MGMTD_TXN_TYPE_CONFIG)
+			mgmt_txn_mm->cfg_txn = txn;
+
+		MGMTD_TXN_LOCK(txn);
+	}
+
+mgmt_create_txn_done:
+	return txn;
+}
+
+/*
+ * Drop one reference on the transaction (freeing it when the refcount
+ * reaches zero) and NULL out the caller's pointer.
+ */
+static void mgmt_txn_delete(struct mgmt_txn_ctx **txn)
+{
+	MGMTD_TXN_UNLOCK(txn);
+}
+
+/*
+ * Hash a transaction by its txn-id: the uint64 id is fed to jhash2()
+ * as an array of 32-bit words.
+ */
+static unsigned int mgmt_txn_hash_key(const void *data)
+{
+	const struct mgmt_txn_ctx *txn = data;
+
+	return jhash2((uint32_t *)&txn->txn_id,
+		      sizeof(txn->txn_id) / sizeof(uint32_t), 0);
+}
+
+/* Hash equality: two contexts are equal when their txn-ids match. */
+static bool mgmt_txn_hash_cmp(const void *d1, const void *d2)
+{
+	const struct mgmt_txn_ctx *a = d1;
+	const struct mgmt_txn_ctx *b = d2;
+
+	return a->txn_id == b->txn_id;
+}
+
+/* Hash destructor: release the table's reference on the transaction. */
+static void mgmt_txn_hash_free(void *data)
+{
+	struct mgmt_txn_ctx *txn;
+
+	txn = data;
+	mgmt_txn_delete(&txn);
+}
+
+/* Create the global txn-id hash table (no-op if already created). */
+static void mgmt_txn_hash_init(void)
+{
+	if (!mgmt_txn_mm)
+		return;
+	if (mgmt_txn_mm->txn_hash)
+		return;
+
+	mgmt_txn_mm->txn_hash = hash_create(mgmt_txn_hash_key,
+					    mgmt_txn_hash_cmp,
+					    "MGMT Transactions");
+}
+
+/* Tear down the global txn hash, releasing every remaining txn. */
+static void mgmt_txn_hash_destroy(void)
+{
+	struct hash *txn_hash;
+
+	if (!mgmt_txn_mm)
+		return;
+
+	txn_hash = mgmt_txn_mm->txn_hash;
+	if (!txn_hash)
+		return;
+
+	hash_clean(txn_hash, mgmt_txn_hash_free);
+	hash_free(txn_hash);
+	mgmt_txn_mm->txn_hash = NULL;
+}
+
+/* Resolve a txn-id to its context via the global hash; NULL if absent. */
+static inline struct mgmt_txn_ctx *mgmt_txn_id2ctx(uint64_t txn_id)
+{
+	struct mgmt_txn_ctx key = { .txn_id = txn_id };
+
+	if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
+		return NULL;
+
+	return hash_lookup(mgmt_txn_mm->txn_hash, &key);
+}
+
+/* Take a reference on the txn (invoked via the MGMTD_TXN_LOCK macro,
+ * which supplies the caller's file/line for the debug trace).
+ */
+static void mgmt_txn_lock(struct mgmt_txn_ctx *txn, const char *file, int line)
+{
+	txn->refcount++;
+	MGMTD_TXN_DBG("%s:%d --> Lock %s txn-id: %" PRIu64 " refcnt: %d", file,
+		      line, mgmt_txn_type2str(txn->type), txn->txn_id,
+		      txn->refcount);
+}
+
+/*
+ * Release one reference on *txn.  On the last reference the txn is
+ * fully torn down: pending events are cancelled, it is detached from
+ * mm->cfg_txn (if it is the active CONFIG txn), removed from the
+ * global hash and list, and freed.  *txn is always NULLed for the
+ * caller.
+ */
+static void mgmt_txn_unlock(struct mgmt_txn_ctx **txn, const char *file,
+			    int line)
+{
+	assert(*txn && (*txn)->refcount);
+
+	(*txn)->refcount--;
+	MGMTD_TXN_DBG("%s:%d --> Unlock %s txn-id: %" PRIu64 " refcnt: %d",
+		      file, line, mgmt_txn_type2str((*txn)->type),
+		      (*txn)->txn_id, (*txn)->refcount);
+	if (!(*txn)->refcount) {
+		if ((*txn)->type == MGMTD_TXN_TYPE_CONFIG)
+			if (mgmt_txn_mm->cfg_txn == *txn)
+				mgmt_txn_mm->cfg_txn = NULL;
+		/* Cancel any still-scheduled work before freeing. */
+		EVENT_OFF((*txn)->proc_get_cfg);
+		EVENT_OFF((*txn)->proc_get_data);
+		EVENT_OFF((*txn)->proc_comm_cfg);
+		EVENT_OFF((*txn)->comm_cfg_timeout);
+		hash_release(mgmt_txn_mm->txn_hash, *txn);
+		mgmt_txns_del(&mgmt_txn_mm->txn_list, *txn);
+
+		MGMTD_TXN_DBG("Deleted %s txn-id: %" PRIu64
+			      " session-id: %" PRIu64,
+			      mgmt_txn_type2str((*txn)->type), (*txn)->txn_id,
+			      (*txn)->session_id);
+
+		XFREE(MTYPE_MGMTD_TXN, *txn);
+	}
+
+	*txn = NULL;
+}
+
+/* Release a transaction; placeholder for any extra cleanup steps. */
+static void mgmt_txn_cleanup_txn(struct mgmt_txn_ctx **txn)
+{
+	/* TODO: Any other cleanup applicable */
+
+	mgmt_txn_delete(txn);
+}
+
+/*
+ * Release every transaction still on the global list (used at
+ * shutdown).
+ *
+ * NOTE(review): each cleanup may free the current element, so this
+ * relies on FOREACH_TXN_IN_LIST being deletion-safe -- confirm.
+ */
+static void mgmt_txn_cleanup_all_txns(void)
+{
+	struct mgmt_txn_ctx *txn;
+
+	if (!mgmt_txn_mm || !mgmt_txn_mm->txn_hash)
+		return;
+
+	FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn)
+		mgmt_txn_cleanup_txn(&txn);
+}
+
+/* Deferred-cleanup event handler: releases the scheduled transaction. */
+static void mgmt_txn_cleanup(struct event *thread)
+{
+	struct mgmt_txn_ctx *txn;
+
+	txn = (struct mgmt_txn_ctx *)EVENT_ARG(thread);
+	assert(txn);
+
+	mgmt_txn_cleanup_txn(&txn);
+}
+
+/*
+ * Schedule the handler for a transaction event on the main event loop.
+ * Processing events fire after a short MGMTD_TXN_PROC_DELAY_USEC delay
+ * (CLEANUP uses its own longer delay); COMMITCFG_TIMEOUT instead arms
+ * the long commit-abort timer.
+ */
+static void mgmt_txn_register_event(struct mgmt_txn_ctx *txn,
+				    enum mgmt_txn_event event)
+{
+	struct timeval tv = { .tv_sec = 0,
+			      .tv_usec = MGMTD_TXN_PROC_DELAY_USEC };
+
+	assert(mgmt_txn_mm && mgmt_txn_tm);
+
+	switch (event) {
+	case MGMTD_TXN_PROC_SETCFG:
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_set_cfg, txn,
+				   &tv, &txn->proc_set_cfg);
+		break;
+	case MGMTD_TXN_PROC_COMMITCFG:
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_commit_cfg,
+				   txn, &tv, &txn->proc_comm_cfg);
+		break;
+	case MGMTD_TXN_PROC_GETCFG:
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_cfg, txn,
+				   &tv, &txn->proc_get_cfg);
+		break;
+	case MGMTD_TXN_PROC_GETDATA:
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_process_get_data, txn,
+				   &tv, &txn->proc_get_data);
+		break;
+	case MGMTD_TXN_COMMITCFG_TIMEOUT:
+		event_add_timer_msec(mgmt_txn_tm, mgmt_txn_cfg_commit_timedout,
+				     txn, MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC,
+				     &txn->comm_cfg_timeout);
+		break;
+	case MGMTD_TXN_CLEANUP:
+		/* Last case: falls out of the switch without a break. */
+		tv.tv_usec = MGMTD_TXN_CLEANUP_DELAY_USEC;
+		event_add_timer_tv(mgmt_txn_tm, mgmt_txn_cleanup, txn, &tv,
+				   &txn->clnup);
+	}
+}
+
+/*
+ * One-time module initialization: record the master and event-loop
+ * handles and set up the transaction list and hash.  Calling it twice
+ * is a fatal error.  Always returns 0.
+ */
+int mgmt_txn_init(struct mgmt_master *mm, struct event_loop *tm)
+{
+	if (mgmt_txn_mm || mgmt_txn_tm)
+		assert(!"MGMTD TXN: Call txn_init() only once");
+
+	mgmt_txn_mm = mm;
+	mgmt_txn_tm = tm;
+	mgmt_txns_init(&mm->txn_list);
+	mgmt_txn_hash_init();
+	assert(!mm->cfg_txn);
+	mm->cfg_txn = NULL;
+
+	return 0;
+}
+
/*
 * Tear down the transaction module: destroy any remaining transactions
 * and the txn-id hash.
 */
void mgmt_txn_destroy(void)
{
	mgmt_txn_cleanup_all_txns();
	mgmt_txn_hash_destroy();
}
+
+uint64_t mgmt_config_txn_in_progress(void)
+{
+ if (mgmt_txn_mm && mgmt_txn_mm->cfg_txn)
+ return mgmt_txn_mm->cfg_txn->session_id;
+
+ return MGMTD_SESSION_ID_NONE;
+}
+
+uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type)
+{
+ struct mgmt_txn_ctx *txn;
+
+ txn = mgmt_txn_create_new(session_id, type);
+ return txn ? txn->txn_id : MGMTD_TXN_ID_NONE;
+}
+
/*
 * Destroy the transaction identified by *txn_id (if any) and reset
 * *txn_id to MGMTD_TXN_ID_NONE.
 */
void mgmt_destroy_txn(uint64_t *txn_id)
{
	struct mgmt_txn_ctx *txn;

	txn = mgmt_txn_id2ctx(*txn_id);
	if (!txn)
		return;

	mgmt_txn_delete(&txn);
	*txn_id = MGMTD_TXN_ID_NONE;
}
+
/*
 * Queue a SETCFG request on a transaction.
 *
 * Converts the frontend Mgmtd__YangCfgDataReq array into nb_cfg_change
 * entries on a new MGMTD_TXN_PROC_SETCFG request and schedules its
 * processing.  With implicit_commit only a single outstanding SETCFG-REQ
 * per transaction is permitted.
 *
 * Returns 0 on success, -1 on failure (unknown txn-id or an extra
 * implicit-commit SETCFG).
 */
int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
				 Mgmtd__DatastoreId ds_id,
				 struct mgmt_ds_ctx *ds_ctx,
				 Mgmtd__YangCfgDataReq **cfg_req,
				 size_t num_req, bool implicit_commit,
				 Mgmtd__DatastoreId dst_ds_id,
				 struct mgmt_ds_ctx *dst_ds_ctx)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	size_t indx;
	uint16_t *num_chgs;
	struct nb_cfg_change *cfg_chg;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn)
		return -1;

	if (implicit_commit && mgmt_txn_reqs_count(&txn->set_cfg_reqs)) {
		MGMTD_TXN_ERR(
			"For implicit commit config only one SETCFG-REQ can be allowed!");
		return -1;
	}

	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_SETCFG);
	txn_req->req.set_cfg->ds_id = ds_id;
	txn_req->req.set_cfg->ds_ctx = ds_ctx;
	num_chgs = &txn_req->req.set_cfg->num_cfg_changes;
	for (indx = 0; indx < num_req; indx++) {
		/* NOTE(review): *num_chgs is not bounded against the capacity
		 * of cfg_changes[] — assumes callers never pass more requests
		 * than the array can hold; confirm. */
		cfg_chg = &txn_req->req.set_cfg->cfg_changes[*num_chgs];

		/* DELETE maps to NB_OP_DESTROY; SET maps to MODIFY when the
		 * node already exists in the datastore, CREATE otherwise.
		 * Any other request type is silently skipped. */
		if (cfg_req[indx]->req_type ==
		    MGMTD__CFG_DATA_REQ_TYPE__DELETE_DATA)
			cfg_chg->operation = NB_OP_DESTROY;
		else if (cfg_req[indx]->req_type ==
			 MGMTD__CFG_DATA_REQ_TYPE__SET_DATA)
			cfg_chg->operation =
				mgmt_ds_find_data_node_by_xpath(ds_ctx,
								cfg_req[indx]
									->data
									->xpath)
					? NB_OP_MODIFY
					: NB_OP_CREATE;
		else
			continue;

		MGMTD_TXN_DBG("XPath: '%s', Value: '%s'",
			      cfg_req[indx]->data->xpath,
			      (cfg_req[indx]->data->value &&
			       cfg_req[indx]->data->value->encoded_str_val
				       ? cfg_req[indx]->data->value->encoded_str_val
				       : "NULL"));
		strlcpy(cfg_chg->xpath, cfg_req[indx]->data->xpath,
			sizeof(cfg_chg->xpath));
		/* Value is strdup'ed (may be NULL); ownership passes to the
		 * request and must be freed when it is cleaned up.
		 * NOTE(review): strdup() failure is indistinguishable from
		 * "no value" here. */
		cfg_chg->value =
			(cfg_req[indx]->data->value &&
			 cfg_req[indx]->data->value->encoded_str_val
				 ? strdup(cfg_req[indx]
						  ->data->value->encoded_str_val)
				 : NULL);
		if (cfg_chg->value)
			MGMTD_TXN_DBG("Allocated value at %p ==> '%s'",
				      cfg_chg->value, cfg_chg->value);

		(*num_chgs)++;
	}
	txn_req->req.set_cfg->implicit_commit = implicit_commit;
	txn_req->req.set_cfg->dst_ds_id = dst_ds_id;
	txn_req->req.set_cfg->dst_ds_ctx = dst_ds_ctx;
	txn_req->req.set_cfg->setcfg_stats =
		mgmt_fe_get_session_setcfg_stats(txn->session_id);
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_SETCFG);

	return 0;
}
+
/*
 * Queue a COMMITCFG request on a transaction and schedule commit
 * processing.  At most one commit may be outstanding per transaction.
 *
 * validate_only - only validate, do not apply.
 * abort         - restore source DS from destination DS.
 * implicit      - commit was triggered implicitly by a SETCFG.
 *
 * Returns 0 on success, -1 on failure (unknown txn-id or a commit
 * already in progress).
 */
int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
				    Mgmtd__DatastoreId src_ds_id,
				    struct mgmt_ds_ctx *src_ds_ctx,
				    Mgmtd__DatastoreId dst_ds_id,
				    struct mgmt_ds_ctx *dst_ds_ctx,
				    bool validate_only, bool abort,
				    bool implicit)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn)
		return -1;

	if (txn->commit_cfg_req) {
		MGMTD_TXN_ERR("Commit already in-progress txn-id: %" PRIu64
			      " session-id: %" PRIu64 ". Cannot start another",
			      txn->txn_id, txn->session_id);
		return -1;
	}

	txn_req = mgmt_txn_req_alloc(txn, req_id, MGMTD_TXN_PROC_COMMITCFG);
	txn_req->req.commit_cfg.src_ds_id = src_ds_id;
	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
	txn_req->req.commit_cfg.dst_ds_id = dst_ds_id;
	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
	txn_req->req.commit_cfg.validate_only = validate_only;
	txn_req->req.commit_cfg.abort = abort;
	txn_req->req.commit_cfg.implicit = implicit;
	txn_req->req.commit_cfg.cmt_stats =
		mgmt_fe_get_session_commit_stats(txn->session_id);

	/*
	 * Trigger a COMMIT-CONFIG process.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
	return 0;
}
+
/*
 * React to a backend adapter connecting or disconnecting.
 *
 * On connect: fetch the config slice relevant to this backend and, if
 * non-empty, push it down via a fresh CONFIG transaction; otherwise mark
 * the adapter as config-synced immediately.
 *
 * On disconnect: fail any on-going CONFIG transaction that involves this
 * backend.
 *
 * Returns 0 on success, -1 if the download transaction cannot be created.
 */
int mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
				    bool connect)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	struct mgmt_commit_cfg_req *cmtcfg_req;
	/* static: the stats pointer stored in the commit request must stay
	 * valid after this function returns. */
	static struct mgmt_commit_stats dummy_stats;
	struct nb_config_cbs *adapter_cfgs = NULL;

	memset(&dummy_stats, 0, sizeof(dummy_stats));
	if (connect) {
		/* Get config for this single backend client */

		mgmt_be_get_adapter_config(adapter, &adapter_cfgs);
		if (!adapter_cfgs || RB_EMPTY(nb_config_cbs, adapter_cfgs)) {
			SET_FLAG(adapter->flags,
				 MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
			return 0;
		}

		/*
		 * Create a CONFIG transaction to push the config changes
		 * provided to the backend client.
		 */
		txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
		if (!txn) {
			MGMTD_TXN_ERR("Failed to create CONFIG Transaction for downloading CONFIGs for client '%s'",
				      adapter->name);
			return -1;
		}

		MGMTD_TXN_DBG("Created initial txn-id: %" PRIu64
			      " for BE client '%s'",
			      txn->txn_id, adapter->name);
		/*
		 * Set the changeset for transaction to commit and trigger the
		 * commit request.
		 */
		txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
		txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_NONE;
		txn_req->req.commit_cfg.src_ds_ctx = 0;
		txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_NONE;
		txn_req->req.commit_cfg.dst_ds_ctx = 0;
		txn_req->req.commit_cfg.validate_only = false;
		txn_req->req.commit_cfg.abort = false;
		txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
		txn_req->req.commit_cfg.cfg_chgs = adapter_cfgs;

		/*
		 * Trigger a COMMIT-CONFIG process.
		 */
		mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);

	} else {
		/*
		 * Check if any transaction is currently on-going that
		 * involves this backend client. If so, report the transaction
		 * has failed.
		 */
		FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
			/* TODO: update with operational state when that is
			 * completed */
			if (txn->type == MGMTD_TXN_TYPE_CONFIG) {
				cmtcfg_req = txn->commit_cfg_req
					? &txn->commit_cfg_req->req
						.commit_cfg
					: NULL;
				if (cmtcfg_req &&
				    cmtcfg_req->subscr_info
					    .xpath_subscr[adapter->id]) {
					mgmt_txn_send_commit_cfg_reply(
						txn, MGMTD_INTERNAL_ERROR,
						"Backend daemon disconnected while processing commit!");
				}
			}
		}
	}

	return 0;
}
+
/*
 * Handle a TXN-CREATE / TXN-DELETE reply from a backend adapter.
 *
 * create=true  - a TXN_CREATE reply: on success start sending CFGDATA
 *                to that backend, on failure abort the whole commit.
 * create=false - a TXN_DELETE reply: currently a no-op (see below).
 *
 * Returns 0 on success, -1 for an unknown or non-CONFIG transaction.
 */
int mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create, bool success,
				 struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_commit_cfg_req *cmtcfg_req = NULL;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
		return -1;

	/* A late TXN_DELETE reply after the commit request is gone is fine. */
	if (!create && !txn->commit_cfg_req)
		return 0;

	assert(txn->commit_cfg_req);
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
	if (create) {
		if (success) {
			/*
			 * Done with TXN_CREATE. Move the backend client to
			 * next phase.
			 */
			assert(cmtcfg_req->curr_phase ==
			       MGMTD_COMMIT_PHASE_TXN_CREATE);

			/*
			 * Send CFGDATA_CREATE-REQs to the backend immediately.
			 */
			mgmt_txn_send_be_cfg_data(txn, adapter);
		} else {
			mgmt_txn_send_commit_cfg_reply(
				txn, MGMTD_INTERNAL_ERROR,
				"Internal error! Failed to initiate transaction at backend!");
		}
	} else {
		/*
		 * Done with TXN_DELETE. Move the backend client to next phase.
		 *
		 * NOTE(review): the phase-move below is deliberately disabled
		 * ('if (false)') — dead call kept as a placeholder.
		 */
		if (false)
			mgmt_move_be_commit_to_next_phase(txn, adapter);
	}

	return 0;
}
+
/*
 * Handle a CFGDATA_CREATE reply from a backend adapter.
 *
 * On success move the acknowledged batch on to the APPLY_CFG phase; on
 * failure abort the whole commit with the backend-supplied error (or a
 * generic one).
 *
 * Returns 0 when handled, -1 for an unknown txn/commit/batch.
 */
int mgmt_txn_notify_be_cfgdata_reply(uint64_t txn_id, uint64_t batch_id,
				     bool success, char *error_if_any,
				     struct mgmt_be_client_adapter *adapter)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_be_cfg_batch *batch;
	struct mgmt_commit_cfg_req *cmtcfg_req;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG)
		return -1;

	if (!txn->commit_cfg_req)
		return -1;
	cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;

	/* The batch must exist and belong to this transaction. */
	batch = mgmt_txn_cfgbatch_id2ctx(txn, batch_id);
	if (!batch || batch->txn != txn)
		return -1;

	if (!success) {
		MGMTD_TXN_ERR("CFGDATA_CREATE_REQ sent to '%s' failed txn-id: %" PRIu64
			      " batch-id %" PRIu64 " err: %s",
			      adapter->name, txn->txn_id, batch->batch_id,
			      error_if_any ? error_if_any : "None");
		mgmt_txn_send_commit_cfg_reply(
			txn, MGMTD_INTERNAL_ERROR,
			error_if_any
				? error_if_any
				: "Internal error! Failed to download config data to backend!");
		return 0;
	}

	MGMTD_TXN_DBG("CFGDATA_CREATE_REQ sent to '%s' was successful txn-id: %" PRIu64
		      " batch-id %" PRIu64 " err: %s",
		      adapter->name, txn->txn_id, batch->batch_id,
		      error_if_any ? error_if_any : "None");
	mgmt_move_txn_cfg_batch_to_next(cmtcfg_req, batch,
					&cmtcfg_req->curr_batches[adapter->id],
					&cmtcfg_req->next_batches[adapter->id],
					true, MGMTD_COMMIT_PHASE_APPLY_CFG);

	mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);

	return 0;
}
+
+int mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
+ uint64_t batch_ids[],
+ size_t num_batch_ids, char *error_if_any,
+ struct mgmt_be_client_adapter *adapter)
+{
+ struct mgmt_txn_ctx *txn;
+ struct mgmt_txn_be_cfg_batch *batch;
+ struct mgmt_commit_cfg_req *cmtcfg_req = NULL;
+ size_t indx;
+
+ txn = mgmt_txn_id2ctx(txn_id);
+ if (!txn || txn->type != MGMTD_TXN_TYPE_CONFIG || !txn->commit_cfg_req)
+ return -1;
+
+ cmtcfg_req = &txn->commit_cfg_req->req.commit_cfg;
+
+ if (!success) {
+ MGMTD_TXN_ERR("CFGDATA_APPLY_REQ sent to '%s' failed txn-id: %" PRIu64
+ " batch ids %" PRIu64 " - %" PRIu64 " err: %s",
+ adapter->name, txn->txn_id, batch_ids[0],
+ batch_ids[num_batch_ids - 1],
+ error_if_any ? error_if_any : "None");
+ mgmt_txn_send_commit_cfg_reply(
+ txn, MGMTD_INTERNAL_ERROR,
+ error_if_any
+ ? error_if_any
+ : "Internal error! Failed to apply config data on backend!");
+ return 0;
+ }
+
+ for (indx = 0; indx < num_batch_ids; indx++) {
+ batch = mgmt_txn_cfgbatch_id2ctx(txn, batch_ids[indx]);
+ if (batch->txn != txn)
+ return -1;
+ mgmt_move_txn_cfg_batch_to_next(
+ cmtcfg_req, batch,
+ &cmtcfg_req->curr_batches[adapter->id],
+ &cmtcfg_req->next_batches[adapter->id], true,
+ MGMTD_COMMIT_PHASE_TXN_DELETE);
+ }
+
+ if (!mgmt_txn_batches_count(&cmtcfg_req->curr_batches[adapter->id])) {
+ /*
+ * All configuration for the specific backend has been applied.
+ * Send TXN-DELETE to wrap up the transaction for this backend.
+ */
+ SET_FLAG(adapter->flags, MGMTD_BE_ADAPTER_FLAGS_CFG_SYNCED);
+ mgmt_txn_send_be_txn_delete(txn, adapter);
+ }
+
+ mgmt_try_move_commit_to_next_phase(txn, cmtcfg_req);
+ if (mm->perf_stats_en)
+ gettimeofday(&cmtcfg_req->cmt_stats->apply_cfg_end, NULL);
+
+ return 0;
+}
+
/*
 * Queue a GETCFG/GETDATA request on a transaction.
 *
 * A non-NULL cfg_root means the data is served locally from that config
 * tree (GETCFG); otherwise it is a GETDATA request.  At most
 * MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH xpaths are taken; extra entries are
 * silently ignored.
 *
 * Returns 0 on success, -1 for an unknown txn-id.
 */
int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
			  Mgmtd__DatastoreId ds_id, struct nb_config *cfg_root,
			  Mgmtd__YangGetDataReq **data_req, size_t num_reqs)
{
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;
	enum mgmt_txn_event req_event;
	size_t indx;

	txn = mgmt_txn_id2ctx(txn_id);
	if (!txn)
		return -1;

	req_event = cfg_root ? MGMTD_TXN_PROC_GETCFG : MGMTD_TXN_PROC_GETDATA;

	txn_req = mgmt_txn_req_alloc(txn, req_id, req_event);
	txn_req->req.get_data->ds_id = ds_id;
	txn_req->req.get_data->cfg_root = cfg_root;
	for (indx = 0;
	     indx < num_reqs && indx < MGMTD_MAX_NUM_DATA_REPLY_IN_BATCH;
	     indx++) {
		MGMTD_TXN_DBG("XPath: '%s'", data_req[indx]->data->xpath);
		/* xpath is strdup'ed; freed when the request is cleaned up. */
		txn_req->req.get_data->xpaths[indx] =
			strdup(data_req[indx]->data->xpath);
		txn_req->req.get_data->num_xpaths++;
	}

	mgmt_txn_register_event(txn, req_event);

	return 0;
}
+
/*
 * Dump all known transactions to the vty ("show mgmt transaction all").
 */
void mgmt_txn_status_write(struct vty *vty)
{
	struct mgmt_txn_ctx *txn;

	vty_out(vty, "MGMTD Transactions\n");

	FOREACH_TXN_IN_LIST (mgmt_txn_mm, txn) {
		vty_out(vty, "  Txn: \t\t\t0x%p\n", txn);
		vty_out(vty, "    Txn-Id: \t\t\t%" PRIu64 "\n", txn->txn_id);
		vty_out(vty, "    Session-Id: \t\t%" PRIu64 "\n",
			txn->session_id);
		vty_out(vty, "    Type: \t\t\t%s\n",
			mgmt_txn_type2str(txn->type));
		vty_out(vty, "    Ref-Count: \t\t\t%d\n", txn->refcount);
	}
	vty_out(vty, "  Total: %d\n",
		(int)mgmt_txns_count(&mgmt_txn_mm->txn_list));
}
+
/*
 * Kick off applying a rollback: diff the candidate (src) against the
 * running (dst) datastore and push the resulting changeset through a
 * fresh CONFIG transaction marked 'rollback'.
 *
 * Returns 0 on success, -1 when there is nothing to commit or the
 * transaction cannot be created.
 */
int mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
					struct mgmt_ds_ctx *dst_ds_ctx)
{
	/* static: cfg_chgs / cmt_stats pointers stored in the commit request
	 * must stay valid after this function returns. */
	static struct nb_config_cbs changes;
	static struct mgmt_commit_stats dummy_stats;

	struct nb_config_cbs *cfg_chgs = NULL;
	struct mgmt_txn_ctx *txn;
	struct mgmt_txn_req *txn_req;

	memset(&changes, 0, sizeof(changes));
	memset(&dummy_stats, 0, sizeof(dummy_stats));
	/*
	 * This could be the case when the config is directly
	 * loaded onto the candidate DS from a file. Get the
	 * diff from a full comparison of the candidate and
	 * running DSs.
	 */
	nb_config_diff(mgmt_ds_get_nb_config(dst_ds_ctx),
		       mgmt_ds_get_nb_config(src_ds_ctx), &changes);
	cfg_chgs = &changes;

	if (RB_EMPTY(nb_config_cbs, cfg_chgs)) {
		/*
		 * This means there's no changes to commit whatsoever
		 * is the source of the changes in config.
		 */
		return -1;
	}

	/*
	 * Create a CONFIG transaction to push the config changes
	 * provided to the backend client.
	 */
	txn = mgmt_txn_create_new(0, MGMTD_TXN_TYPE_CONFIG);
	if (!txn) {
		MGMTD_TXN_ERR(
			"Failed to create CONFIG Transaction for downloading CONFIGs");
		return -1;
	}

	MGMTD_TXN_DBG("Created rollback txn-id: %" PRIu64, txn->txn_id);

	/*
	 * Set the changeset for transaction to commit and trigger the commit
	 * request.
	 */
	txn_req = mgmt_txn_req_alloc(txn, 0, MGMTD_TXN_PROC_COMMITCFG);
	txn_req->req.commit_cfg.src_ds_id = MGMTD_DS_CANDIDATE;
	txn_req->req.commit_cfg.src_ds_ctx = src_ds_ctx;
	txn_req->req.commit_cfg.dst_ds_id = MGMTD_DS_RUNNING;
	txn_req->req.commit_cfg.dst_ds_ctx = dst_ds_ctx;
	txn_req->req.commit_cfg.validate_only = false;
	txn_req->req.commit_cfg.abort = false;
	txn_req->req.commit_cfg.rollback = true;
	txn_req->req.commit_cfg.cmt_stats = &dummy_stats;
	txn_req->req.commit_cfg.cfg_chgs = cfg_chgs;

	/*
	 * Trigger a COMMIT-CONFIG process.
	 */
	mgmt_txn_register_event(txn, MGMTD_TXN_PROC_COMMITCFG);
	return 0;
}
diff --git a/mgmtd/mgmt_txn.h b/mgmtd/mgmt_txn.h
new file mode 100644
index 0000000..068f07a
--- /dev/null
+++ b/mgmtd/mgmt_txn.h
@@ -0,0 +1,243 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD Transactions
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#ifndef _FRR_MGMTD_TXN_H_
+#define _FRR_MGMTD_TXN_H_
+
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_ds.h"
+
+#define MGMTD_TXN_PROC_DELAY_MSEC 5
+#define MGMTD_TXN_PROC_DELAY_USEC 10
+#define MGMTD_TXN_MAX_NUM_SETCFG_PROC 128
+#define MGMTD_TXN_MAX_NUM_GETCFG_PROC 128
+#define MGMTD_TXN_MAX_NUM_GETDATA_PROC 128
+
+#define MGMTD_TXN_SEND_CFGVALIDATE_DELAY_MSEC 100
+#define MGMTD_TXN_SEND_CFGAPPLY_DELAY_MSEC 100
+#define MGMTD_TXN_CFG_COMMIT_MAX_DELAY_MSEC 30000 /* 30 seconds */
+
+#define MGMTD_TXN_CLEANUP_DELAY_MSEC 100
+#define MGMTD_TXN_CLEANUP_DELAY_USEC 10
+
+/*
+ * The following definition enables local validation of config
+ * on the MGMTD process by loading client-defined NB callbacks
 * and calling them locally before sending CFG_APPLY_REQ to
+ * backend for actual apply of configuration on internal state
+ * of the backend application.
+ *
+ * #define MGMTD_LOCAL_VALIDATIONS_ENABLED
+ *
+ * Note: Enabled by default in configure.ac, if this needs to be
+ * disabled then pass --enable-mgmtd-local-validations=no to
+ * the list of arguments passed to ./configure
+ */
+
+PREDECL_LIST(mgmt_txns);
+
+struct mgmt_master;
+
enum mgmt_txn_type {
	MGMTD_TXN_TYPE_NONE = 0,
	MGMTD_TXN_TYPE_CONFIG,
	MGMTD_TXN_TYPE_SHOW
};

/* Human-readable name for a transaction type (for logs and vty output). */
static inline const char *mgmt_txn_type2str(enum mgmt_txn_type type)
{
	if (type == MGMTD_TXN_TYPE_CONFIG)
		return "CONFIG";
	if (type == MGMTD_TXN_TYPE_SHOW)
		return "SHOW";
	if (type == MGMTD_TXN_TYPE_NONE)
		return "None";

	/* Out-of-range values (e.g. corrupted state) fall through here. */
	return "Unknown";
}
+
+/* Initialise transaction module. */
+extern int mgmt_txn_init(struct mgmt_master *cm, struct event_loop *tm);
+
+/* Destroy the transaction module. */
+extern void mgmt_txn_destroy(void);
+
+/*
+ * Check if transaction is in progress.
+ *
+ * Returns:
+ * session ID if in-progress, MGMTD_SESSION_ID_NONE otherwise.
+ */
+extern uint64_t mgmt_config_txn_in_progress(void);
+
+/*
+ * Create transaction.
+ *
+ * session_id
+ * Session ID.
+ *
+ * type
+ * Transaction type (CONFIG/SHOW/NONE)
+ *
+ * Returns:
+ * transaction ID.
+ */
+extern uint64_t mgmt_create_txn(uint64_t session_id, enum mgmt_txn_type type);
+
+/*
+ * Destroy transaction.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ */
+extern void mgmt_destroy_txn(uint64_t *txn_id);
+
+/*
+ * Send set-config request to be processed later in transaction.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * req_id
+ * Unique transaction request identifier.
+ *
+ * ds_id
+ * Datastore ID.
+ *
+ * ds_hndl
+ * Datastore handle.
+ *
+ * cfg_req
+ * Config requests.
+ *
+ * num_req
+ * Number of config requests.
+ *
+ * implicit_commit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * dst_ds_id
+ * Destination datastore ID.
+ *
+ * dst_ds_handle
+ * Destination datastore handle.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+extern int mgmt_txn_send_set_config_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ struct mgmt_ds_ctx *ds_ctx,
+ Mgmtd__YangCfgDataReq **cfg_req,
+ size_t num_req, bool implicit_commit,
+ Mgmtd__DatastoreId dst_ds_id,
+ struct mgmt_ds_ctx *dst_ds_ctx);
+
+/*
+ * Send commit-config request to be processed later in transaction.
+ *
+ * txn_id
+ * Unique transaction identifier.
+ *
+ * req_id
+ * Unique transaction request identifier.
+ *
+ * src_ds_id
+ * Source datastore ID.
+ *
+ * src_ds_hndl
+ * Source Datastore handle.
+ *
+ * validate_only
+ * TRUE if commit request needs to be validated only, FALSE otherwise.
+ *
+ * abort
+ * TRUE if need to restore Src DS back to Dest DS, FALSE otherwise.
+ *
+ * implicit
+ * TRUE if the commit is implicit, FALSE otherwise.
+ *
+ * Returns:
+ * 0 on success, -1 on failures.
+ */
+extern int mgmt_txn_send_commit_config_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId src_ds_id,
+ struct mgmt_ds_ctx *dst_ds_ctx,
+ Mgmtd__DatastoreId dst_ds_id,
+ struct mgmt_ds_ctx *src_ds_ctx,
+ bool validate_only, bool abort,
+ bool implicit);
+
+/*
+ * Send get-{cfg,data} request to be processed later in transaction.
+ *
+ * Is get-config if cfg_root is provided and the config is gathered locally,
 * otherwise it's get-data and data is fetched from backend clients.
+ */
+extern int mgmt_txn_send_get_req(uint64_t txn_id, uint64_t req_id,
+ Mgmtd__DatastoreId ds_id,
+ struct nb_config *cfg_root,
+ Mgmtd__YangGetDataReq **data_req,
+ size_t num_reqs);
+
+/*
 * Notify backend adapter on connection.
+ */
+extern int
+mgmt_txn_notify_be_adapter_conn(struct mgmt_be_client_adapter *adapter,
+ bool connect);
+
+/*
+ * Reply to backend adapter about transaction create/delete.
+ */
+extern int
+mgmt_txn_notify_be_txn_reply(uint64_t txn_id, bool create, bool success,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
 * Reply to backend adapter with config data create request.
+ */
+extern int
+mgmt_txn_notify_be_cfgdata_reply(uint64_t txn_id, uint64_t batch_id,
+ bool success, char *error_if_any,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
 * Reply to backend adapter with config data validate request.
+ */
+extern int mgmt_txn_notify_be_cfg_validate_reply(
+ uint64_t txn_id, bool success, uint64_t batch_ids[],
+ size_t num_batch_ids, char *error_if_any,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
 * Reply to backend adapter with config data apply request.
+ */
+extern int
+mgmt_txn_notify_be_cfg_apply_reply(uint64_t txn_id, bool success,
+ uint64_t batch_ids[],
+ size_t num_batch_ids, char *error_if_any,
+ struct mgmt_be_client_adapter *adapter);
+
+/*
+ * Dump transaction status to vty.
+ */
+extern void mgmt_txn_status_write(struct vty *vty);
+
+/*
+ * Trigger rollback config apply.
+ *
+ * Creates a new transaction and commit request for rollback.
+ */
+extern int
+mgmt_txn_rollback_trigger_cfg_apply(struct mgmt_ds_ctx *src_ds_ctx,
+ struct mgmt_ds_ctx *dst_ds_ctx);
+#endif /* _FRR_MGMTD_TXN_H_ */
diff --git a/mgmtd/mgmt_vty.c b/mgmtd/mgmt_vty.c
new file mode 100644
index 0000000..b49bf80
--- /dev/null
+++ b/mgmtd/mgmt_vty.c
@@ -0,0 +1,507 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MGMTD VTY Interface
+ *
+ * Copyright (C) 2021 Vmware, Inc.
+ * Pushpasis Sarkar <spushpasis@vmware.com>
+ */
+
+#include <zebra.h>
+
+#include "command.h"
+#include "json.h"
+#include "network.h"
+#include "northbound_cli.h"
+
+#include "mgmtd/mgmt.h"
+#include "mgmtd/mgmt_be_adapter.h"
+#include "mgmtd/mgmt_fe_adapter.h"
+#include "mgmtd/mgmt_ds.h"
+#include "mgmtd/mgmt_history.h"
+
+#include "mgmtd/mgmt_vty_clippy.c"
+
+extern struct frr_daemon_info *mgmt_daemon_info;
+
/* "show mgmt backend-adapter all" - dump all backend adapter state. */
DEFPY(show_mgmt_be_adapter,
      show_mgmt_be_adapter_cmd,
      "show mgmt backend-adapter all",
      SHOW_STR
      MGMTD_STR
      MGMTD_BE_ADAPTER_STR
      "Display all Backend Adapters\n")
{
	mgmt_be_adapter_status_write(vty);

	return CMD_SUCCESS;
}
+
/* "show mgmt backend-yang-xpath-registry" - dump backend xpath registry. */
DEFPY(show_mgmt_be_xpath_reg,
      show_mgmt_be_xpath_reg_cmd,
      "show mgmt backend-yang-xpath-registry",
      SHOW_STR
      MGMTD_STR
      "Backend Adapter YANG Xpath Registry\n")
{
	mgmt_be_xpath_register_write(vty);

	return CMD_SUCCESS;
}
+
/* "show mgmt frontend-adapter all [detail]" - dump frontend adapter state. */
DEFPY(show_mgmt_fe_adapter, show_mgmt_fe_adapter_cmd,
      "show mgmt frontend-adapter all [detail$detail]",
      SHOW_STR
      MGMTD_STR
      MGMTD_FE_ADAPTER_STR
      "Display all Frontend Adapters\n"
      "Display more details\n")
{
	mgmt_fe_adapter_status_write(vty, !!detail);

	return CMD_SUCCESS;
}
+
+DEFPY_HIDDEN(mgmt_performance_measurement,
+ mgmt_performance_measurement_cmd,
+ "[no] mgmt performance-measurement",
+ NO_STR
+ MGMTD_STR
+ "Enable performance measurement\n")
+{
+ if (no)
+ mgmt_fe_adapter_perf_measurement(vty, false);
+ else
+ mgmt_fe_adapter_perf_measurement(vty, true);
+
+ return CMD_SUCCESS;
+}
+
/* "mgmt reset-statistics" - clear collected performance statistics. */
DEFPY(mgmt_reset_performance_stats,
      mgmt_reset_performance_stats_cmd,
      "mgmt reset-statistics",
      MGMTD_STR
      "Reset the Performance measurement statistics\n")
{
	mgmt_fe_adapter_reset_perf_stats(vty);

	return CMD_SUCCESS;
}
+
/* "show mgmt transaction all" - dump all transaction state. */
DEFPY(show_mgmt_txn,
      show_mgmt_txn_cmd,
      "show mgmt transaction all",
      SHOW_STR
      MGMTD_STR
      MGMTD_TXN_STR
      "Display all Transactions\n")
{
	mgmt_txn_status_write(vty);

	return CMD_SUCCESS;
}
+
/*
 * "show mgmt datastore [...]" - dump one datastore's status, or all of
 * them when no name (or "all") is given.
 */
DEFPY(show_mgmt_ds,
      show_mgmt_ds_cmd,
      "show mgmt datastore [all|candidate|operational|running]$dsname",
      SHOW_STR
      MGMTD_STR
      MGMTD_DS_STR
      "All datastores (default)\n"
      "Candidate datastore\n"
      "Operational datastore\n"
      "Running datastore\n")
{
	struct mgmt_ds_ctx *ds_ctx;

	/* First character is enough to distinguish "all" here. */
	if (!dsname || dsname[0] == 'a') {
		mgmt_ds_status_write(vty);
		return CMD_SUCCESS;
	}
	ds_ctx = mgmt_ds_get_ctx_by_id(mm, mgmt_ds_name2id(dsname));
	if (!ds_ctx) {
		vty_out(vty, "ERROR: Could not access %s datastore!\n", dsname);
		return CMD_ERR_NO_MATCH;
	}
	mgmt_ds_status_write_one(vty, ds_ctx);

	return CMD_SUCCESS;
}
+
/*
 * "mgmt commit <check|apply|abort>" - drive a commit of the pending
 * config changes through the frontend client.
 */
DEFPY(mgmt_commit,
      mgmt_commit_cmd,
      "mgmt commit <check|apply|abort>$type",
      MGMTD_STR
      "Commit action\n"
      "Validate the set of config commands\n"
      "Validate and apply the set of config commands\n"
      "Abort and drop the set of config commands recently added\n")
{
	/* Distinguish the keywords by characters unique among them:
	 * "check"[0]=='c'; "abort"[1]=='b' vs "apply"[1]=='p'. */
	bool validate_only = type[0] == 'c';
	bool abort = type[1] == 'b';

	if (vty_mgmt_send_commit_config(vty, validate_only, abort) != 0)
		return CMD_WARNING_CONFIG_FAILED;
	return CMD_SUCCESS;
}
+
/*
 * "mgmt set-config <xpath> <value>" - stage a single config change on
 * the vty and forward it to mgmtd (no implicit commit).
 */
DEFPY(mgmt_set_config_data, mgmt_set_config_data_cmd,
      "mgmt set-config WORD$path VALUE",
      MGMTD_STR
      "Set configuration data\n"
      "XPath expression specifying the YANG data path\n"
      "Value of the data to set\n")
{
	strlcpy(vty->cfg_changes[0].xpath, path,
		sizeof(vty->cfg_changes[0].xpath));
	vty->cfg_changes[0].value = value;
	vty->cfg_changes[0].operation = NB_OP_CREATE;
	vty->num_cfg_changes = 1;

	vty_mgmt_send_config_data(vty, false);
	return CMD_SUCCESS;
}
+
/*
 * "mgmt delete-config <xpath>" - stage a single delete on the vty and
 * forward it to mgmtd (no implicit commit).
 */
DEFPY(mgmt_delete_config_data, mgmt_delete_config_data_cmd,
      "mgmt delete-config WORD$path",
      MGMTD_STR
      "Delete configuration data\n"
      "XPath expression specifying the YANG data path\n")
{

	strlcpy(vty->cfg_changes[0].xpath, path,
		sizeof(vty->cfg_changes[0].xpath));
	vty->cfg_changes[0].value = NULL;
	vty->cfg_changes[0].operation = NB_OP_DESTROY;
	vty->num_cfg_changes = 1;

	vty_mgmt_send_config_data(vty, false);
	return CMD_SUCCESS;
}
+
/*
 * "show mgmt get-config [<ds>] <xpath>" - issue a GET-CONFIG for one
 * xpath against the chosen datastore (candidate by default).
 */
DEFPY(show_mgmt_get_config, show_mgmt_get_config_cmd,
      "show mgmt get-config [candidate|operational|running]$dsname WORD$path",
      SHOW_STR MGMTD_STR
      "Get configuration data from a specific configuration datastore\n"
      "Candidate datastore (default)\n"
      "Operational datastore\n"
      "Running datastore\n"
      "XPath expression specifying the YANG data path\n")
{
	const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
	Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;

	if (dsname)
		datastore = mgmt_ds_name2id(dsname);

	xpath_list[0] = path;
	vty_mgmt_send_get_req(vty, true, datastore, xpath_list, 1);
	return CMD_SUCCESS;
}
+
/*
 * "show mgmt get-data [<ds>] <xpath>" - issue a GET-DATA for one xpath
 * against the chosen datastore (operational by default).
 */
DEFPY(show_mgmt_get_data, show_mgmt_get_data_cmd,
      "show mgmt get-data [candidate|operational|running]$dsname WORD$path",
      SHOW_STR MGMTD_STR
      "Get data from a specific datastore\n"
      "Candidate datastore\n"
      "Operational datastore (default)\n"
      "Running datastore\n"
      "XPath expression specifying the YANG data path\n")
{
	const char *xpath_list[VTY_MAXCFGCHANGES] = {0};
	Mgmtd__DatastoreId datastore = MGMTD_DS_OPERATIONAL;

	if (dsname)
		datastore = mgmt_ds_name2id(dsname);

	xpath_list[0] = path;
	vty_mgmt_send_get_req(vty, false, datastore, xpath_list, 1);
	return CMD_SUCCESS;
}
+
/*
 * "show mgmt datastore-contents ..." - dump a datastore (optionally a
 * subtree) as JSON or XML, to the vty or to a file.
 */
DEFPY(show_mgmt_dump_data,
      show_mgmt_dump_data_cmd,
      "show mgmt datastore-contents [candidate|operational|running]$dsname [xpath WORD$path] [file WORD$filepath] <json|xml>$fmt",
      SHOW_STR
      MGMTD_STR
      "Get Datastore contents from a specific datastore\n"
      "Candidate datastore (default)\n"
      "Operational datastore\n"
      "Running datastore\n"
      "XPath expression specifying the YANG data path\n"
      "XPath string\n"
      "Dump the contents to a file\n"
      "Full path of the file\n"
      "json output\n"
      "xml output\n")
{
	struct mgmt_ds_ctx *ds_ctx;
	Mgmtd__DatastoreId datastore = MGMTD_DS_CANDIDATE;
	/* 'j' distinguishes "json" from "xml". */
	LYD_FORMAT format = fmt[0] == 'j' ? LYD_JSON : LYD_XML;
	FILE *f = NULL;

	if (dsname)
		datastore = mgmt_ds_name2id(dsname);

	ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
	if (!ds_ctx) {
		vty_out(vty, "ERROR: Could not access datastore!\n");
		return CMD_ERR_NO_MATCH;
	}

	if (filepath) {
		f = fopen(filepath, "w");
		if (!f) {
			/* NOTE(review): returns CMD_SUCCESS even though the
			 * file could not be opened — only the message tells
			 * the user; confirm this is intended. */
			vty_out(vty,
				"Could not open file pointed by filepath %s\n",
				filepath);
			return CMD_SUCCESS;
		}
	}

	mgmt_ds_dump_tree(vty, ds_ctx, path, f, format);

	if (f)
		fclose(f);
	return CMD_SUCCESS;
}
+
/* "show mgmt yang-xpath-subscription" - show backends subscribed to an xpath. */
DEFPY(show_mgmt_map_xpath,
      show_mgmt_map_xpath_cmd,
      "show mgmt yang-xpath-subscription WORD$path",
      SHOW_STR
      MGMTD_STR
      "Get YANG Backend Subscription\n"
      "XPath expression specifying the YANG data path\n")
{
	mgmt_be_xpath_subscr_info_write(vty, path);
	return CMD_SUCCESS;
}
+
+DEFPY(mgmt_load_config,
+ mgmt_load_config_cmd,
+ "mgmt load-config WORD$filepath <merge|replace>$type",
+ MGMTD_STR
+ "Load configuration onto Candidate Datastore\n"
+ "Full path of the file\n"
+ "Merge configuration with contents of Candidate Datastore\n"
+ "Replace the existing contents of Candidate datastore\n")
+{
+ bool merge = type[0] == 'm' ? true : false;
+ struct mgmt_ds_ctx *ds_ctx;
+ int ret;
+
+ if (access(filepath, F_OK) == -1) {
+ vty_out(vty, "ERROR: File %s : %s\n", filepath,
+ strerror(errno));
+ return CMD_ERR_NO_FILE;
+ }
+
+ ds_ctx = mgmt_ds_get_ctx_by_id(mm, MGMTD_DS_CANDIDATE);
+ if (!ds_ctx) {
+ vty_out(vty, "ERROR: Could not access Candidate datastore!\n");
+ return CMD_ERR_NO_MATCH;
+ }
+
+ ret = mgmt_ds_load_config_from_file(ds_ctx, filepath, merge);
+ if (ret != 0)
+ vty_out(vty, "Error with parsing the file with error code %d\n",
+ ret);
+ return CMD_SUCCESS;
+}
+
/*
 * "mgmt save-config <candidate|running> <file>" - dump the chosen
 * datastore to a file as JSON.
 */
DEFPY(mgmt_save_config,
      mgmt_save_config_cmd,
      "mgmt save-config <candidate|running>$dsname WORD$filepath",
      MGMTD_STR
      "Save configuration from datastore\n"
      "Candidate datastore\n"
      "Running datastore\n"
      "Full path of the file\n")
{
	Mgmtd__DatastoreId datastore = mgmt_ds_name2id(dsname);
	struct mgmt_ds_ctx *ds_ctx;
	FILE *f;

	ds_ctx = mgmt_ds_get_ctx_by_id(mm, datastore);
	if (!ds_ctx) {
		vty_out(vty, "ERROR: Could not access the '%s' datastore!\n",
			dsname);
		return CMD_ERR_NO_MATCH;
	}

	/* Defensive only: the grammar makes WORD$filepath mandatory. */
	if (!filepath) {
		vty_out(vty, "ERROR: No file path mentioned!\n");
		return CMD_ERR_NO_MATCH;
	}

	f = fopen(filepath, "w");
	if (!f) {
		/* NOTE(review): returns CMD_SUCCESS on fopen failure — only
		 * the message tells the user; confirm this is intended. */
		vty_out(vty, "Could not open file pointed by filepath %s\n",
			filepath);
		return CMD_SUCCESS;
	}

	mgmt_ds_dump_tree(vty, ds_ctx, "/", f, LYD_JSON);

	fclose(f);

	return CMD_SUCCESS;
}
+
/* "show mgmt commit-history" - list recorded commits. */
DEFPY(show_mgmt_cmt_hist,
      show_mgmt_cmt_hist_cmd,
      "show mgmt commit-history",
      SHOW_STR
      MGMTD_STR
      "Show commit history\n")
{
	show_mgmt_cmt_history(vty);
	return CMD_SUCCESS;
}
+
+DEFPY(mgmt_rollback,
+ mgmt_rollback_cmd,
+ "mgmt rollback <commit-id WORD$commit | last [(1-10)]$last>",
+ MGMTD_STR
+ "Rollback commits\n"
+ "Rollback to commit ID\n"
+ "Commit-ID\n"
+ "Rollbak n commits\n"
+ "Number of commits\n")
+{
+ if (commit)
+ mgmt_history_rollback_by_id(vty, commit);
+ else
+ mgmt_history_rollback_n(vty, last);
+
+ return CMD_SUCCESS;
+}
+
/* Writer for the DEBUG config node (defined below). */
int config_write_mgmt_debug(struct vty *vty);
/* Command node used to persist "debug mgmt ..." lines in the config. */
static struct cmd_node debug_node = {
	.name = "debug",
	.node = DEBUG_NODE,
	.prompt = "",
	.config_write = config_write_mgmt_debug,
};
+
+static int write_mgmt_debug_helper(struct vty *vty, bool config)
+{
+ uint32_t mode = config ? DEBUG_MODE_CONF : DEBUG_MODE_ALL;
+ bool be = DEBUG_MODE_CHECK(&mgmt_debug_be, mode);
+ bool ds = DEBUG_MODE_CHECK(&mgmt_debug_ds, mode);
+ bool fe = DEBUG_MODE_CHECK(&mgmt_debug_fe, mode);
+ bool txn = DEBUG_MODE_CHECK(&mgmt_debug_txn, mode);
+
+ if (!(be || ds || fe || txn))
+ return 0;
+
+ vty_out(vty, "debug mgmt");
+ if (be)
+ vty_out(vty, " backend");
+ if (ds)
+ vty_out(vty, " datastore");
+ if (fe)
+ vty_out(vty, " frontend");
+ if (txn)
+ vty_out(vty, " transaction");
+
+ vty_out(vty, "\n");
+
+ return 0;
+}
+
/* Config-node writer: persist only config-mode debug settings. */
int config_write_mgmt_debug(struct vty *vty)
{
	return write_mgmt_debug_helper(vty, true);
}
+
/* "show debugging [mgmt]" - report current mgmt debug status. */
DEFPY_NOSH(show_debugging_mgmt, show_debugging_mgmt_cmd,
	   "show debugging [mgmt]", SHOW_STR DEBUG_STR "MGMT Information\n")
{
	vty_out(vty, "MGMT debugging status:\n");

	/* config=false: report any enabled mode, not just saved config. */
	write_mgmt_debug_helper(vty, false);

	cmd_show_lib_debugs(vty);

	return CMD_SUCCESS;
}
+
/*
 * "[no] debug mgmt {...}" - enable/disable one or more mgmt debug
 * targets in the mode implied by the current vty node.
 */
DEFPY(debug_mgmt, debug_mgmt_cmd,
      "[no$no] debug mgmt {backend$be|datastore$ds|frontend$fe|transaction$txn}",
      NO_STR DEBUG_STR MGMTD_STR
      "Backend debug\n"
      "Datastore debug\n"
      "Frontend debug\n"
      "Transaction debug\n")
{
	uint32_t mode = DEBUG_NODE2MODE(vty->node);

	if (be)
		DEBUG_MODE_SET(&mgmt_debug_be, mode, !no);
	if (ds)
		DEBUG_MODE_SET(&mgmt_debug_ds, mode, !no);
	if (fe)
		DEBUG_MODE_SET(&mgmt_debug_fe, mode, !no);
	if (txn)
		DEBUG_MODE_SET(&mgmt_debug_txn, mode, !no);

	return CMD_SUCCESS;
}
+
/* Startup event callback: read in the saved configuration files. */
static void mgmt_config_read_in(struct event *event)
{
	mgmt_vty_read_configs();
}
+
/*
 * Register all mgmtd vty commands and schedule the initial config read.
 * Called once at daemon startup.
 */
void mgmt_vty_init(void)
{
	/*
	 * Initialize command handling from VTYSH connection.
	 * Call command initialization routines defined by
	 * backend components that are moved to new MGMTD infra
	 * here one by one.
	 */
#if HAVE_STATICD
	extern void static_vty_init(void);
	static_vty_init();
#endif

	/* Defer config-file reading until the event loop runs. */
	event_add_event(mm->master, mgmt_config_read_in, NULL, 0,
			&mgmt_daemon_info->read_in);

	install_node(&debug_node);

	install_element(VIEW_NODE, &show_mgmt_be_adapter_cmd);
	install_element(VIEW_NODE, &show_mgmt_be_xpath_reg_cmd);
	install_element(VIEW_NODE, &show_mgmt_fe_adapter_cmd);
	install_element(VIEW_NODE, &show_mgmt_txn_cmd);
	install_element(VIEW_NODE, &show_mgmt_ds_cmd);
	install_element(VIEW_NODE, &show_mgmt_get_config_cmd);
	install_element(VIEW_NODE, &show_mgmt_get_data_cmd);
	install_element(VIEW_NODE, &show_mgmt_dump_data_cmd);
	install_element(VIEW_NODE, &show_mgmt_map_xpath_cmd);
	install_element(VIEW_NODE, &show_mgmt_cmt_hist_cmd);

	install_element(CONFIG_NODE, &mgmt_commit_cmd);
	install_element(CONFIG_NODE, &mgmt_set_config_data_cmd);
	install_element(CONFIG_NODE, &mgmt_delete_config_data_cmd);
	install_element(CONFIG_NODE, &mgmt_load_config_cmd);
	install_element(CONFIG_NODE, &mgmt_save_config_cmd);
	install_element(CONFIG_NODE, &mgmt_rollback_cmd);

	install_element(VIEW_NODE, &debug_mgmt_cmd);
	install_element(CONFIG_NODE, &debug_mgmt_cmd);

	/* Enable view */
	install_element(ENABLE_NODE, &mgmt_performance_measurement_cmd);
	install_element(ENABLE_NODE, &mgmt_reset_performance_stats_cmd);

	install_element(ENABLE_NODE, &show_debugging_mgmt_cmd);

	mgmt_fe_client_lib_vty_init();
	/*
	 * TODO: Register and handlers for auto-completion here.
	 */
}
diff --git a/mgmtd/subdir.am b/mgmtd/subdir.am
new file mode 100644
index 0000000..67b45d5
--- /dev/null
+++ b/mgmtd/subdir.am
@@ -0,0 +1,68 @@
+#
# mgmtd -- Management Daemon
+#
+
+# dist_examples_DATA += \
+ # end
+
+vtysh_daemons += mgmtd
+
+# man8 += $(MANBUILD)/frr-mgmtd.8
+# endif
+
+clippy_scan += \
+ mgmtd/mgmt_vty.c \
+ # end
+
+lib_LTLIBRARIES += mgmtd/libmgmt_be_nb.la
+nodist_mgmtd_libmgmt_be_nb_la_SOURCES = \
+ # end
+mgmtd_libmgmt_be_nb_la_CFLAGS = $(AM_CFLAGS) -DINCLUDE_MGMTD_CMDDEFS_ONLY
+mgmtd_libmgmt_be_nb_la_CPPFLAGS = $(AM_CPPFLAGS) -DINCLUDE_MGMTD_CMDDEFS_ONLY
+mgmtd_libmgmt_be_nb_la_LDFLAGS = -version-info 0:0:0
+
+noinst_LIBRARIES += mgmtd/libmgmtd.a
+mgmtd_libmgmtd_a_SOURCES = \
+ mgmtd/mgmt.c \
+ mgmtd/mgmt_ds.c \
+ mgmtd/mgmt_be_adapter.c \
+ mgmtd/mgmt_fe_adapter.c \
+ mgmtd/mgmt_history.c \
+ mgmtd/mgmt_memory.c \
+ mgmtd/mgmt_txn.c \
+ mgmtd/mgmt_vty.c \
+ # end
+
+mgmtdheaderdir = $(pkgincludedir)/mgmtd
+mgmtdheader_HEADERS = \
+ mgmtd/mgmt_defines.h \
+ # end
+
+noinst_HEADERS += \
+ mgmtd/mgmt.h \
+ mgmtd/mgmt_be_adapter.h \
+ mgmtd/mgmt_ds.h \
+ mgmtd/mgmt_fe_adapter.h \
+ mgmtd/mgmt_history.h \
+ mgmtd/mgmt_memory.h \
+ mgmtd/mgmt_txn.h \
+ # end
+
+sbin_PROGRAMS += mgmtd/mgmtd
+
+mgmtd_mgmtd_SOURCES = \
+ mgmtd/mgmt_main.c \
+ # end
+nodist_mgmtd_mgmtd_SOURCES = \
+ # nothing
+mgmtd_mgmtd_CFLAGS = $(AM_CFLAGS) -I ./
+mgmtd_mgmtd_LDADD = mgmtd/libmgmtd.a lib/libfrr.la $(LIBCAP) $(LIBM) $(LIBYANG_LIBS) $(UST_LIBS)
+mgmtd_mgmtd_LDADD += mgmtd/libmgmt_be_nb.la
+
+if STATICD
+nodist_mgmtd_mgmtd_SOURCES += \
+ yang/frr-staticd.yang.c \
+ yang/frr-bfdd.yang.c \
+ # end
+nodist_mgmtd_libmgmt_be_nb_la_SOURCES += staticd/static_vty.c
+endif