author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /drivers/net/netdevsim
parent    Initial commit. (diff)
Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/netdevsim')
-rw-r--r--  drivers/net/netdevsim/Makefile      |   15
-rw-r--r--  drivers/net/netdevsim/bpf.c         |  657
-rw-r--r--  drivers/net/netdevsim/bus.c         |  398
-rw-r--r--  drivers/net/netdevsim/dev.c         | 1231
-rw-r--r--  drivers/net/netdevsim/ethtool.c     |   64
-rw-r--r--  drivers/net/netdevsim/fib.c         |  957
-rw-r--r--  drivers/net/netdevsim/health.c      |  320
-rw-r--r--  drivers/net/netdevsim/ipsec.c       |  299
-rw-r--r--  drivers/net/netdevsim/netdev.c      |  396
-rw-r--r--  drivers/net/netdevsim/netdevsim.h   |  293
-rw-r--r--  drivers/net/netdevsim/udp_tunnels.c |  217
11 files changed, 4847 insertions(+), 0 deletions(-)
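
The files below introduce netdevsim, a software-only model of a network device used to exercise kernel networking APIs (devlink, BPF and XDP offload, packet traps, health reporters) without real hardware. Devices are created and destroyed at runtime through the netdevsim pseudo-bus implemented in bus.c. As a minimal userspace sketch (assuming CONFIG_NETDEVSIM is enabled and the /sys/bus/netdevsim interface from bus.c is present; this program is illustrative and not part of the patch), a device with id 0 and one port can be created like this:

/* Illustrative userspace sketch: create netdevsim device 0 with 1 port.
 * The "id port_count" format matches new_device_store() in bus.c below.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/netdevsim/new_device";
	const char *cmd = "0 1";	/* device id 0, one port */
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	printf("created netdevsim device 0 with 1 port\n");
	return 0;
}

Writing the same id to /sys/bus/netdevsim/del_device removes the device again, mirroring del_device_store() in bus.c.
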
diff --git a/drivers/net/netdevsim/Makefile b/drivers/net/netdevsim/Makefile
new file mode 100644
index 000000000..ade086eed
--- /dev/null
+++ b/drivers/net/netdevsim/Makefile
@@ -0,0 +1,15 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_NETDEVSIM) += netdevsim.o
+
+netdevsim-objs := \
+ netdev.o dev.o ethtool.o fib.o bus.o health.o udp_tunnels.o
+
+ifeq ($(CONFIG_BPF_SYSCALL),y)
+netdevsim-objs += \
+ bpf.o
+endif
+
+ifneq ($(CONFIG_XFRM_OFFLOAD),)
+netdevsim-objs += ipsec.o
+endif
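
The Makefile compiles bpf.o only when CONFIG_BPF_SYSCALL=y and ipsec.o only when CONFIG_XFRM_OFFLOAD is enabled. Callers elsewhere in the driver stay free of #ifdefs because the shared header declares the real functions in one branch and empty inline fallbacks in the other. A hedged sketch of that pattern follows (the stub bodies are illustrative; the actual declarations live in netdevsim.h, which is part of this series but not shown in this excerpt):

/* Illustrative header pattern; see netdevsim.h for the real declarations. */
#ifdef CONFIG_BPF_SYSCALL
int nsim_bpf_dev_init(struct nsim_dev *nsim_dev);
void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev);
int nsim_bpf_init(struct netdevsim *ns);
void nsim_bpf_uninit(struct netdevsim *ns);
#else
static inline int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
{
	return 0;	/* BPF offload compiled out: nothing to set up */
}

static inline void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
{
}

static inline int nsim_bpf_init(struct netdevsim *ns)
{
	return 0;
}

static inline void nsim_bpf_uninit(struct netdevsim *ns)
{
}
#endif
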
diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c
new file mode 100644
index 000000000..508542658
--- /dev/null
+++ b/drivers/net/netdevsim/bpf.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/bpf.h>
+#include <linux/bpf_verifier.h>
+#include <linux/debugfs.h>
+#include <linux/kernel.h>
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+#include <net/pkt_cls.h>
+
+#include "netdevsim.h"
+
+#define pr_vlog(env, fmt, ...) \
+ bpf_verifier_log_write(env, "[netdevsim] " fmt, ##__VA_ARGS__)
+
+struct nsim_bpf_bound_prog {
+ struct nsim_dev *nsim_dev;
+ struct bpf_prog *prog;
+ struct dentry *ddir;
+ const char *state;
+ bool is_loaded;
+ struct list_head l;
+};
+
+#define NSIM_BPF_MAX_KEYS 2
+
+struct nsim_bpf_bound_map {
+ struct netdevsim *ns;
+ struct bpf_offloaded_map *map;
+ struct mutex mutex;
+ struct nsim_map_entry {
+ void *key;
+ void *value;
+ } entry[NSIM_BPF_MAX_KEYS];
+ struct list_head l;
+};
+
+static int nsim_bpf_string_show(struct seq_file *file, void *data)
+{
+ const char **str = file->private;
+
+ if (*str)
+ seq_printf(file, "%s\n", *str);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(nsim_bpf_string);
+
+static int
+nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn)
+{
+ struct nsim_bpf_bound_prog *state;
+ int ret = 0;
+
+ state = env->prog->aux->offload->dev_priv;
+ if (state->nsim_dev->bpf_bind_verifier_delay && !insn_idx)
+ msleep(state->nsim_dev->bpf_bind_verifier_delay);
+
+ if (insn_idx == env->prog->len - 1) {
+ pr_vlog(env, "Hello from netdevsim!\n");
+
+ if (!state->nsim_dev->bpf_bind_verifier_accept)
+ ret = -EOPNOTSUPP;
+ }
+
+ return ret;
+}
+
+static int nsim_bpf_finalize(struct bpf_verifier_env *env)
+{
+ return 0;
+}
+
+static bool nsim_xdp_offload_active(struct netdevsim *ns)
+{
+ return ns->xdp_hw.prog;
+}
+
+static void nsim_prog_set_loaded(struct bpf_prog *prog, bool loaded)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ if (!prog || !prog->aux->offload)
+ return;
+
+ state = prog->aux->offload->dev_priv;
+ state->is_loaded = loaded;
+}
+
+static int
+nsim_bpf_offload(struct netdevsim *ns, struct bpf_prog *prog, bool oldprog)
+{
+ nsim_prog_set_loaded(ns->bpf_offloaded, false);
+
+ WARN(!!ns->bpf_offloaded != oldprog,
+ "bad offload state, expected offload %sto be active",
+ oldprog ? "" : "not ");
+ ns->bpf_offloaded = prog;
+ ns->bpf_offloaded_id = prog ? prog->aux->id : 0;
+ nsim_prog_set_loaded(prog, true);
+
+ return 0;
+}
+
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv)
+{
+ struct tc_cls_bpf_offload *cls_bpf = type_data;
+ struct bpf_prog *prog = cls_bpf->prog;
+ struct netdevsim *ns = cb_priv;
+ struct bpf_prog *oldprog;
+
+ if (type != TC_SETUP_CLSBPF) {
+ NSIM_EA(cls_bpf->common.extack,
+ "only offload of BPF classifiers supported");
+ return -EOPNOTSUPP;
+ }
+
+ if (!tc_cls_can_offload_and_chain0(ns->netdev, &cls_bpf->common))
+ return -EOPNOTSUPP;
+
+ if (cls_bpf->common.protocol != htons(ETH_P_ALL)) {
+ NSIM_EA(cls_bpf->common.extack,
+ "only ETH_P_ALL supported as filter protocol");
+ return -EOPNOTSUPP;
+ }
+
+ if (!ns->bpf_tc_accept) {
+ NSIM_EA(cls_bpf->common.extack,
+ "netdevsim configured to reject BPF TC offload");
+ return -EOPNOTSUPP;
+ }
+ /* Note: progs without skip_sw will probably not be dev bound */
+ if (prog && !prog->aux->offload && !ns->bpf_tc_non_bound_accept) {
+ NSIM_EA(cls_bpf->common.extack,
+ "netdevsim configured to reject unbound programs");
+ return -EOPNOTSUPP;
+ }
+
+ if (cls_bpf->command != TC_CLSBPF_OFFLOAD)
+ return -EOPNOTSUPP;
+
+ oldprog = cls_bpf->oldprog;
+
+ /* Don't remove if oldprog doesn't match driver's state */
+ if (ns->bpf_offloaded != oldprog) {
+ oldprog = NULL;
+ if (!cls_bpf->prog)
+ return 0;
+ if (ns->bpf_offloaded) {
+ NSIM_EA(cls_bpf->common.extack,
+ "driver and netdev offload states mismatch");
+ return -EBUSY;
+ }
+ }
+
+ return nsim_bpf_offload(ns, cls_bpf->prog, oldprog);
+}
+
+int nsim_bpf_disable_tc(struct netdevsim *ns)
+{
+ if (ns->bpf_offloaded && !nsim_xdp_offload_active(ns))
+ return -EBUSY;
+ return 0;
+}
+
+static int nsim_xdp_offload_prog(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ if (!nsim_xdp_offload_active(ns) && !bpf->prog)
+ return 0;
+ if (!nsim_xdp_offload_active(ns) && bpf->prog && ns->bpf_offloaded) {
+ NSIM_EA(bpf->extack, "TC program is already loaded");
+ return -EBUSY;
+ }
+
+ return nsim_bpf_offload(ns, bpf->prog, nsim_xdp_offload_active(ns));
+}
+
+static int
+nsim_xdp_set_prog(struct netdevsim *ns, struct netdev_bpf *bpf,
+ struct xdp_attachment_info *xdp)
+{
+ int err;
+
+ if (bpf->command == XDP_SETUP_PROG && !ns->bpf_xdpdrv_accept) {
+ NSIM_EA(bpf->extack, "driver XDP disabled in DebugFS");
+ return -EOPNOTSUPP;
+ }
+ if (bpf->command == XDP_SETUP_PROG_HW && !ns->bpf_xdpoffload_accept) {
+ NSIM_EA(bpf->extack, "XDP offload disabled in DebugFS");
+ return -EOPNOTSUPP;
+ }
+
+ if (bpf->command == XDP_SETUP_PROG_HW) {
+ err = nsim_xdp_offload_prog(ns, bpf);
+ if (err)
+ return err;
+ }
+
+ xdp_attachment_setup(xdp, bpf);
+
+ return 0;
+}
+
+static int nsim_bpf_create_prog(struct nsim_dev *nsim_dev,
+ struct bpf_prog *prog)
+{
+ struct nsim_bpf_bound_prog *state;
+ char name[16];
+ int ret;
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return -ENOMEM;
+
+ state->nsim_dev = nsim_dev;
+ state->prog = prog;
+ state->state = "verify";
+
+ /* Program id is not populated yet when we create the state. */
+ sprintf(name, "%u", nsim_dev->prog_id_gen++);
+ state->ddir = debugfs_create_dir(name, nsim_dev->ddir_bpf_bound_progs);
+ if (IS_ERR(state->ddir)) {
+ ret = PTR_ERR(state->ddir);
+ kfree(state);
+ return ret;
+ }
+
+ debugfs_create_u32("id", 0400, state->ddir, &prog->aux->id);
+ debugfs_create_file("state", 0400, state->ddir,
+ &state->state, &nsim_bpf_string_fops);
+ debugfs_create_bool("loaded", 0400, state->ddir, &state->is_loaded);
+
+ list_add_tail(&state->l, &nsim_dev->bpf_bound_progs);
+
+ prog->aux->offload->dev_priv = state;
+
+ return 0;
+}
+
+static int nsim_bpf_verifier_prep(struct bpf_prog *prog)
+{
+ struct nsim_dev *nsim_dev =
+ bpf_offload_dev_priv(prog->aux->offload->offdev);
+
+ if (!nsim_dev->bpf_bind_accept)
+ return -EOPNOTSUPP;
+
+ return nsim_bpf_create_prog(nsim_dev, prog);
+}
+
+static int nsim_bpf_translate(struct bpf_prog *prog)
+{
+ struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv;
+
+ state->state = "xlated";
+ return 0;
+}
+
+static void nsim_bpf_destroy_prog(struct bpf_prog *prog)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ state = prog->aux->offload->dev_priv;
+ WARN(state->is_loaded,
+ "offload state destroyed while program still bound");
+ debugfs_remove_recursive(state->ddir);
+ list_del(&state->l);
+ kfree(state);
+}
+
+static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
+ .insn_hook = nsim_bpf_verify_insn,
+ .finalize = nsim_bpf_finalize,
+ .prepare = nsim_bpf_verifier_prep,
+ .translate = nsim_bpf_translate,
+ .destroy = nsim_bpf_destroy_prog,
+};
+
+static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ if (bpf->prog && bpf->prog->aux->offload) {
+ NSIM_EA(bpf->extack, "attempt to load offloaded prog to drv");
+ return -EINVAL;
+ }
+ if (ns->netdev->mtu > NSIM_XDP_MAX_MTU) {
+ NSIM_EA(bpf->extack, "MTU too large w/ XDP enabled");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static int
+nsim_setup_prog_hw_checks(struct netdevsim *ns, struct netdev_bpf *bpf)
+{
+ struct nsim_bpf_bound_prog *state;
+
+ if (!bpf->prog)
+ return 0;
+
+ if (!bpf->prog->aux->offload) {
+ NSIM_EA(bpf->extack, "xdpoffload of non-bound program");
+ return -EINVAL;
+ }
+ if (!bpf_offload_dev_match(bpf->prog, ns->netdev)) {
+ NSIM_EA(bpf->extack, "program bound to different dev");
+ return -EINVAL;
+ }
+
+ state = bpf->prog->aux->offload->dev_priv;
+ if (WARN_ON(strcmp(state->state, "xlated"))) {
+ NSIM_EA(bpf->extack, "offloading program in bad state");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static bool
+nsim_map_key_match(struct bpf_map *map, struct nsim_map_entry *e, void *key)
+{
+ return e->key && !memcmp(key, e->key, map->key_size);
+}
+
+static int nsim_map_key_find(struct bpf_offloaded_map *offmap, void *key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++)
+ if (nsim_map_key_match(&offmap->map, &nmap->entry[i], key))
+ return i;
+
+ return -ENOENT;
+}
+
+static int
+nsim_map_alloc_elem(struct bpf_offloaded_map *offmap, unsigned int idx)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+
+ nmap->entry[idx].key = kmalloc(offmap->map.key_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
+ if (!nmap->entry[idx].key)
+ return -ENOMEM;
+ nmap->entry[idx].value = kmalloc(offmap->map.value_size,
+ GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
+ if (!nmap->entry[idx].value) {
+ kfree(nmap->entry[idx].key);
+ nmap->entry[idx].key = NULL;
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int
+nsim_map_get_next_key(struct bpf_offloaded_map *offmap,
+ void *key, void *next_key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx = -ENOENT;
+
+ mutex_lock(&nmap->mutex);
+
+ if (key)
+ idx = nsim_map_key_find(offmap, key);
+ if (idx == -ENOENT)
+ idx = 0;
+ else
+ idx++;
+
+ for (; idx < ARRAY_SIZE(nmap->entry); idx++) {
+ if (nmap->entry[idx].key) {
+ memcpy(next_key, nmap->entry[idx].key,
+ offmap->map.key_size);
+ break;
+ }
+ }
+
+ mutex_unlock(&nmap->mutex);
+
+ if (idx == ARRAY_SIZE(nmap->entry))
+ return -ENOENT;
+ return 0;
+}
+
+static int
+nsim_map_lookup_elem(struct bpf_offloaded_map *offmap, void *key, void *value)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx >= 0)
+ memcpy(value, nmap->entry[idx].value, offmap->map.value_size);
+
+ mutex_unlock(&nmap->mutex);
+
+ return idx < 0 ? idx : 0;
+}
+
+static int
+nsim_map_update_elem(struct bpf_offloaded_map *offmap,
+ void *key, void *value, u64 flags)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx, err = 0;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx < 0 && flags == BPF_EXIST) {
+ err = idx;
+ goto exit_unlock;
+ }
+ if (idx >= 0 && flags == BPF_NOEXIST) {
+ err = -EEXIST;
+ goto exit_unlock;
+ }
+
+ if (idx < 0) {
+ for (idx = 0; idx < ARRAY_SIZE(nmap->entry); idx++)
+ if (!nmap->entry[idx].key)
+ break;
+ if (idx == ARRAY_SIZE(nmap->entry)) {
+ err = -E2BIG;
+ goto exit_unlock;
+ }
+
+ err = nsim_map_alloc_elem(offmap, idx);
+ if (err)
+ goto exit_unlock;
+ }
+
+ memcpy(nmap->entry[idx].key, key, offmap->map.key_size);
+ memcpy(nmap->entry[idx].value, value, offmap->map.value_size);
+exit_unlock:
+ mutex_unlock(&nmap->mutex);
+
+ return err;
+}
+
+static int nsim_map_delete_elem(struct bpf_offloaded_map *offmap, void *key)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ int idx;
+
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY)
+ return -EINVAL;
+
+ mutex_lock(&nmap->mutex);
+
+ idx = nsim_map_key_find(offmap, key);
+ if (idx >= 0) {
+ kfree(nmap->entry[idx].key);
+ kfree(nmap->entry[idx].value);
+ memset(&nmap->entry[idx], 0, sizeof(nmap->entry[idx]));
+ }
+
+ mutex_unlock(&nmap->mutex);
+
+ return idx < 0 ? idx : 0;
+}
+
+static const struct bpf_map_dev_ops nsim_bpf_map_ops = {
+ .map_get_next_key = nsim_map_get_next_key,
+ .map_lookup_elem = nsim_map_lookup_elem,
+ .map_update_elem = nsim_map_update_elem,
+ .map_delete_elem = nsim_map_delete_elem,
+};
+
+static int
+nsim_bpf_map_alloc(struct netdevsim *ns, struct bpf_offloaded_map *offmap)
+{
+ struct nsim_bpf_bound_map *nmap;
+ int i, err;
+
+ if (WARN_ON(offmap->map.map_type != BPF_MAP_TYPE_ARRAY &&
+ offmap->map.map_type != BPF_MAP_TYPE_HASH))
+ return -EINVAL;
+ if (offmap->map.max_entries > NSIM_BPF_MAX_KEYS)
+ return -ENOMEM;
+ if (offmap->map.map_flags)
+ return -EINVAL;
+
+ nmap = kzalloc(sizeof(*nmap), GFP_KERNEL_ACCOUNT);
+ if (!nmap)
+ return -ENOMEM;
+
+ offmap->dev_priv = nmap;
+ nmap->ns = ns;
+ nmap->map = offmap;
+ mutex_init(&nmap->mutex);
+
+ if (offmap->map.map_type == BPF_MAP_TYPE_ARRAY) {
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+ u32 *key;
+
+ err = nsim_map_alloc_elem(offmap, i);
+ if (err)
+ goto err_free;
+ key = nmap->entry[i].key;
+ *key = i;
+ memset(nmap->entry[i].value, 0, offmap->map.value_size);
+ }
+ }
+
+ offmap->dev_ops = &nsim_bpf_map_ops;
+ list_add_tail(&nmap->l, &ns->nsim_dev->bpf_bound_maps);
+
+ return 0;
+
+err_free:
+ while (--i >= 0) {
+ kfree(nmap->entry[i].key);
+ kfree(nmap->entry[i].value);
+ }
+ kfree(nmap);
+ return err;
+}
+
+static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap)
+{
+ struct nsim_bpf_bound_map *nmap = offmap->dev_priv;
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(nmap->entry); i++) {
+ kfree(nmap->entry[i].key);
+ kfree(nmap->entry[i].value);
+ }
+ list_del_init(&nmap->l);
+ mutex_destroy(&nmap->mutex);
+ kfree(nmap);
+}
+
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ int err;
+
+ ASSERT_RTNL();
+
+ switch (bpf->command) {
+ case XDP_SETUP_PROG:
+ err = nsim_setup_prog_checks(ns, bpf);
+ if (err)
+ return err;
+
+ return nsim_xdp_set_prog(ns, bpf, &ns->xdp);
+ case XDP_SETUP_PROG_HW:
+ err = nsim_setup_prog_hw_checks(ns, bpf);
+ if (err)
+ return err;
+
+ return nsim_xdp_set_prog(ns, bpf, &ns->xdp_hw);
+ case BPF_OFFLOAD_MAP_ALLOC:
+ if (!ns->bpf_map_accept)
+ return -EOPNOTSUPP;
+
+ return nsim_bpf_map_alloc(ns, bpf->offmap);
+ case BPF_OFFLOAD_MAP_FREE:
+ nsim_bpf_map_free(bpf->offmap);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
+{
+ int err;
+
+ INIT_LIST_HEAD(&nsim_dev->bpf_bound_progs);
+ INIT_LIST_HEAD(&nsim_dev->bpf_bound_maps);
+
+ nsim_dev->ddir_bpf_bound_progs = debugfs_create_dir("bpf_bound_progs",
+ nsim_dev->ddir);
+ if (IS_ERR(nsim_dev->ddir_bpf_bound_progs))
+ return PTR_ERR(nsim_dev->ddir_bpf_bound_progs);
+
+ nsim_dev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops, nsim_dev);
+ err = PTR_ERR_OR_ZERO(nsim_dev->bpf_dev);
+ if (err)
+ return err;
+
+ nsim_dev->bpf_bind_accept = true;
+ debugfs_create_bool("bpf_bind_accept", 0600, nsim_dev->ddir,
+ &nsim_dev->bpf_bind_accept);
+ debugfs_create_u32("bpf_bind_verifier_delay", 0600, nsim_dev->ddir,
+ &nsim_dev->bpf_bind_verifier_delay);
+ nsim_dev->bpf_bind_verifier_accept = true;
+ debugfs_create_bool("bpf_bind_verifier_accept", 0600, nsim_dev->ddir,
+ &nsim_dev->bpf_bind_verifier_accept);
+ return 0;
+}
+
+void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
+{
+ WARN_ON(!list_empty(&nsim_dev->bpf_bound_progs));
+ WARN_ON(!list_empty(&nsim_dev->bpf_bound_maps));
+ bpf_offload_dev_destroy(nsim_dev->bpf_dev);
+}
+
+int nsim_bpf_init(struct netdevsim *ns)
+{
+ struct dentry *ddir = ns->nsim_dev_port->ddir;
+ int err;
+
+ err = bpf_offload_dev_netdev_register(ns->nsim_dev->bpf_dev,
+ ns->netdev);
+ if (err)
+ return err;
+
+ debugfs_create_u32("bpf_offloaded_id", 0400, ddir,
+ &ns->bpf_offloaded_id);
+
+ ns->bpf_tc_accept = true;
+ debugfs_create_bool("bpf_tc_accept", 0600, ddir,
+ &ns->bpf_tc_accept);
+ debugfs_create_bool("bpf_tc_non_bound_accept", 0600, ddir,
+ &ns->bpf_tc_non_bound_accept);
+ ns->bpf_xdpdrv_accept = true;
+ debugfs_create_bool("bpf_xdpdrv_accept", 0600, ddir,
+ &ns->bpf_xdpdrv_accept);
+ ns->bpf_xdpoffload_accept = true;
+ debugfs_create_bool("bpf_xdpoffload_accept", 0600, ddir,
+ &ns->bpf_xdpoffload_accept);
+
+ ns->bpf_map_accept = true;
+ debugfs_create_bool("bpf_map_accept", 0600, ddir,
+ &ns->bpf_map_accept);
+
+ return 0;
+}
+
+void nsim_bpf_uninit(struct netdevsim *ns)
+{
+ WARN_ON(ns->xdp.prog);
+ WARN_ON(ns->xdp_hw.prog);
+ WARN_ON(ns->bpf_offloaded);
+ bpf_offload_dev_netdev_unregister(ns->nsim_dev->bpf_dev, ns->netdev);
+}
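
The map offload callbacks above (nsim_bpf_map_ops) back an offloaded BPF array or hash map with a two-entry table guarded by a mutex and reproduce the usual update-flag semantics: BPF_NOEXIST fails with -EEXIST when the key is already present, BPF_EXIST fails when it is not, and a full table yields -E2BIG. A self-contained userspace analogue of that update path is sketched below (illustrative only; the names and the lack of locking are deliberate simplifications, not netdevsim code):

/* Userspace analogue of nsim_map_update_elem(): a fixed-capacity
 * key/value store with BPF-style update flags.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define DEMO_MAX_KEYS	2	/* mirrors NSIM_BPF_MAX_KEYS */
#define DEMO_NOEXIST	1	/* insert only (like BPF_NOEXIST) */
#define DEMO_EXIST	2	/* update only (like BPF_EXIST) */

struct demo_map {
	int used[DEMO_MAX_KEYS];
	unsigned int key[DEMO_MAX_KEYS];
	unsigned int value[DEMO_MAX_KEYS];
};

static int demo_key_find(struct demo_map *m, unsigned int key)
{
	for (int i = 0; i < DEMO_MAX_KEYS; i++)
		if (m->used[i] && m->key[i] == key)
			return i;
	return -ENOENT;
}

static int demo_update(struct demo_map *m, unsigned int key,
		       unsigned int value, int flags)
{
	int idx = demo_key_find(m, key);

	if (idx < 0 && flags == DEMO_EXIST)
		return idx;		/* must already exist */
	if (idx >= 0 && flags == DEMO_NOEXIST)
		return -EEXIST;		/* must not exist yet */

	if (idx < 0) {			/* find a free slot */
		for (idx = 0; idx < DEMO_MAX_KEYS; idx++)
			if (!m->used[idx])
				break;
		if (idx == DEMO_MAX_KEYS)
			return -E2BIG;	/* map is full */
	}
	m->used[idx] = 1;
	m->key[idx] = key;
	m->value[idx] = value;
	return 0;
}

int main(void)
{
	struct demo_map m;

	memset(&m, 0, sizeof(m));
	printf("insert 1: %d\n", demo_update(&m, 1, 100, DEMO_NOEXIST));	/* 0 */
	printf("insert 1 again: %d\n", demo_update(&m, 1, 101, DEMO_NOEXIST));	/* -EEXIST */
	printf("update 2 (EXIST): %d\n", demo_update(&m, 2, 200, DEMO_EXIST));	/* -ENOENT */
	return 0;
}

The kernel version additionally allocates per-slot key/value buffers and copies the caller's data while holding nmap->mutex.
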
diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c
new file mode 100644
index 000000000..0e9511661
--- /dev/null
+++ b/drivers/net/netdevsim/bus.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2017 Netronome Systems, Inc.
+ * Copyright (C) 2019 Mellanox Technologies. All rights reserved
+ */
+
+#include <linux/device.h>
+#include <linux/idr.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rtnetlink.h>
+#include <linux/slab.h>
+#include <linux/sysfs.h>
+
+#include "netdevsim.h"
+
+static DEFINE_IDA(nsim_bus_dev_ids);
+static LIST_HEAD(nsim_bus_dev_list);
+static DEFINE_MUTEX(nsim_bus_dev_list_lock);
+static bool nsim_bus_enable;
+
+static struct nsim_bus_dev *to_nsim_bus_dev(struct device *dev)
+{
+ return container_of(dev, struct nsim_bus_dev, dev);
+}
+
+static int nsim_bus_dev_vfs_enable(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int num_vfs)
+{
+ nsim_bus_dev->vfconfigs = kcalloc(num_vfs,
+ sizeof(struct nsim_vf_config),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!nsim_bus_dev->vfconfigs)
+ return -ENOMEM;
+ nsim_bus_dev->num_vfs = num_vfs;
+
+ return 0;
+}
+
+static void nsim_bus_dev_vfs_disable(struct nsim_bus_dev *nsim_bus_dev)
+{
+ kfree(nsim_bus_dev->vfconfigs);
+ nsim_bus_dev->vfconfigs = NULL;
+ nsim_bus_dev->num_vfs = 0;
+}
+
+static ssize_t
+nsim_bus_dev_numvfs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ unsigned int num_vfs;
+ int ret;
+
+ ret = kstrtouint(buf, 0, &num_vfs);
+ if (ret)
+ return ret;
+
+ rtnl_lock();
+ if (nsim_bus_dev->num_vfs == num_vfs)
+ goto exit_good;
+ if (nsim_bus_dev->num_vfs && num_vfs) {
+ ret = -EBUSY;
+ goto exit_unlock;
+ }
+
+ if (num_vfs) {
+ ret = nsim_bus_dev_vfs_enable(nsim_bus_dev, num_vfs);
+ if (ret)
+ goto exit_unlock;
+ } else {
+ nsim_bus_dev_vfs_disable(nsim_bus_dev);
+ }
+exit_good:
+ ret = count;
+exit_unlock:
+ rtnl_unlock();
+
+ return ret;
+}
+
+static ssize_t
+nsim_bus_dev_numvfs_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ return sprintf(buf, "%u\n", nsim_bus_dev->num_vfs);
+}
+
+static struct device_attribute nsim_bus_dev_numvfs_attr =
+ __ATTR(sriov_numvfs, 0664, nsim_bus_dev_numvfs_show,
+ nsim_bus_dev_numvfs_store);
+
+static ssize_t
+new_port_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
+ struct devlink *devlink;
+ unsigned int port_index;
+ int ret;
+
+	/* Don't use nsim_bus_dev before it is fully initialized. */
+ if (!smp_load_acquire(&nsim_bus_dev->init))
+ return -EBUSY;
+ ret = kstrtouint(buf, 0, &port_index);
+ if (ret)
+ return ret;
+
+ devlink = priv_to_devlink(nsim_dev);
+
+ mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
+ devlink_reload_disable(devlink);
+ ret = nsim_dev_port_add(nsim_bus_dev, port_index);
+ devlink_reload_enable(devlink);
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
+ return ret ? ret : count;
+}
+
+static struct device_attribute nsim_bus_dev_new_port_attr = __ATTR_WO(new_port);
+
+static ssize_t
+del_port_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+ struct nsim_dev *nsim_dev = dev_get_drvdata(dev);
+ struct devlink *devlink;
+ unsigned int port_index;
+ int ret;
+
+	/* Don't use nsim_bus_dev before it is fully initialized. */
+ if (!smp_load_acquire(&nsim_bus_dev->init))
+ return -EBUSY;
+ ret = kstrtouint(buf, 0, &port_index);
+ if (ret)
+ return ret;
+
+ devlink = priv_to_devlink(nsim_dev);
+
+ mutex_lock(&nsim_bus_dev->nsim_bus_reload_lock);
+ devlink_reload_disable(devlink);
+ ret = nsim_dev_port_del(nsim_bus_dev, port_index);
+ devlink_reload_enable(devlink);
+ mutex_unlock(&nsim_bus_dev->nsim_bus_reload_lock);
+ return ret ? ret : count;
+}
+
+static struct device_attribute nsim_bus_dev_del_port_attr = __ATTR_WO(del_port);
+
+static struct attribute *nsim_bus_dev_attrs[] = {
+ &nsim_bus_dev_numvfs_attr.attr,
+ &nsim_bus_dev_new_port_attr.attr,
+ &nsim_bus_dev_del_port_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group nsim_bus_dev_attr_group = {
+ .attrs = nsim_bus_dev_attrs,
+};
+
+static const struct attribute_group *nsim_bus_dev_attr_groups[] = {
+ &nsim_bus_dev_attr_group,
+ NULL,
+};
+
+static void nsim_bus_dev_release(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ nsim_bus_dev_vfs_disable(nsim_bus_dev);
+}
+
+static struct device_type nsim_bus_dev_type = {
+ .groups = nsim_bus_dev_attr_groups,
+ .release = nsim_bus_dev_release,
+};
+
+static struct nsim_bus_dev *
+nsim_bus_dev_new(unsigned int id, unsigned int port_count);
+
+static ssize_t
+new_device_store(struct bus_type *bus, const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev;
+ unsigned int port_count;
+ unsigned int id;
+ int err;
+
+ err = sscanf(buf, "%u %u", &id, &port_count);
+ switch (err) {
+ case 1:
+ port_count = 1;
+ fallthrough;
+ case 2:
+ if (id > INT_MAX) {
+ pr_err("Value of \"id\" is too big.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("Format for adding new device is \"id port_count\" (uint uint).\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&nsim_bus_dev_list_lock);
+	/* Don't use the bus resources before they are initialized. */
+ if (!smp_load_acquire(&nsim_bus_enable)) {
+ err = -EBUSY;
+ goto err;
+ }
+
+ nsim_bus_dev = nsim_bus_dev_new(id, port_count);
+ if (IS_ERR(nsim_bus_dev)) {
+ err = PTR_ERR(nsim_bus_dev);
+ goto err;
+ }
+
+ /* Allow using nsim_bus_dev */
+ smp_store_release(&nsim_bus_dev->init, true);
+
+ list_add_tail(&nsim_bus_dev->list, &nsim_bus_dev_list);
+ mutex_unlock(&nsim_bus_dev_list_lock);
+
+ return count;
+err:
+ mutex_unlock(&nsim_bus_dev_list_lock);
+ return err;
+}
+static BUS_ATTR_WO(new_device);
+
+static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev);
+
+static ssize_t
+del_device_store(struct bus_type *bus, const char *buf, size_t count)
+{
+ struct nsim_bus_dev *nsim_bus_dev, *tmp;
+ unsigned int id;
+ int err;
+
+ err = sscanf(buf, "%u", &id);
+ switch (err) {
+ case 1:
+ if (id > INT_MAX) {
+ pr_err("Value of \"id\" is too big.\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ pr_err("Format for deleting device is \"id\" (uint).\n");
+ return -EINVAL;
+ }
+
+ err = -ENOENT;
+ mutex_lock(&nsim_bus_dev_list_lock);
+	/* Don't use the bus resources before they are initialized. */
+ if (!smp_load_acquire(&nsim_bus_enable)) {
+ mutex_unlock(&nsim_bus_dev_list_lock);
+ return -EBUSY;
+ }
+ list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
+ if (nsim_bus_dev->dev.id != id)
+ continue;
+ list_del(&nsim_bus_dev->list);
+ nsim_bus_dev_del(nsim_bus_dev);
+ err = 0;
+ break;
+ }
+ mutex_unlock(&nsim_bus_dev_list_lock);
+ return !err ? count : err;
+}
+static BUS_ATTR_WO(del_device);
+
+static struct attribute *nsim_bus_attrs[] = {
+ &bus_attr_new_device.attr,
+ &bus_attr_del_device.attr,
+ NULL
+};
+ATTRIBUTE_GROUPS(nsim_bus);
+
+static int nsim_bus_probe(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ return nsim_dev_probe(nsim_bus_dev);
+}
+
+static int nsim_bus_remove(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ nsim_dev_remove(nsim_bus_dev);
+ return 0;
+}
+
+static int nsim_num_vf(struct device *dev)
+{
+ struct nsim_bus_dev *nsim_bus_dev = to_nsim_bus_dev(dev);
+
+ return nsim_bus_dev->num_vfs;
+}
+
+static struct bus_type nsim_bus = {
+ .name = DRV_NAME,
+ .dev_name = DRV_NAME,
+ .bus_groups = nsim_bus_groups,
+ .probe = nsim_bus_probe,
+ .remove = nsim_bus_remove,
+ .num_vf = nsim_num_vf,
+};
+
+static struct nsim_bus_dev *
+nsim_bus_dev_new(unsigned int id, unsigned int port_count)
+{
+ struct nsim_bus_dev *nsim_bus_dev;
+ int err;
+
+ nsim_bus_dev = kzalloc(sizeof(*nsim_bus_dev), GFP_KERNEL);
+ if (!nsim_bus_dev)
+ return ERR_PTR(-ENOMEM);
+
+ err = ida_alloc_range(&nsim_bus_dev_ids, id, id, GFP_KERNEL);
+ if (err < 0)
+ goto err_nsim_bus_dev_free;
+ nsim_bus_dev->dev.id = err;
+ nsim_bus_dev->dev.bus = &nsim_bus;
+ nsim_bus_dev->dev.type = &nsim_bus_dev_type;
+ nsim_bus_dev->port_count = port_count;
+ nsim_bus_dev->initial_net = current->nsproxy->net_ns;
+ mutex_init(&nsim_bus_dev->nsim_bus_reload_lock);
+ /* Disallow using nsim_bus_dev */
+ smp_store_release(&nsim_bus_dev->init, false);
+
+ err = device_register(&nsim_bus_dev->dev);
+ if (err)
+ goto err_nsim_bus_dev_id_free;
+ return nsim_bus_dev;
+
+err_nsim_bus_dev_id_free:
+ ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+err_nsim_bus_dev_free:
+ kfree(nsim_bus_dev);
+ return ERR_PTR(err);
+}
+
+static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev)
+{
+ /* Disallow using nsim_bus_dev */
+ smp_store_release(&nsim_bus_dev->init, false);
+ device_unregister(&nsim_bus_dev->dev);
+ ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id);
+ kfree(nsim_bus_dev);
+}
+
+static struct device_driver nsim_driver = {
+ .name = DRV_NAME,
+ .bus = &nsim_bus,
+ .owner = THIS_MODULE,
+};
+
+int nsim_bus_init(void)
+{
+ int err;
+
+ err = bus_register(&nsim_bus);
+ if (err)
+ return err;
+ err = driver_register(&nsim_driver);
+ if (err)
+ goto err_bus_unregister;
+ /* Allow using resources */
+ smp_store_release(&nsim_bus_enable, true);
+ return 0;
+
+err_bus_unregister:
+ bus_unregister(&nsim_bus);
+ return err;
+}
+
+void nsim_bus_exit(void)
+{
+ struct nsim_bus_dev *nsim_bus_dev, *tmp;
+
+ /* Disallow using resources */
+ smp_store_release(&nsim_bus_enable, false);
+
+ mutex_lock(&nsim_bus_dev_list_lock);
+ list_for_each_entry_safe(nsim_bus_dev, tmp, &nsim_bus_dev_list, list) {
+ list_del(&nsim_bus_dev->list);
+ nsim_bus_dev_del(nsim_bus_dev);
+ }
+ mutex_unlock(&nsim_bus_dev_list_lock);
+
+ driver_unregister(&nsim_driver);
+ bus_unregister(&nsim_bus);
+}
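
bus.c publishes newly created devices by setting nsim_bus_dev->init (and nsim_bus_enable for the bus as a whole) with smp_store_release(), and the sysfs handlers check the flags with smp_load_acquire() before touching anything else; a reader that observes the flag as true is therefore also guaranteed to see every field written before the release. A hedged, self-contained C11 sketch of the same release/acquire publication idiom follows (userspace analogue with made-up names, not kernel code):

/* Illustrative C11 analogue of the release/acquire publication pattern
 * used for nsim_bus_dev->init and nsim_bus_enable.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_dev {
	int port_count;		/* written before publication */
	atomic_bool init;	/* publication flag */
};

static struct demo_dev dev;

/* Producer: finish initialization, then publish with a release store. */
static void demo_publish(int port_count)
{
	dev.port_count = port_count;
	atomic_store_explicit(&dev.init, true, memory_order_release);
}

/* Consumer: the acquire load pairs with the release store above, so once
 * init reads true, port_count is guaranteed to be visible as well.
 */
static int demo_use(void)
{
	if (!atomic_load_explicit(&dev.init, memory_order_acquire))
		return -1;	/* like returning -EBUSY before init */
	return dev.port_count;
}

int main(void)
{
	printf("before publish: %d\n", demo_use());	/* -1 */
	demo_publish(1);
	printf("after publish: %d\n", demo_use());	/* 1 */
	return 0;
}

In the driver, this pairing is what lets new_port_store() and del_port_store() bail out with -EBUSY instead of racing with a device that is still being set up.
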
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
new file mode 100644
index 000000000..bcf354719
--- /dev/null
+++ b/drivers/net/netdevsim/dev.c
@@ -0,0 +1,1231 @@
+/*
+ * Copyright (c) 2018 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com>
+ * Copyright (c) 2019 Mellanox Technologies. All rights reserved.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/etherdevice.h>
+#include <linux/inet.h>
+#include <linux/jiffies.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/random.h>
+#include <linux/rtnetlink.h>
+#include <linux/workqueue.h>
+#include <net/devlink.h>
+#include <net/ip.h>
+#include <net/flow_offload.h>
+#include <uapi/linux/devlink.h>
+#include <uapi/linux/ip.h>
+#include <uapi/linux/udp.h>
+
+#include "netdevsim.h"
+
+static struct dentry *nsim_dev_ddir;
+
+#define NSIM_DEV_DUMMY_REGION_SIZE (1024 * 32)
+
+static int
+nsim_dev_take_snapshot(struct devlink *devlink,
+ const struct devlink_region_ops *ops,
+ struct netlink_ext_ack *extack,
+ u8 **data)
+{
+ void *dummy_data;
+
+ dummy_data = kmalloc(NSIM_DEV_DUMMY_REGION_SIZE, GFP_KERNEL);
+ if (!dummy_data)
+ return -ENOMEM;
+
+ get_random_bytes(dummy_data, NSIM_DEV_DUMMY_REGION_SIZE);
+
+ *data = dummy_data;
+
+ return 0;
+}
+
+static ssize_t nsim_dev_take_snapshot_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ struct devlink *devlink;
+ u8 *dummy_data;
+ int err;
+ u32 id;
+
+ devlink = priv_to_devlink(nsim_dev);
+
+ err = nsim_dev_take_snapshot(devlink, NULL, NULL, &dummy_data);
+ if (err)
+ return err;
+
+ err = devlink_region_snapshot_id_get(devlink, &id);
+ if (err) {
+ pr_err("Failed to get snapshot id\n");
+ kfree(dummy_data);
+ return err;
+ }
+ err = devlink_region_snapshot_create(nsim_dev->dummy_region,
+ dummy_data, id);
+ devlink_region_snapshot_id_put(devlink, id);
+ if (err) {
+ pr_err("Failed to create region snapshot\n");
+ kfree(dummy_data);
+ return err;
+ }
+
+ return count;
+}
+
+static const struct file_operations nsim_dev_take_snapshot_fops = {
+ .open = simple_open,
+ .write = nsim_dev_take_snapshot_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
+static ssize_t nsim_dev_trap_fa_cookie_read(struct file *file,
+ char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ struct flow_action_cookie *fa_cookie;
+ unsigned int buf_len;
+ ssize_t ret;
+ char *buf;
+
+ spin_lock(&nsim_dev->fa_cookie_lock);
+ fa_cookie = nsim_dev->fa_cookie;
+ if (!fa_cookie) {
+ ret = -EINVAL;
+ goto errout;
+ }
+ buf_len = fa_cookie->cookie_len * 2;
+ buf = kmalloc(buf_len, GFP_ATOMIC);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto errout;
+ }
+ bin2hex(buf, fa_cookie->cookie, fa_cookie->cookie_len);
+ spin_unlock(&nsim_dev->fa_cookie_lock);
+
+ ret = simple_read_from_buffer(data, count, ppos, buf, buf_len);
+
+ kfree(buf);
+ return ret;
+
+errout:
+ spin_unlock(&nsim_dev->fa_cookie_lock);
+ return ret;
+}
+
+static ssize_t nsim_dev_trap_fa_cookie_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev *nsim_dev = file->private_data;
+ struct flow_action_cookie *fa_cookie;
+ size_t cookie_len;
+ ssize_t ret;
+ char *buf;
+
+ if (*ppos != 0)
+ return -EINVAL;
+ cookie_len = (count - 1) / 2;
+ if ((count - 1) % 2)
+ return -EINVAL;
+
+ buf = memdup_user(data, count);
+ if (IS_ERR(buf))
+ return PTR_ERR(buf);
+
+ fa_cookie = kmalloc(sizeof(*fa_cookie) + cookie_len,
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!fa_cookie) {
+ ret = -ENOMEM;
+ goto free_buf;
+ }
+
+ fa_cookie->cookie_len = cookie_len;
+ ret = hex2bin(fa_cookie->cookie, buf, cookie_len);
+ if (ret)
+ goto free_fa_cookie;
+ kfree(buf);
+
+ spin_lock(&nsim_dev->fa_cookie_lock);
+ kfree(nsim_dev->fa_cookie);
+ nsim_dev->fa_cookie = fa_cookie;
+ spin_unlock(&nsim_dev->fa_cookie_lock);
+
+ return count;
+
+free_fa_cookie:
+ kfree(fa_cookie);
+free_buf:
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations nsim_dev_trap_fa_cookie_fops = {
+ .open = simple_open,
+ .read = nsim_dev_trap_fa_cookie_read,
+ .write = nsim_dev_trap_fa_cookie_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
+static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev)
+{
+ char dev_ddir_name[sizeof(DRV_NAME) + 10];
+
+ sprintf(dev_ddir_name, DRV_NAME "%u", nsim_dev->nsim_bus_dev->dev.id);
+ nsim_dev->ddir = debugfs_create_dir(dev_ddir_name, nsim_dev_ddir);
+ if (IS_ERR(nsim_dev->ddir))
+ return PTR_ERR(nsim_dev->ddir);
+ nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir);
+ if (IS_ERR(nsim_dev->ports_ddir))
+ return PTR_ERR(nsim_dev->ports_ddir);
+ debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir,
+ &nsim_dev->fw_update_status);
+ debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir,
+ &nsim_dev->fw_update_overwrite_mask);
+ debugfs_create_u32("max_macs", 0600, nsim_dev->ddir,
+ &nsim_dev->max_macs);
+ debugfs_create_bool("test1", 0600, nsim_dev->ddir,
+ &nsim_dev->test1);
+ nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
+ 0200,
+ nsim_dev->ddir,
+ nsim_dev,
+ &nsim_dev_take_snapshot_fops);
+ debugfs_create_bool("dont_allow_reload", 0600, nsim_dev->ddir,
+ &nsim_dev->dont_allow_reload);
+ debugfs_create_bool("fail_reload", 0600, nsim_dev->ddir,
+ &nsim_dev->fail_reload);
+ debugfs_create_file("trap_flow_action_cookie", 0600, nsim_dev->ddir,
+ nsim_dev, &nsim_dev_trap_fa_cookie_fops);
+ debugfs_create_bool("fail_trap_group_set", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_group_set);
+ debugfs_create_bool("fail_trap_policer_set", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_policer_set);
+ debugfs_create_bool("fail_trap_policer_counter_get", 0600,
+ nsim_dev->ddir,
+ &nsim_dev->fail_trap_policer_counter_get);
+ nsim_udp_tunnels_debugfs_create(nsim_dev);
+ return 0;
+}
+
+static void nsim_dev_debugfs_exit(struct nsim_dev *nsim_dev)
+{
+ debugfs_remove_recursive(nsim_dev->ports_ddir);
+ debugfs_remove_recursive(nsim_dev->ddir);
+}
+
+static int nsim_dev_port_debugfs_init(struct nsim_dev *nsim_dev,
+ struct nsim_dev_port *nsim_dev_port)
+{
+ char port_ddir_name[16];
+ char dev_link_name[32];
+
+ sprintf(port_ddir_name, "%u", nsim_dev_port->port_index);
+ nsim_dev_port->ddir = debugfs_create_dir(port_ddir_name,
+ nsim_dev->ports_ddir);
+ if (IS_ERR(nsim_dev_port->ddir))
+ return PTR_ERR(nsim_dev_port->ddir);
+
+ sprintf(dev_link_name, "../../../" DRV_NAME "%u",
+ nsim_dev->nsim_bus_dev->dev.id);
+ debugfs_create_symlink("dev", nsim_dev_port->ddir, dev_link_name);
+
+ return 0;
+}
+
+static void nsim_dev_port_debugfs_exit(struct nsim_dev_port *nsim_dev_port)
+{
+ debugfs_remove_recursive(nsim_dev_port->ddir);
+}
+
+static int nsim_dev_resources_register(struct devlink *devlink)
+{
+ struct devlink_resource_size_params params = {
+ .size_max = (u64)-1,
+ .size_granularity = 1,
+ .unit = DEVLINK_RESOURCE_UNIT_ENTRY
+ };
+ int err;
+
+ /* Resources for IPv4 */
+ err = devlink_resource_register(devlink, "IPv4", (u64)-1,
+ NSIM_RESOURCE_IPV4,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
+ if (err) {
+ pr_err("Failed to register IPv4 top resource\n");
+ goto out;
+ }
+
+ err = devlink_resource_register(devlink, "fib", (u64)-1,
+ NSIM_RESOURCE_IPV4_FIB,
+ NSIM_RESOURCE_IPV4, &params);
+ if (err) {
+ pr_err("Failed to register IPv4 FIB resource\n");
+ return err;
+ }
+
+ err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV4, &params);
+ if (err) {
+ pr_err("Failed to register IPv4 FIB rules resource\n");
+ return err;
+ }
+
+ /* Resources for IPv6 */
+ err = devlink_resource_register(devlink, "IPv6", (u64)-1,
+ NSIM_RESOURCE_IPV6,
+ DEVLINK_RESOURCE_ID_PARENT_TOP,
+ &params);
+ if (err) {
+ pr_err("Failed to register IPv6 top resource\n");
+ goto out;
+ }
+
+ err = devlink_resource_register(devlink, "fib", (u64)-1,
+ NSIM_RESOURCE_IPV6_FIB,
+ NSIM_RESOURCE_IPV6, &params);
+ if (err) {
+ pr_err("Failed to register IPv6 FIB resource\n");
+ return err;
+ }
+
+ err = devlink_resource_register(devlink, "fib-rules", (u64)-1,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ NSIM_RESOURCE_IPV6, &params);
+ if (err) {
+ pr_err("Failed to register IPv6 FIB rules resource\n");
+ return err;
+ }
+
+out:
+ return err;
+}
+
+enum nsim_devlink_param_id {
+ NSIM_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+};
+
+static const struct devlink_param nsim_devlink_params[] = {
+ DEVLINK_PARAM_GENERIC(MAX_MACS,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+ DEVLINK_PARAM_DRIVER(NSIM_DEVLINK_PARAM_ID_TEST1,
+ "test1", DEVLINK_PARAM_TYPE_BOOL,
+ BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
+ NULL, NULL, NULL),
+};
+
+static void nsim_devlink_set_params_init_values(struct nsim_dev *nsim_dev,
+ struct devlink *devlink)
+{
+ union devlink_param_value value;
+
+ value.vu32 = nsim_dev->max_macs;
+ devlink_param_driverinit_value_set(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ value);
+ value.vbool = nsim_dev->test1;
+ devlink_param_driverinit_value_set(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ value);
+}
+
+static void nsim_devlink_param_load_driverinit_values(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ union devlink_param_value saved_value;
+ int err;
+
+ err = devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ &saved_value);
+ if (!err)
+ nsim_dev->max_macs = saved_value.vu32;
+ err = devlink_param_driverinit_value_get(devlink,
+ NSIM_DEVLINK_PARAM_ID_TEST1,
+ &saved_value);
+ if (!err)
+ nsim_dev->test1 = saved_value.vbool;
+}
+
+#define NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX 16
+
+static const struct devlink_region_ops dummy_region_ops = {
+ .name = "dummy",
+ .destructor = &kfree,
+ .snapshot = nsim_dev_take_snapshot,
+};
+
+static int nsim_dev_dummy_region_init(struct nsim_dev *nsim_dev,
+ struct devlink *devlink)
+{
+ nsim_dev->dummy_region =
+ devlink_region_create(devlink, &dummy_region_ops,
+ NSIM_DEV_DUMMY_REGION_SNAPSHOT_MAX,
+ NSIM_DEV_DUMMY_REGION_SIZE);
+ return PTR_ERR_OR_ZERO(nsim_dev->dummy_region);
+}
+
+static void nsim_dev_dummy_region_exit(struct nsim_dev *nsim_dev)
+{
+ devlink_region_destroy(nsim_dev->dummy_region);
+}
+
+struct nsim_trap_item {
+ void *trap_ctx;
+ enum devlink_trap_action action;
+};
+
+struct nsim_trap_data {
+ struct delayed_work trap_report_dw;
+ struct nsim_trap_item *trap_items_arr;
+ u64 *trap_policers_cnt_arr;
+ struct nsim_dev *nsim_dev;
+ spinlock_t trap_lock; /* Protects trap_items_arr */
+};
+
+/* All driver-specific traps must be documented in
+ * Documentation/networking/devlink/netdevsim.rst
+ */
+enum {
+ NSIM_TRAP_ID_BASE = DEVLINK_TRAP_GENERIC_ID_MAX,
+ NSIM_TRAP_ID_FID_MISS,
+};
+
+#define NSIM_TRAP_NAME_FID_MISS "fid_miss"
+
+#define NSIM_TRAP_METADATA DEVLINK_TRAP_METADATA_TYPE_F_IN_PORT
+
+#define NSIM_TRAP_DROP(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ NSIM_TRAP_METADATA)
+#define NSIM_TRAP_DROP_EXT(_id, _group_id, _metadata) \
+ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ NSIM_TRAP_METADATA | (_metadata))
+#define NSIM_TRAP_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_GENERIC(EXCEPTION, TRAP, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ NSIM_TRAP_METADATA)
+#define NSIM_TRAP_CONTROL(_id, _group_id, _action) \
+ DEVLINK_TRAP_GENERIC(CONTROL, _action, _id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ NSIM_TRAP_METADATA)
+#define NSIM_TRAP_DRIVER_EXCEPTION(_id, _group_id) \
+ DEVLINK_TRAP_DRIVER(EXCEPTION, TRAP, NSIM_TRAP_ID_##_id, \
+ NSIM_TRAP_NAME_##_id, \
+ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \
+ NSIM_TRAP_METADATA)
+
+#define NSIM_DEV_TRAP_POLICER_MIN_RATE 1
+#define NSIM_DEV_TRAP_POLICER_MAX_RATE 8000
+#define NSIM_DEV_TRAP_POLICER_MIN_BURST 8
+#define NSIM_DEV_TRAP_POLICER_MAX_BURST 65536
+
+#define NSIM_TRAP_POLICER(_id, _rate, _burst) \
+ DEVLINK_TRAP_POLICER(_id, _rate, _burst, \
+ NSIM_DEV_TRAP_POLICER_MAX_RATE, \
+ NSIM_DEV_TRAP_POLICER_MIN_RATE, \
+ NSIM_DEV_TRAP_POLICER_MAX_BURST, \
+ NSIM_DEV_TRAP_POLICER_MIN_BURST)
+
+static const struct devlink_trap_policer nsim_trap_policers_arr[] = {
+ NSIM_TRAP_POLICER(1, 1000, 128),
+ NSIM_TRAP_POLICER(2, 2000, 256),
+ NSIM_TRAP_POLICER(3, 3000, 512),
+};
+
+static const struct devlink_trap_group nsim_trap_groups_arr[] = {
+ DEVLINK_TRAP_GROUP_GENERIC(L2_DROPS, 0),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_DROPS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(L3_EXCEPTIONS, 1),
+ DEVLINK_TRAP_GROUP_GENERIC(BUFFER_DROPS, 2),
+ DEVLINK_TRAP_GROUP_GENERIC(ACL_DROPS, 3),
+ DEVLINK_TRAP_GROUP_GENERIC(MC_SNOOPING, 3),
+};
+
+static const struct devlink_trap nsim_traps_arr[] = {
+ NSIM_TRAP_DROP(SMAC_MC, L2_DROPS),
+ NSIM_TRAP_DROP(VLAN_TAG_MISMATCH, L2_DROPS),
+ NSIM_TRAP_DROP(INGRESS_VLAN_FILTER, L2_DROPS),
+ NSIM_TRAP_DROP(INGRESS_STP_FILTER, L2_DROPS),
+ NSIM_TRAP_DROP(EMPTY_TX_LIST, L2_DROPS),
+ NSIM_TRAP_DROP(PORT_LOOPBACK_FILTER, L2_DROPS),
+ NSIM_TRAP_DRIVER_EXCEPTION(FID_MISS, L2_DROPS),
+ NSIM_TRAP_DROP(BLACKHOLE_ROUTE, L3_DROPS),
+ NSIM_TRAP_EXCEPTION(TTL_ERROR, L3_EXCEPTIONS),
+ NSIM_TRAP_DROP(TAIL_DROP, BUFFER_DROPS),
+ NSIM_TRAP_DROP_EXT(INGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ NSIM_TRAP_DROP_EXT(EGRESS_FLOW_ACTION_DROP, ACL_DROPS,
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE),
+ NSIM_TRAP_CONTROL(IGMP_QUERY, MC_SNOOPING, MIRROR),
+ NSIM_TRAP_CONTROL(IGMP_V1_REPORT, MC_SNOOPING, TRAP),
+};
+
+#define NSIM_TRAP_L4_DATA_LEN 100
+
+static struct sk_buff *nsim_dev_trap_skb_build(void)
+{
+ int tot_len, data_len = NSIM_TRAP_L4_DATA_LEN;
+ struct sk_buff *skb;
+ struct udphdr *udph;
+ struct ethhdr *eth;
+ struct iphdr *iph;
+
+ skb = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+ tot_len = sizeof(struct iphdr) + sizeof(struct udphdr) + data_len;
+
+ skb_reset_mac_header(skb);
+ eth = skb_put(skb, sizeof(struct ethhdr));
+ eth_random_addr(eth->h_dest);
+ eth_random_addr(eth->h_source);
+ eth->h_proto = htons(ETH_P_IP);
+ skb->protocol = htons(ETH_P_IP);
+
+ skb_set_network_header(skb, skb->len);
+ iph = skb_put(skb, sizeof(struct iphdr));
+ iph->protocol = IPPROTO_UDP;
+ iph->saddr = in_aton("192.0.2.1");
+ iph->daddr = in_aton("198.51.100.1");
+ iph->version = 0x4;
+ iph->frag_off = 0;
+ iph->ihl = 0x5;
+ iph->tot_len = htons(tot_len);
+ iph->ttl = 100;
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+
+ skb_set_transport_header(skb, skb->len);
+ udph = skb_put_zero(skb, sizeof(struct udphdr) + data_len);
+ get_random_bytes(&udph->source, sizeof(u16));
+ get_random_bytes(&udph->dest, sizeof(u16));
+ udph->len = htons(sizeof(struct udphdr) + data_len);
+
+ return skb;
+}
+
+static void nsim_dev_trap_report(struct nsim_dev_port *nsim_dev_port)
+{
+ struct nsim_dev *nsim_dev = nsim_dev_port->ns->nsim_dev;
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+ struct nsim_trap_data *nsim_trap_data;
+ int i;
+
+ nsim_trap_data = nsim_dev->trap_data;
+
+ spin_lock(&nsim_trap_data->trap_lock);
+ for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
+ struct flow_action_cookie *fa_cookie = NULL;
+ struct nsim_trap_item *nsim_trap_item;
+ struct sk_buff *skb;
+ bool has_fa_cookie;
+
+ has_fa_cookie = nsim_traps_arr[i].metadata_cap &
+ DEVLINK_TRAP_METADATA_TYPE_F_FA_COOKIE;
+
+ nsim_trap_item = &nsim_trap_data->trap_items_arr[i];
+ if (nsim_trap_item->action == DEVLINK_TRAP_ACTION_DROP)
+ continue;
+
+ skb = nsim_dev_trap_skb_build();
+ if (!skb)
+ continue;
+ skb->dev = nsim_dev_port->ns->netdev;
+
+ /* Trapped packets are usually passed to devlink in softIRQ,
+ * but in this case they are generated in a workqueue. Disable
+ * softIRQs to prevent lockdep from complaining about
+		 * "inconsistent lock state".
+ */
+
+ spin_lock_bh(&nsim_dev->fa_cookie_lock);
+ fa_cookie = has_fa_cookie ? nsim_dev->fa_cookie : NULL;
+ devlink_trap_report(devlink, skb, nsim_trap_item->trap_ctx,
+ &nsim_dev_port->devlink_port, fa_cookie);
+ spin_unlock_bh(&nsim_dev->fa_cookie_lock);
+ consume_skb(skb);
+ }
+ spin_unlock(&nsim_trap_data->trap_lock);
+}
+
+#define NSIM_TRAP_REPORT_INTERVAL_MS 100
+
+static void nsim_dev_trap_report_work(struct work_struct *work)
+{
+ struct nsim_trap_data *nsim_trap_data;
+ struct nsim_dev_port *nsim_dev_port;
+ struct nsim_dev *nsim_dev;
+
+ nsim_trap_data = container_of(work, struct nsim_trap_data,
+ trap_report_dw.work);
+ nsim_dev = nsim_trap_data->nsim_dev;
+
+ /* For each running port and enabled packet trap, generate a UDP
+ * packet with a random 5-tuple and report it.
+ */
+ mutex_lock(&nsim_dev->port_list_lock);
+ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list) {
+ if (!netif_running(nsim_dev_port->ns->netdev))
+ continue;
+
+ nsim_dev_trap_report(nsim_dev_port);
+ }
+ mutex_unlock(&nsim_dev->port_list_lock);
+
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+}
+
+static int nsim_dev_traps_init(struct devlink *devlink)
+{
+ size_t policers_count = ARRAY_SIZE(nsim_trap_policers_arr);
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_trap_data *nsim_trap_data;
+ int err;
+
+ nsim_trap_data = kzalloc(sizeof(*nsim_trap_data), GFP_KERNEL);
+ if (!nsim_trap_data)
+ return -ENOMEM;
+
+ nsim_trap_data->trap_items_arr = kcalloc(ARRAY_SIZE(nsim_traps_arr),
+ sizeof(struct nsim_trap_item),
+ GFP_KERNEL);
+ if (!nsim_trap_data->trap_items_arr) {
+ err = -ENOMEM;
+ goto err_trap_data_free;
+ }
+
+ nsim_trap_data->trap_policers_cnt_arr = kcalloc(policers_count,
+ sizeof(u64),
+ GFP_KERNEL);
+ if (!nsim_trap_data->trap_policers_cnt_arr) {
+ err = -ENOMEM;
+ goto err_trap_items_free;
+ }
+
+ /* The lock is used to protect the action state of the registered
+ * traps. The value is written by user and read in delayed work when
+ * iterating over all the traps.
+ */
+ spin_lock_init(&nsim_trap_data->trap_lock);
+ nsim_trap_data->nsim_dev = nsim_dev;
+ nsim_dev->trap_data = nsim_trap_data;
+
+ err = devlink_trap_policers_register(devlink, nsim_trap_policers_arr,
+ policers_count);
+ if (err)
+ goto err_trap_policers_cnt_free;
+
+ err = devlink_trap_groups_register(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+ if (err)
+ goto err_trap_policers_unregister;
+
+ err = devlink_traps_register(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr), NULL);
+ if (err)
+ goto err_trap_groups_unregister;
+
+ INIT_DELAYED_WORK(&nsim_dev->trap_data->trap_report_dw,
+ nsim_dev_trap_report_work);
+ schedule_delayed_work(&nsim_dev->trap_data->trap_report_dw,
+ msecs_to_jiffies(NSIM_TRAP_REPORT_INTERVAL_MS));
+
+ return 0;
+
+err_trap_groups_unregister:
+ devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+err_trap_policers_unregister:
+ devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
+err_trap_policers_cnt_free:
+ kfree(nsim_trap_data->trap_policers_cnt_arr);
+err_trap_items_free:
+ kfree(nsim_trap_data->trap_items_arr);
+err_trap_data_free:
+ kfree(nsim_trap_data);
+ return err;
+}
+
+static void nsim_dev_traps_exit(struct devlink *devlink)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ cancel_delayed_work_sync(&nsim_dev->trap_data->trap_report_dw);
+ devlink_traps_unregister(devlink, nsim_traps_arr,
+ ARRAY_SIZE(nsim_traps_arr));
+ devlink_trap_groups_unregister(devlink, nsim_trap_groups_arr,
+ ARRAY_SIZE(nsim_trap_groups_arr));
+ devlink_trap_policers_unregister(devlink, nsim_trap_policers_arr,
+ ARRAY_SIZE(nsim_trap_policers_arr));
+ kfree(nsim_dev->trap_data->trap_policers_cnt_arr);
+ kfree(nsim_dev->trap_data->trap_items_arr);
+ kfree(nsim_dev->trap_data);
+}
+
+static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack);
+static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev);
+
+static int nsim_dev_reload_down(struct devlink *devlink, bool netns_change,
+ enum devlink_reload_action action, enum devlink_reload_limit limit,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->dont_allow_reload) {
+ /* For testing purposes, user set debugfs dont_allow_reload
+ * value to true. So forbid it.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "User forbid the reload for testing purposes");
+ return -EOPNOTSUPP;
+ }
+
+ nsim_dev_reload_destroy(nsim_dev);
+ return 0;
+}
+
+static int nsim_dev_reload_up(struct devlink *devlink, enum devlink_reload_action action,
+ enum devlink_reload_limit limit, u32 *actions_performed,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->fail_reload) {
+ /* For testing purposes, user set debugfs fail_reload
+ * value to true. Fail right away.
+ */
+ NL_SET_ERR_MSG_MOD(extack, "User setup the reload to fail for testing purposes");
+ return -EINVAL;
+ }
+
+ *actions_performed = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT);
+ return nsim_dev_reload_create(nsim_dev, extack);
+}
+
+static int nsim_dev_info_get(struct devlink *devlink,
+ struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ return devlink_info_driver_name_put(req, DRV_NAME);
+}
+
+#define NSIM_DEV_FLASH_SIZE 500000
+#define NSIM_DEV_FLASH_CHUNK_SIZE 1000
+#define NSIM_DEV_FLASH_CHUNK_TIME_MS 10
+
+static int nsim_dev_flash_update(struct devlink *devlink,
+ struct devlink_flash_update_params *params,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ int i;
+
+ if ((params->overwrite_mask & ~nsim_dev->fw_update_overwrite_mask) != 0)
+ return -EOPNOTSUPP;
+
+ if (nsim_dev->fw_update_status) {
+ devlink_flash_update_begin_notify(devlink);
+ devlink_flash_update_status_notify(devlink,
+ "Preparing to flash",
+ params->component, 0, 0);
+ }
+
+ for (i = 0; i < NSIM_DEV_FLASH_SIZE / NSIM_DEV_FLASH_CHUNK_SIZE; i++) {
+ if (nsim_dev->fw_update_status)
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ params->component,
+ i * NSIM_DEV_FLASH_CHUNK_SIZE,
+ NSIM_DEV_FLASH_SIZE);
+ msleep(NSIM_DEV_FLASH_CHUNK_TIME_MS);
+ }
+
+ if (nsim_dev->fw_update_status) {
+ devlink_flash_update_status_notify(devlink, "Flashing",
+ params->component,
+ NSIM_DEV_FLASH_SIZE,
+ NSIM_DEV_FLASH_SIZE);
+ devlink_flash_update_timeout_notify(devlink, "Flash select",
+ params->component, 81);
+ devlink_flash_update_status_notify(devlink, "Flashing done",
+ params->component, 0, 0);
+ devlink_flash_update_end_notify(devlink);
+ }
+
+ return 0;
+}
+
+static struct nsim_trap_item *
+nsim_dev_trap_item_lookup(struct nsim_dev *nsim_dev, u16 trap_id)
+{
+ struct nsim_trap_data *nsim_trap_data = nsim_dev->trap_data;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nsim_traps_arr); i++) {
+ if (nsim_traps_arr[i].id == trap_id)
+ return &nsim_trap_data->trap_items_arr[i];
+ }
+
+ return NULL;
+}
+
+static int nsim_dev_devlink_trap_init(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ void *trap_ctx)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_trap_item *nsim_trap_item;
+
+ nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id);
+ if (WARN_ON(!nsim_trap_item))
+ return -ENOENT;
+
+ nsim_trap_item->trap_ctx = trap_ctx;
+ nsim_trap_item->action = trap->init_action;
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_action_set(struct devlink *devlink,
+ const struct devlink_trap *trap,
+ enum devlink_trap_action action,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ struct nsim_trap_item *nsim_trap_item;
+
+ nsim_trap_item = nsim_dev_trap_item_lookup(nsim_dev, trap->id);
+ if (WARN_ON(!nsim_trap_item))
+ return -ENOENT;
+
+ spin_lock(&nsim_dev->trap_data->trap_lock);
+ nsim_trap_item->action = action;
+ spin_unlock(&nsim_dev->trap_data->trap_lock);
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_group_set(struct devlink *devlink,
+ const struct devlink_trap_group *group,
+ const struct devlink_trap_policer *policer,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->fail_trap_group_set)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_policer_set(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 rate, u64 burst,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+
+ if (nsim_dev->fail_trap_policer_set) {
+ NL_SET_ERR_MSG_MOD(extack, "User setup the operation to fail for testing purposes");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int
+nsim_dev_devlink_trap_policer_counter_get(struct devlink *devlink,
+ const struct devlink_trap_policer *policer,
+ u64 *p_drops)
+{
+ struct nsim_dev *nsim_dev = devlink_priv(devlink);
+ u64 *cnt;
+
+ if (nsim_dev->fail_trap_policer_counter_get)
+ return -EINVAL;
+
+ cnt = &nsim_dev->trap_data->trap_policers_cnt_arr[policer->id - 1];
+ *p_drops = (*cnt)++;
+
+ return 0;
+}
+
+static const struct devlink_ops nsim_dev_devlink_ops = {
+ .supported_flash_update_params = DEVLINK_SUPPORT_FLASH_UPDATE_COMPONENT |
+ DEVLINK_SUPPORT_FLASH_UPDATE_OVERWRITE_MASK,
+ .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT),
+ .reload_down = nsim_dev_reload_down,
+ .reload_up = nsim_dev_reload_up,
+ .info_get = nsim_dev_info_get,
+ .flash_update = nsim_dev_flash_update,
+ .trap_init = nsim_dev_devlink_trap_init,
+ .trap_action_set = nsim_dev_devlink_trap_action_set,
+ .trap_group_set = nsim_dev_devlink_trap_group_set,
+ .trap_policer_set = nsim_dev_devlink_trap_policer_set,
+ .trap_policer_counter_get = nsim_dev_devlink_trap_policer_counter_get,
+};
+
+#define NSIM_DEV_MAX_MACS_DEFAULT 32
+#define NSIM_DEV_TEST1_DEFAULT true
+
+static int __nsim_dev_port_add(struct nsim_dev *nsim_dev,
+ unsigned int port_index)
+{
+ struct devlink_port_attrs attrs = {};
+ struct nsim_dev_port *nsim_dev_port;
+ struct devlink_port *devlink_port;
+ int err;
+
+ nsim_dev_port = kzalloc(sizeof(*nsim_dev_port), GFP_KERNEL);
+ if (!nsim_dev_port)
+ return -ENOMEM;
+ nsim_dev_port->port_index = port_index;
+
+ devlink_port = &nsim_dev_port->devlink_port;
+ attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL;
+ attrs.phys.port_number = port_index + 1;
+ memcpy(attrs.switch_id.id, nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
+ attrs.switch_id.id_len = nsim_dev->switch_id.id_len;
+ devlink_port_attrs_set(devlink_port, &attrs);
+ err = devlink_port_register(priv_to_devlink(nsim_dev), devlink_port,
+ port_index);
+ if (err)
+ goto err_port_free;
+
+ err = nsim_dev_port_debugfs_init(nsim_dev, nsim_dev_port);
+ if (err)
+ goto err_dl_port_unregister;
+
+ nsim_dev_port->ns = nsim_create(nsim_dev, nsim_dev_port);
+ if (IS_ERR(nsim_dev_port->ns)) {
+ err = PTR_ERR(nsim_dev_port->ns);
+ goto err_port_debugfs_exit;
+ }
+
+ devlink_port_type_eth_set(devlink_port, nsim_dev_port->ns->netdev);
+ list_add(&nsim_dev_port->list, &nsim_dev->port_list);
+
+ return 0;
+
+err_port_debugfs_exit:
+ nsim_dev_port_debugfs_exit(nsim_dev_port);
+err_dl_port_unregister:
+ devlink_port_unregister(devlink_port);
+err_port_free:
+ kfree(nsim_dev_port);
+ return err;
+}
+
+static void __nsim_dev_port_del(struct nsim_dev_port *nsim_dev_port)
+{
+ struct devlink_port *devlink_port = &nsim_dev_port->devlink_port;
+
+ list_del(&nsim_dev_port->list);
+ devlink_port_type_clear(devlink_port);
+ nsim_destroy(nsim_dev_port->ns);
+ nsim_dev_port_debugfs_exit(nsim_dev_port);
+ devlink_port_unregister(devlink_port);
+ kfree(nsim_dev_port);
+}
+
+static void nsim_dev_port_del_all(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_port *nsim_dev_port, *tmp;
+
+ mutex_lock(&nsim_dev->port_list_lock);
+ list_for_each_entry_safe(nsim_dev_port, tmp,
+ &nsim_dev->port_list, list)
+ __nsim_dev_port_del(nsim_dev_port);
+ mutex_unlock(&nsim_dev->port_list_lock);
+}
+
+static int nsim_dev_port_add_all(struct nsim_dev *nsim_dev,
+ unsigned int port_count)
+{
+ int i, err;
+
+ for (i = 0; i < port_count; i++) {
+ err = __nsim_dev_port_add(nsim_dev, i);
+ if (err)
+ goto err_port_del_all;
+ }
+ return 0;
+
+err_port_del_all:
+ nsim_dev_port_del_all(nsim_dev);
+ return err;
+}
+
+static int nsim_dev_reload_create(struct nsim_dev *nsim_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_bus_dev *nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = priv_to_devlink(nsim_dev);
+ nsim_dev = devlink_priv(devlink);
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+ nsim_dev->fw_update_status = true;
+ nsim_dev->fw_update_overwrite_mask = 0;
+
+ nsim_devlink_param_load_driverinit_values(devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+ return err;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
+ nsim_dev->fib_data = nsim_fib_create(devlink, extack);
+ if (IS_ERR(nsim_dev->fib_data)) {
+ err = PTR_ERR(nsim_dev->fib_data);
+ goto err_traps_exit;
+ }
+
+ err = nsim_dev_health_init(nsim_dev, devlink);
+ if (err)
+ goto err_fib_destroy;
+
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_health_exit;
+
+ nsim_dev->take_snapshot = debugfs_create_file("take_snapshot",
+ 0200,
+ nsim_dev->ddir,
+ nsim_dev,
+ &nsim_dev_take_snapshot_fops);
+ return 0;
+
+err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+ return err;
+}
+
+int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev)
+{
+ struct nsim_dev *nsim_dev;
+ struct devlink *devlink;
+ int err;
+
+ devlink = devlink_alloc(&nsim_dev_devlink_ops, sizeof(*nsim_dev));
+ if (!devlink)
+ return -ENOMEM;
+ devlink_net_set(devlink, nsim_bus_dev->initial_net);
+ nsim_dev = devlink_priv(devlink);
+ nsim_dev->nsim_bus_dev = nsim_bus_dev;
+ nsim_dev->switch_id.id_len = sizeof(nsim_dev->switch_id.id);
+ get_random_bytes(nsim_dev->switch_id.id, nsim_dev->switch_id.id_len);
+ INIT_LIST_HEAD(&nsim_dev->port_list);
+ mutex_init(&nsim_dev->port_list_lock);
+ nsim_dev->fw_update_status = true;
+ nsim_dev->fw_update_overwrite_mask = 0;
+ nsim_dev->max_macs = NSIM_DEV_MAX_MACS_DEFAULT;
+ nsim_dev->test1 = NSIM_DEV_TEST1_DEFAULT;
+ spin_lock_init(&nsim_dev->fa_cookie_lock);
+
+ dev_set_drvdata(&nsim_bus_dev->dev, nsim_dev);
+
+ err = nsim_dev_resources_register(devlink);
+ if (err)
+ goto err_devlink_free;
+
+ err = devlink_register(devlink, &nsim_bus_dev->dev);
+ if (err)
+ goto err_resources_unregister;
+
+ err = devlink_params_register(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+ if (err)
+ goto err_dl_unregister;
+ nsim_devlink_set_params_init_values(nsim_dev, devlink);
+
+ err = nsim_dev_dummy_region_init(nsim_dev, devlink);
+ if (err)
+ goto err_params_unregister;
+
+ err = nsim_dev_traps_init(devlink);
+ if (err)
+ goto err_dummy_region_exit;
+
+ err = nsim_dev_debugfs_init(nsim_dev);
+ if (err)
+ goto err_traps_exit;
+
+ nsim_dev->fib_data = nsim_fib_create(devlink, NULL);
+ if (IS_ERR(nsim_dev->fib_data)) {
+ err = PTR_ERR(nsim_dev->fib_data);
+ goto err_debugfs_exit;
+ }
+
+ err = nsim_dev_health_init(nsim_dev, devlink);
+ if (err)
+ goto err_fib_destroy;
+
+ err = nsim_bpf_dev_init(nsim_dev);
+ if (err)
+ goto err_health_exit;
+
+ err = nsim_dev_port_add_all(nsim_dev, nsim_bus_dev->port_count);
+ if (err)
+ goto err_bpf_dev_exit;
+
+ devlink_params_publish(devlink);
+ devlink_reload_enable(devlink);
+ return 0;
+
+err_bpf_dev_exit:
+ nsim_bpf_dev_exit(nsim_dev);
+err_health_exit:
+ nsim_dev_health_exit(nsim_dev);
+err_fib_destroy:
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+err_debugfs_exit:
+ nsim_dev_debugfs_exit(nsim_dev);
+err_traps_exit:
+ nsim_dev_traps_exit(devlink);
+err_dummy_region_exit:
+ nsim_dev_dummy_region_exit(nsim_dev);
+err_params_unregister:
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+err_dl_unregister:
+ devlink_unregister(devlink);
+err_resources_unregister:
+ devlink_resources_unregister(devlink, NULL);
+err_devlink_free:
+ devlink_free(devlink);
+ return err;
+}
+
+static void nsim_dev_reload_destroy(struct nsim_dev *nsim_dev)
+{
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+
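+	/* If a devlink reload has failed, this teardown already ran from
+	 * reload_down() and reload_up() did not re-create the objects, so
+	 * skip it to avoid a double destroy.
+	 */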
+ if (devlink_is_reload_failed(devlink))
+ return;
+ debugfs_remove(nsim_dev->take_snapshot);
+ nsim_dev_port_del_all(nsim_dev);
+ nsim_dev_health_exit(nsim_dev);
+ nsim_fib_destroy(devlink, nsim_dev->fib_data);
+ nsim_dev_traps_exit(devlink);
+ nsim_dev_dummy_region_exit(nsim_dev);
+ mutex_destroy(&nsim_dev->port_list_lock);
+}
+
+void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ struct devlink *devlink = priv_to_devlink(nsim_dev);
+
+ devlink_reload_disable(devlink);
+
+ nsim_dev_reload_destroy(nsim_dev);
+
+ nsim_bpf_dev_exit(nsim_dev);
+ nsim_dev_debugfs_exit(nsim_dev);
+ devlink_params_unregister(devlink, nsim_devlink_params,
+ ARRAY_SIZE(nsim_devlink_params));
+ devlink_unregister(devlink);
+ devlink_resources_unregister(devlink, NULL);
+ devlink_free(devlink);
+}
+
+static struct nsim_dev_port *
+__nsim_dev_port_lookup(struct nsim_dev *nsim_dev, unsigned int port_index)
+{
+ struct nsim_dev_port *nsim_dev_port;
+
+ list_for_each_entry(nsim_dev_port, &nsim_dev->port_list, list)
+ if (nsim_dev_port->port_index == port_index)
+ return nsim_dev_port;
+ return NULL;
+}
+
+int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ int err;
+
+ mutex_lock(&nsim_dev->port_list_lock);
+ if (__nsim_dev_port_lookup(nsim_dev, port_index))
+ err = -EEXIST;
+ else
+ err = __nsim_dev_port_add(nsim_dev, port_index);
+ mutex_unlock(&nsim_dev->port_list_lock);
+ return err;
+}
+
+int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index)
+{
+ struct nsim_dev *nsim_dev = dev_get_drvdata(&nsim_bus_dev->dev);
+ struct nsim_dev_port *nsim_dev_port;
+ int err = 0;
+
+ mutex_lock(&nsim_dev->port_list_lock);
+ nsim_dev_port = __nsim_dev_port_lookup(nsim_dev, port_index);
+ if (!nsim_dev_port)
+ err = -ENOENT;
+ else
+ __nsim_dev_port_del(nsim_dev_port);
+ mutex_unlock(&nsim_dev->port_list_lock);
+ return err;
+}
+
+int nsim_dev_init(void)
+{
+ nsim_dev_ddir = debugfs_create_dir(DRV_NAME, NULL);
+ return PTR_ERR_OR_ZERO(nsim_dev_ddir);
+}
+
+void nsim_dev_exit(void)
+{
+ debugfs_remove_recursive(nsim_dev_ddir);
+}
diff --git a/drivers/net/netdevsim/ethtool.c b/drivers/net/netdevsim/ethtool.c
new file mode 100644
index 000000000..f1884d90a
--- /dev/null
+++ b/drivers/net/netdevsim/ethtool.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (c) 2020 Facebook
+
+#include <linux/debugfs.h>
+#include <linux/ethtool.h>
+#include <linux/random.h>
+
+#include "netdevsim.h"
+
+static void
+nsim_get_pause_stats(struct net_device *dev,
+ struct ethtool_pause_stats *pause_stats)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->ethtool.report_stats_rx)
+ pause_stats->rx_pause_frames = 1;
+ if (ns->ethtool.report_stats_tx)
+ pause_stats->tx_pause_frames = 2;
+}
+
+static void
+nsim_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ pause->autoneg = 0; /* We don't support ksettings, so can't pretend */
+ pause->rx_pause = ns->ethtool.rx;
+ pause->tx_pause = ns->ethtool.tx;
+}
+
+static int
+nsim_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *pause)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (pause->autoneg)
+ return -EINVAL;
+
+ ns->ethtool.rx = pause->rx_pause;
+ ns->ethtool.tx = pause->tx_pause;
+ return 0;
+}
+
+static const struct ethtool_ops nsim_ethtool_ops = {
+ .get_pause_stats = nsim_get_pause_stats,
+ .get_pauseparam = nsim_get_pauseparam,
+ .set_pauseparam = nsim_set_pauseparam,
+};
+
+void nsim_ethtool_init(struct netdevsim *ns)
+{
+ struct dentry *ethtool, *dir;
+
+ ns->netdev->ethtool_ops = &nsim_ethtool_ops;
+
+ ethtool = debugfs_create_dir("ethtool", ns->nsim_dev_port->ddir);
+
+ dir = debugfs_create_dir("pause", ethtool);
+ debugfs_create_bool("report_stats_rx", 0600, dir,
+ &ns->ethtool.report_stats_rx);
+ debugfs_create_bool("report_stats_tx", 0600, dir,
+ &ns->ethtool.report_stats_tx);
+}
diff --git a/drivers/net/netdevsim/fib.c b/drivers/net/netdevsim/fib.c
new file mode 100644
index 000000000..deea17a0e
--- /dev/null
+++ b/drivers/net/netdevsim/fib.c
@@ -0,0 +1,957 @@
+/*
+ * Copyright (c) 2018 Cumulus Networks. All rights reserved.
+ * Copyright (c) 2018 David Ahern <dsa@cumulusnetworks.com>
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/in6.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rhashtable.h>
+#include <linux/spinlock_types.h>
+#include <linux/types.h>
+#include <net/fib_notifier.h>
+#include <net/ip_fib.h>
+#include <net/ip6_fib.h>
+#include <net/fib_rules.h>
+#include <net/net_namespace.h>
+
+#include "netdevsim.h"
+
+struct nsim_fib_entry {
+ u64 max;
+ u64 num;
+};
+
+struct nsim_per_fib_data {
+ struct nsim_fib_entry fib;
+ struct nsim_fib_entry rules;
+};
+
+struct nsim_fib_data {
+ struct notifier_block fib_nb;
+ struct nsim_per_fib_data ipv4;
+ struct nsim_per_fib_data ipv6;
+ struct rhashtable fib_rt_ht;
+ struct list_head fib_rt_list;
+ spinlock_t fib_lock; /* Protects hashtable, list and accounting */
+ struct devlink *devlink;
+};
+
+struct nsim_fib_rt_key {
+ unsigned char addr[sizeof(struct in6_addr)];
+ unsigned char prefix_len;
+ int family;
+ u32 tb_id;
+};
+
+struct nsim_fib_rt {
+ struct nsim_fib_rt_key key;
+ struct rhash_head ht_node;
+ struct list_head list; /* Member of fib_rt_list */
+};
+
+struct nsim_fib4_rt {
+ struct nsim_fib_rt common;
+ struct fib_info *fi;
+ u8 tos;
+ u8 type;
+};
+
+struct nsim_fib6_rt {
+ struct nsim_fib_rt common;
+ struct list_head nh_list;
+ unsigned int nhs;
+};
+
+struct nsim_fib6_rt_nh {
+ struct list_head list; /* Member of nh_list */
+ struct fib6_info *rt;
+};
+
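+/* A single hash table holds both IPv4 and IPv6 routes; the lookup key
+ * carries the address family alongside the (zero-padded) address, prefix
+ * length and table id, so entries from the two families never collide.
+ */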
+static const struct rhashtable_params nsim_fib_rt_ht_params = {
+ .key_offset = offsetof(struct nsim_fib_rt, key),
+ .head_offset = offsetof(struct nsim_fib_rt, ht_node),
+ .key_len = sizeof(struct nsim_fib_rt_key),
+ .automatic_shrinking = true,
+};
+
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max)
+{
+ struct nsim_fib_entry *entry;
+
+ switch (res_id) {
+ case NSIM_RESOURCE_IPV4_FIB:
+ entry = &fib_data->ipv4.fib;
+ break;
+ case NSIM_RESOURCE_IPV4_FIB_RULES:
+ entry = &fib_data->ipv4.rules;
+ break;
+ case NSIM_RESOURCE_IPV6_FIB:
+ entry = &fib_data->ipv6.fib;
+ break;
+ case NSIM_RESOURCE_IPV6_FIB_RULES:
+ entry = &fib_data->ipv6.rules;
+ break;
+ default:
+ return 0;
+ }
+
+ return max ? entry->max : entry->num;
+}
+
+static void nsim_fib_set_max(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, u64 val)
+{
+ struct nsim_fib_entry *entry;
+
+ switch (res_id) {
+ case NSIM_RESOURCE_IPV4_FIB:
+ entry = &fib_data->ipv4.fib;
+ break;
+ case NSIM_RESOURCE_IPV4_FIB_RULES:
+ entry = &fib_data->ipv4.rules;
+ break;
+ case NSIM_RESOURCE_IPV6_FIB:
+ entry = &fib_data->ipv6.fib;
+ break;
+ case NSIM_RESOURCE_IPV6_FIB_RULES:
+ entry = &fib_data->ipv6.rules;
+ break;
+ default:
+ WARN_ON(1);
+ return;
+ }
+ entry->max = val;
+}
+
+static int nsim_fib_rule_account(struct nsim_fib_entry *entry, bool add,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ if (add) {
+ if (entry->num < entry->max) {
+ entry->num++;
+ } else {
+ err = -ENOSPC;
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib rule entries");
+ }
+ } else {
+ entry->num--;
+ }
+
+ return err;
+}
+
+static int nsim_fib_rule_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, bool add)
+{
+ struct netlink_ext_ack *extack = info->extack;
+ int err = 0;
+
+ switch (info->family) {
+ case AF_INET:
+ err = nsim_fib_rule_account(&data->ipv4.rules, add, extack);
+ break;
+ case AF_INET6:
+ err = nsim_fib_rule_account(&data->ipv6.rules, add, extack);
+ break;
+ }
+
+ return err;
+}
+
+static int nsim_fib_account(struct nsim_fib_entry *entry, bool add,
+ struct netlink_ext_ack *extack)
+{
+ int err = 0;
+
+ if (add) {
+ if (entry->num < entry->max) {
+ entry->num++;
+ } else {
+ err = -ENOSPC;
+ NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported fib entries");
+ }
+ } else {
+ entry->num--;
+ }
+
+ return err;
+}
+
+static void nsim_fib_rt_init(struct nsim_fib_data *data,
+ struct nsim_fib_rt *fib_rt, const void *addr,
+ size_t addr_len, unsigned int prefix_len,
+ int family, u32 tb_id)
+{
+ memcpy(fib_rt->key.addr, addr, addr_len);
+ fib_rt->key.prefix_len = prefix_len;
+ fib_rt->key.family = family;
+ fib_rt->key.tb_id = tb_id;
+ list_add(&fib_rt->list, &data->fib_rt_list);
+}
+
+static void nsim_fib_rt_fini(struct nsim_fib_rt *fib_rt)
+{
+ list_del(&fib_rt->list);
+}
+
+static struct nsim_fib_rt *nsim_fib_rt_lookup(struct rhashtable *fib_rt_ht,
+ const void *addr, size_t addr_len,
+ unsigned int prefix_len,
+ int family, u32 tb_id)
+{
+ struct nsim_fib_rt_key key;
+
+ memset(&key, 0, sizeof(key));
+ memcpy(key.addr, addr, addr_len);
+ key.prefix_len = prefix_len;
+ key.family = family;
+ key.tb_id = tb_id;
+
+ return rhashtable_lookup_fast(fib_rt_ht, &key, nsim_fib_rt_ht_params);
+}
+
+static struct nsim_fib4_rt *
+nsim_fib4_rt_create(struct nsim_fib_data *data,
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = kzalloc(sizeof(*fib4_rt), GFP_ATOMIC);
+ if (!fib4_rt)
+ return NULL;
+
+ nsim_fib_rt_init(data, &fib4_rt->common, &fen_info->dst, sizeof(u32),
+ fen_info->dst_len, AF_INET, fen_info->tb_id);
+
+ fib4_rt->fi = fen_info->fi;
+ fib_info_hold(fib4_rt->fi);
+ fib4_rt->tos = fen_info->tos;
+ fib4_rt->type = fen_info->type;
+
+ return fib4_rt;
+}
+
+static void nsim_fib4_rt_destroy(struct nsim_fib4_rt *fib4_rt)
+{
+ fib_info_put(fib4_rt->fi);
+ nsim_fib_rt_fini(&fib4_rt->common);
+ kfree(fib4_rt);
+}
+
+static struct nsim_fib4_rt *
+nsim_fib4_rt_lookup(struct rhashtable *fib_rt_ht,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct nsim_fib_rt *fib_rt;
+
+ fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &fen_info->dst, sizeof(u32),
+ fen_info->dst_len, AF_INET,
+ fen_info->tb_id);
+ if (!fib_rt)
+ return NULL;
+
+ return container_of(fib_rt, struct nsim_fib4_rt, common);
+}
+
+static void nsim_fib4_rt_hw_flags_set(struct net *net,
+ const struct nsim_fib4_rt *fib4_rt,
+ bool trap)
+{
+ u32 *p_dst = (u32 *) fib4_rt->common.key.addr;
+ int dst_len = fib4_rt->common.key.prefix_len;
+ struct fib_rt_info fri;
+
+ fri.fi = fib4_rt->fi;
+ fri.tb_id = fib4_rt->common.key.tb_id;
+ fri.dst = cpu_to_be32(*p_dst);
+ fri.dst_len = dst_len;
+ fri.tos = fib4_rt->tos;
+ fri.type = fib4_rt->type;
+ fri.offload = false;
+ fri.trap = trap;
+ fib_alias_hw_flags_set(net, &fri);
+}
+
+static int nsim_fib4_rt_add(struct nsim_fib_data *data,
+ struct nsim_fib4_rt *fib4_rt,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ err = nsim_fib_account(&data->ipv4.fib, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->fib_rt_ht,
+ &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv4 route");
+ goto err_fib_dismiss;
+ }
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
+
+ return 0;
+
+err_fib_dismiss:
+ nsim_fib_account(&data->ipv4.fib, false, extack);
+ return err;
+}
+
+static int nsim_fib4_rt_replace(struct nsim_fib_data *data,
+ struct nsim_fib4_rt *fib4_rt,
+ struct nsim_fib4_rt *fib4_rt_old,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = devlink_net(data->devlink);
+ int err;
+
+ /* We are replacing a route, so no need to change the accounting. */
+ err = rhashtable_replace_fast(&data->fib_rt_ht,
+ &fib4_rt_old->common.ht_node,
+ &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv4 route");
+ return err;
+ }
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt, true);
+
+ nsim_fib4_rt_hw_flags_set(net, fib4_rt_old, false);
+ nsim_fib4_rt_destroy(fib4_rt_old);
+
+ return 0;
+}
+
+static int nsim_fib4_rt_insert(struct nsim_fib_data *data,
+ struct fib_entry_notifier_info *fen_info)
+{
+ struct netlink_ext_ack *extack = fen_info->info.extack;
+ struct nsim_fib4_rt *fib4_rt, *fib4_rt_old;
+ int err;
+
+ fib4_rt = nsim_fib4_rt_create(data, fen_info);
+ if (!fib4_rt)
+ return -ENOMEM;
+
+ fib4_rt_old = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
+ if (!fib4_rt_old)
+ err = nsim_fib4_rt_add(data, fib4_rt, extack);
+ else
+ err = nsim_fib4_rt_replace(data, fib4_rt, fib4_rt_old, extack);
+
+ if (err)
+ nsim_fib4_rt_destroy(fib4_rt);
+
+ return err;
+}
+
+static void nsim_fib4_rt_remove(struct nsim_fib_data *data,
+ const struct fib_entry_notifier_info *fen_info)
+{
+ struct netlink_ext_ack *extack = fen_info->info.extack;
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = nsim_fib4_rt_lookup(&data->fib_rt_ht, fen_info);
+ if (WARN_ON_ONCE(!fib4_rt))
+ return;
+
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib4_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_account(&data->ipv4.fib, false, extack);
+ nsim_fib4_rt_destroy(fib4_rt);
+}
+
+static int nsim_fib4_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info,
+ unsigned long event)
+{
+ struct fib_entry_notifier_info *fen_info;
+ int err = 0;
+
+ fen_info = container_of(info, struct fib_entry_notifier_info, info);
+
+ if (fen_info->fi->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv4 route with nexthop objects is not supported");
+ return 0;
+ }
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib4_rt_insert(data, fen_info);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib4_rt_remove(data, fen_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static struct nsim_fib6_rt_nh *
+nsim_fib6_rt_nh_find(const struct nsim_fib6_rt *fib6_rt,
+ const struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list) {
+ if (fib6_rt_nh->rt == rt)
+ return fib6_rt_nh;
+ }
+
+ return NULL;
+}
+
+static int nsim_fib6_rt_nh_add(struct nsim_fib6_rt *fib6_rt,
+ struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ fib6_rt_nh = kzalloc(sizeof(*fib6_rt_nh), GFP_ATOMIC);
+ if (!fib6_rt_nh)
+ return -ENOMEM;
+
+ fib6_info_hold(rt);
+ fib6_rt_nh->rt = rt;
+ list_add_tail(&fib6_rt_nh->list, &fib6_rt->nh_list);
+ fib6_rt->nhs++;
+
+ return 0;
+}
+
+static void nsim_fib6_rt_nh_del(struct nsim_fib6_rt *fib6_rt,
+ const struct fib6_info *rt)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ fib6_rt_nh = nsim_fib6_rt_nh_find(fib6_rt, rt);
+ if (WARN_ON_ONCE(!fib6_rt_nh))
+ return;
+
+ fib6_rt->nhs--;
+ list_del(&fib6_rt_nh->list);
+#if IS_ENABLED(CONFIG_IPV6)
+ fib6_info_release(fib6_rt_nh->rt);
+#endif
+ kfree(fib6_rt_nh);
+}
+
+static struct nsim_fib6_rt *
+nsim_fib6_rt_create(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *iter, *rt = fen6_info->rt;
+ struct nsim_fib6_rt *fib6_rt;
+ int i = 0;
+ int err;
+
+ fib6_rt = kzalloc(sizeof(*fib6_rt), GFP_ATOMIC);
+ if (!fib6_rt)
+ return ERR_PTR(-ENOMEM);
+
+ nsim_fib_rt_init(data, &fib6_rt->common, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr), rt->fib6_dst.plen, AF_INET6,
+ rt->fib6_table->tb6_id);
+
+ /* We consider a multipath IPv6 route as one entry, but it can be made
+ * up from several fib6_info structs (one for each nexthop), so we
+ * add them all to the same list under the entry.
+ */
+ INIT_LIST_HEAD(&fib6_rt->nh_list);
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, rt);
+ if (err)
+ goto err_fib_rt_fini;
+
+ if (!fen6_info->nsiblings)
+ return fib6_rt;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ if (err)
+ goto err_fib6_rt_nh_del;
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return fib6_rt;
+
+err_fib6_rt_nh_del:
+ list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
+ fib6_siblings)
+ nsim_fib6_rt_nh_del(fib6_rt, iter);
+ nsim_fib6_rt_nh_del(fib6_rt, rt);
+err_fib_rt_fini:
+ nsim_fib_rt_fini(&fib6_rt->common);
+ kfree(fib6_rt);
+ return ERR_PTR(err);
+}
+
+static void nsim_fib6_rt_destroy(struct nsim_fib6_rt *fib6_rt)
+{
+ struct nsim_fib6_rt_nh *iter, *tmp;
+
+ list_for_each_entry_safe(iter, tmp, &fib6_rt->nh_list, list)
+ nsim_fib6_rt_nh_del(fib6_rt, iter->rt);
+ WARN_ON_ONCE(!list_empty(&fib6_rt->nh_list));
+ nsim_fib_rt_fini(&fib6_rt->common);
+ kfree(fib6_rt);
+}
+
+static struct nsim_fib6_rt *
+nsim_fib6_rt_lookup(struct rhashtable *fib_rt_ht, const struct fib6_info *rt)
+{
+ struct nsim_fib_rt *fib_rt;
+
+ fib_rt = nsim_fib_rt_lookup(fib_rt_ht, &rt->fib6_dst.addr,
+ sizeof(rt->fib6_dst.addr),
+ rt->fib6_dst.plen, AF_INET6,
+ rt->fib6_table->tb6_id);
+ if (!fib_rt)
+ return NULL;
+
+ return container_of(fib_rt, struct nsim_fib6_rt, common);
+}
+
+static int nsim_fib6_rt_append(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct fib6_info *iter, *rt = fen6_info->rt;
+ struct nsim_fib6_rt *fib6_rt;
+ int i = 0;
+ int err;
+
+ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, rt);
+ if (WARN_ON_ONCE(!fib6_rt))
+ return -EINVAL;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, rt);
+ if (err)
+ return err;
+ rt->trap = true;
+
+ if (!fen6_info->nsiblings)
+ return 0;
+
+ list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
+ if (i == fen6_info->nsiblings)
+ break;
+
+ err = nsim_fib6_rt_nh_add(fib6_rt, iter);
+ if (err)
+ goto err_fib6_rt_nh_del;
+ iter->trap = true;
+ i++;
+ }
+ WARN_ON_ONCE(i != fen6_info->nsiblings);
+
+ return 0;
+
+err_fib6_rt_nh_del:
+ list_for_each_entry_continue_reverse(iter, &rt->fib6_siblings,
+ fib6_siblings) {
+ iter->trap = false;
+ nsim_fib6_rt_nh_del(fib6_rt, iter);
+ }
+ rt->trap = false;
+ nsim_fib6_rt_nh_del(fib6_rt, rt);
+ return err;
+}
+
+static void nsim_fib6_rt_hw_flags_set(const struct nsim_fib6_rt *fib6_rt,
+ bool trap)
+{
+ struct nsim_fib6_rt_nh *fib6_rt_nh;
+
+ list_for_each_entry(fib6_rt_nh, &fib6_rt->nh_list, list)
+ fib6_info_hw_flags_set(fib6_rt_nh->rt, false, trap);
+}
+
+static int nsim_fib6_rt_add(struct nsim_fib_data *data,
+ struct nsim_fib6_rt *fib6_rt,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ err = nsim_fib_account(&data->ipv6.fib, true, extack);
+ if (err)
+ return err;
+
+ err = rhashtable_insert_fast(&data->fib_rt_ht,
+ &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to insert IPv6 route");
+ goto err_fib_dismiss;
+ }
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+
+ return 0;
+
+err_fib_dismiss:
+ nsim_fib_account(&data->ipv6.fib, false, extack);
+ return err;
+}
+
+static int nsim_fib6_rt_replace(struct nsim_fib_data *data,
+ struct nsim_fib6_rt *fib6_rt,
+ struct nsim_fib6_rt *fib6_rt_old,
+ struct netlink_ext_ack *extack)
+{
+ int err;
+
+ /* We are replacing a route, so no need to change the accounting. */
+ err = rhashtable_replace_fast(&data->fib_rt_ht,
+ &fib6_rt_old->common.ht_node,
+ &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ if (err) {
+ NL_SET_ERR_MSG_MOD(extack, "Failed to replace IPv6 route");
+ return err;
+ }
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt, true);
+
+ nsim_fib6_rt_hw_flags_set(fib6_rt_old, false);
+ nsim_fib6_rt_destroy(fib6_rt_old);
+
+ return 0;
+}
+
+static int nsim_fib6_rt_insert(struct nsim_fib_data *data,
+ struct fib6_entry_notifier_info *fen6_info)
+{
+ struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct nsim_fib6_rt *fib6_rt, *fib6_rt_old;
+ int err;
+
+ fib6_rt = nsim_fib6_rt_create(data, fen6_info);
+ if (IS_ERR(fib6_rt))
+ return PTR_ERR(fib6_rt);
+
+ fib6_rt_old = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ if (!fib6_rt_old)
+ err = nsim_fib6_rt_add(data, fib6_rt, extack);
+ else
+ err = nsim_fib6_rt_replace(data, fib6_rt, fib6_rt_old, extack);
+
+ if (err)
+ nsim_fib6_rt_destroy(fib6_rt);
+
+ return err;
+}
+
+static void
+nsim_fib6_rt_remove(struct nsim_fib_data *data,
+ const struct fib6_entry_notifier_info *fen6_info)
+{
+ struct netlink_ext_ack *extack = fen6_info->info.extack;
+ struct nsim_fib6_rt *fib6_rt;
+
+ /* Multipath routes are first added to the FIB trie and only then
+ * notified. If we vetoed the addition, we will get a delete
+ * notification for a route we do not have. Therefore, do not warn if
+ * route was not found.
+ */
+ fib6_rt = nsim_fib6_rt_lookup(&data->fib_rt_ht, fen6_info->rt);
+ if (!fib6_rt)
+ return;
+
+ /* If not all the nexthops are deleted, then only reduce the nexthop
+ * group.
+ */
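+	/* fen6_info->nsiblings counts only the siblings of the deleted route,
+	 * not the route itself, hence the "+ 1" in the comparison below.
+	 */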
+ if (fen6_info->nsiblings + 1 != fib6_rt->nhs) {
+ nsim_fib6_rt_nh_del(fib6_rt, fen6_info->rt);
+ return;
+ }
+
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib6_rt->common.ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_account(&data->ipv6.fib, false, extack);
+ nsim_fib6_rt_destroy(fib6_rt);
+}
+
+static int nsim_fib6_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info,
+ unsigned long event)
+{
+ struct fib6_entry_notifier_info *fen6_info;
+ int err = 0;
+
+ fen6_info = container_of(info, struct fib6_entry_notifier_info, info);
+
+ if (fen6_info->rt->nh) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 route with nexthop objects is not supported");
+ return 0;
+ }
+
+ if (fen6_info->rt->fib6_src.plen) {
+ NL_SET_ERR_MSG_MOD(info->extack, "IPv6 source-specific route is not supported");
+ return 0;
+ }
+
+ switch (event) {
+ case FIB_EVENT_ENTRY_REPLACE:
+ err = nsim_fib6_rt_insert(data, fen6_info);
+ break;
+ case FIB_EVENT_ENTRY_APPEND:
+ err = nsim_fib6_rt_append(data, fen6_info);
+ break;
+ case FIB_EVENT_ENTRY_DEL:
+ nsim_fib6_rt_remove(data, fen6_info);
+ break;
+ default:
+ break;
+ }
+
+ return err;
+}
+
+static int nsim_fib_event(struct nsim_fib_data *data,
+ struct fib_notifier_info *info, unsigned long event)
+{
+ int err = 0;
+
+ switch (info->family) {
+ case AF_INET:
+ err = nsim_fib4_event(data, info, event);
+ break;
+ case AF_INET6:
+ err = nsim_fib6_event(data, info, event);
+ break;
+ }
+
+ return err;
+}
+
+static int nsim_fib_event_nb(struct notifier_block *nb, unsigned long event,
+ void *ptr)
+{
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
+ struct fib_notifier_info *info = ptr;
+ int err = 0;
+
+ /* IPv6 routes can be added via RAs from softIRQ. */
+ spin_lock_bh(&data->fib_lock);
+
+ switch (event) {
+ case FIB_EVENT_RULE_ADD:
+ case FIB_EVENT_RULE_DEL:
+ err = nsim_fib_rule_event(data, info,
+ event == FIB_EVENT_RULE_ADD);
+ break;
+
+ case FIB_EVENT_ENTRY_REPLACE:
+ case FIB_EVENT_ENTRY_APPEND:
+ case FIB_EVENT_ENTRY_DEL:
+ err = nsim_fib_event(data, info, event);
+ break;
+ }
+
+ spin_unlock_bh(&data->fib_lock);
+
+ return notifier_from_errno(err);
+}
+
+static void nsim_fib4_rt_free(struct nsim_fib_rt *fib_rt,
+ struct nsim_fib_data *data)
+{
+ struct devlink *devlink = data->devlink;
+ struct nsim_fib4_rt *fib4_rt;
+
+ fib4_rt = container_of(fib_rt, struct nsim_fib4_rt, common);
+ nsim_fib4_rt_hw_flags_set(devlink_net(devlink), fib4_rt, false);
+ nsim_fib_account(&data->ipv4.fib, false, NULL);
+ nsim_fib4_rt_destroy(fib4_rt);
+}
+
+static void nsim_fib6_rt_free(struct nsim_fib_rt *fib_rt,
+ struct nsim_fib_data *data)
+{
+ struct nsim_fib6_rt *fib6_rt;
+
+ fib6_rt = container_of(fib_rt, struct nsim_fib6_rt, common);
+ nsim_fib6_rt_hw_flags_set(fib6_rt, false);
+ nsim_fib_account(&data->ipv6.fib, false, NULL);
+ nsim_fib6_rt_destroy(fib6_rt);
+}
+
+static void nsim_fib_rt_free(void *ptr, void *arg)
+{
+ struct nsim_fib_rt *fib_rt = ptr;
+ struct nsim_fib_data *data = arg;
+
+ switch (fib_rt->key.family) {
+ case AF_INET:
+ nsim_fib4_rt_free(fib_rt, data);
+ break;
+ case AF_INET6:
+ nsim_fib6_rt_free(fib_rt, data);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ }
+}
+
+/* inconsistent dump, trying again */
+static void nsim_fib_dump_inconsistent(struct notifier_block *nb)
+{
+ struct nsim_fib_data *data = container_of(nb, struct nsim_fib_data,
+ fib_nb);
+ struct nsim_fib_rt *fib_rt, *fib_rt_tmp;
+
+ /* The notifier block is still not registered, so we do not need to
+ * take any locks here.
+ */
+ list_for_each_entry_safe(fib_rt, fib_rt_tmp, &data->fib_rt_list, list) {
+ rhashtable_remove_fast(&data->fib_rt_ht, &fib_rt->ht_node,
+ nsim_fib_rt_ht_params);
+ nsim_fib_rt_free(fib_rt, data);
+ }
+
+ data->ipv4.rules.num = 0ULL;
+ data->ipv6.rules.num = 0ULL;
+}
+
+static u64 nsim_fib_ipv4_resource_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
+
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB, false);
+}
+
+static u64 nsim_fib_ipv4_rules_res_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
+
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV4_FIB_RULES, false);
+}
+
+static u64 nsim_fib_ipv6_resource_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
+
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB, false);
+}
+
+static u64 nsim_fib_ipv6_rules_res_occ_get(void *priv)
+{
+ struct nsim_fib_data *data = priv;
+
+ return nsim_fib_get_val(data, NSIM_RESOURCE_IPV6_FIB_RULES, false);
+}
+
+static void nsim_fib_set_max_all(struct nsim_fib_data *data,
+ struct devlink *devlink)
+{
+ enum nsim_resource_id res_ids[] = {
+ NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6_FIB, NSIM_RESOURCE_IPV6_FIB_RULES
+ };
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(res_ids); i++) {
+ int err;
+ u64 val;
+
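+		/* If no resource size was configured via devlink, fall back
+		 * to an effectively unlimited maximum.
+		 */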
+ err = devlink_resource_size_get(devlink, res_ids[i], &val);
+ if (err)
+ val = (u64) -1;
+ nsim_fib_set_max(data, res_ids[i], val);
+ }
+}
+
+struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_fib_data *data;
+ int err;
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+ return ERR_PTR(-ENOMEM);
+ data->devlink = devlink;
+
+ spin_lock_init(&data->fib_lock);
+ INIT_LIST_HEAD(&data->fib_rt_list);
+ err = rhashtable_init(&data->fib_rt_ht, &nsim_fib_rt_ht_params);
+ if (err)
+ goto err_data_free;
+
+ nsim_fib_set_max_all(data, devlink);
+
+ data->fib_nb.notifier_call = nsim_fib_event_nb;
+ err = register_fib_notifier(devlink_net(devlink), &data->fib_nb,
+ nsim_fib_dump_inconsistent, extack);
+ if (err) {
+ pr_err("Failed to register fib notifier\n");
+ goto err_rhashtable_destroy;
+ }
+
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB,
+ nsim_fib_ipv4_resource_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ nsim_fib_ipv4_rules_res_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB,
+ nsim_fib_ipv6_resource_occ_get,
+ data);
+ devlink_resource_occ_get_register(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+ nsim_fib_ipv6_rules_res_occ_get,
+ data);
+ return data;
+
+err_rhashtable_destroy:
+ rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
+ data);
+err_data_free:
+ kfree(data);
+ return ERR_PTR(err);
+}
+
+void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *data)
+{
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB_RULES);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV6_FIB);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB_RULES);
+ devlink_resource_occ_get_unregister(devlink,
+ NSIM_RESOURCE_IPV4_FIB);
+ unregister_fib_notifier(devlink_net(devlink), &data->fib_nb);
+ rhashtable_free_and_destroy(&data->fib_rt_ht, nsim_fib_rt_free,
+ data);
+ WARN_ON_ONCE(!list_empty(&data->fib_rt_list));
+ kfree(data);
+}
diff --git a/drivers/net/netdevsim/health.c b/drivers/net/netdevsim/health.c
new file mode 100644
index 000000000..21e297466
--- /dev/null
+++ b/drivers/net/netdevsim/health.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019 Mellanox Technologies. All rights reserved */
+
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#include "netdevsim.h"
+
+static int
+nsim_dev_empty_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static int
+nsim_dev_empty_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static const
+struct devlink_health_reporter_ops nsim_dev_empty_reporter_ops = {
+ .name = "empty",
+ .dump = nsim_dev_empty_reporter_dump,
+ .diagnose = nsim_dev_empty_reporter_diagnose,
+};
+
+struct nsim_dev_dummy_reporter_ctx {
+ char *break_msg;
+};
+
+static int
+nsim_dev_dummy_reporter_recover(struct devlink_health_reporter *reporter,
+ void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
+
+ if (health->fail_recover) {
+		/* For testing purposes, the user set the debugfs fail_recover
+		 * value to true. Fail right away.
+		 */
+		NL_SET_ERR_MSG_MOD(extack, "User set up the recover operation to fail for testing purposes");
+ return -EINVAL;
+ }
+ if (ctx) {
+ kfree(health->recovered_break_msg);
+ health->recovered_break_msg = kstrdup(ctx->break_msg,
+ GFP_KERNEL);
+ if (!health->recovered_break_msg)
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+static int nsim_dev_dummy_fmsg_put(struct devlink_fmsg *fmsg, u32 binary_len)
+{
+ char *binary;
+ int err;
+ int i;
+
+ err = devlink_fmsg_bool_pair_put(fmsg, "test_bool", true);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "test_u8", 1);
+ if (err)
+ return err;
+ err = devlink_fmsg_u32_pair_put(fmsg, "test_u32", 3);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "test_u64", 4);
+ if (err)
+ return err;
+ err = devlink_fmsg_string_pair_put(fmsg, "test_string", "somestring");
+ if (err)
+ return err;
+
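+	/* binary_len is user-controlled via debugfs, so suppress allocation
+	 * failure warnings for absurdly large requests.
+	 */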
+ binary = kmalloc(binary_len, GFP_KERNEL | __GFP_NOWARN);
+ if (!binary)
+ return -ENOMEM;
+ get_random_bytes(binary, binary_len);
+ err = devlink_fmsg_binary_pair_put(fmsg, "test_binary", binary, binary_len);
+ kfree(binary);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, "test_nest");
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_bool_pair_put(fmsg, "nested_test_bool", false);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg, "nested_test_u8", false);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_bool_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_bool_put(fmsg, true);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u8_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u8_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u32_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u32_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_u64_array");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_u64_put(fmsg, i);
+ if (err)
+ return err;
+ }
+ err = devlink_fmsg_arr_pair_nest_end(fmsg);
+ if (err)
+ return err;
+
+ err = devlink_fmsg_arr_pair_nest_start(fmsg, "test_array_of_objects");
+ if (err)
+ return err;
+ for (i = 0; i < 10; i++) {
+ err = devlink_fmsg_obj_nest_start(fmsg);
+ if (err)
+ return err;
+ err = devlink_fmsg_bool_pair_put(fmsg,
+ "in_array_nested_test_bool",
+ false);
+ if (err)
+ return err;
+ err = devlink_fmsg_u8_pair_put(fmsg,
+ "in_array_nested_test_u8",
+ i);
+ if (err)
+ return err;
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+ }
+ return devlink_fmsg_arr_pair_nest_end(fmsg);
+}
+
+static int
+nsim_dev_dummy_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ struct nsim_dev_dummy_reporter_ctx *ctx = priv_ctx;
+ int err;
+
+ if (ctx) {
+ err = devlink_fmsg_string_pair_put(fmsg, "break_message",
+ ctx->break_msg);
+ if (err)
+ return err;
+ }
+ return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
+}
+
+static int
+nsim_dev_dummy_reporter_diagnose(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack)
+{
+ struct nsim_dev_health *health = devlink_health_reporter_priv(reporter);
+ int err;
+
+ if (health->recovered_break_msg) {
+ err = devlink_fmsg_string_pair_put(fmsg,
+ "recovered_break_message",
+ health->recovered_break_msg);
+ if (err)
+ return err;
+ }
+ return nsim_dev_dummy_fmsg_put(fmsg, health->binary_len);
+}
+
+static const
+struct devlink_health_reporter_ops nsim_dev_dummy_reporter_ops = {
+ .name = "dummy",
+ .recover = nsim_dev_dummy_reporter_recover,
+ .dump = nsim_dev_dummy_reporter_dump,
+ .diagnose = nsim_dev_dummy_reporter_diagnose,
+};
+
+static ssize_t nsim_dev_health_break_write(struct file *file,
+ const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct nsim_dev_health *health = file->private_data;
+ struct nsim_dev_dummy_reporter_ctx ctx;
+ char *break_msg;
+ int err;
+
+ break_msg = kmalloc(count + 1, GFP_KERNEL);
+ if (!break_msg)
+ return -ENOMEM;
+
+ if (copy_from_user(break_msg, data, count)) {
+ err = -EFAULT;
+ goto out;
+ }
+ break_msg[count] = '\0';
+ if (break_msg[count - 1] == '\n')
+ break_msg[count - 1] = '\0';
+
+ ctx.break_msg = break_msg;
+	err = devlink_health_report(health->dummy_reporter, break_msg, &ctx);
+
+out:
+ kfree(break_msg);
+ return err ?: count;
+}
+
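+/* Illustrative use of the "break_health" debugfs file created below:
+ *
+ *	echo "some error" > .../netdevsim/<dev>/health/break_health
+ *
+ * The written string is reported to the dummy reporter through
+ * devlink_health_report(); the dump and diagnose callbacks then echo it
+ * back as "break_message" / "recovered_break_message".
+ */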
+static const struct file_operations nsim_dev_health_break_fops = {
+ .open = simple_open,
+ .write = nsim_dev_health_break_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
+int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink)
+{
+ struct nsim_dev_health *health = &nsim_dev->health;
+ int err;
+
+ health->empty_reporter =
+ devlink_health_reporter_create(devlink,
+ &nsim_dev_empty_reporter_ops,
+ 0, health);
+ if (IS_ERR(health->empty_reporter))
+ return PTR_ERR(health->empty_reporter);
+
+ health->dummy_reporter =
+ devlink_health_reporter_create(devlink,
+ &nsim_dev_dummy_reporter_ops,
+ 0, health);
+ if (IS_ERR(health->dummy_reporter)) {
+ err = PTR_ERR(health->dummy_reporter);
+ goto err_empty_reporter_destroy;
+ }
+
+ health->ddir = debugfs_create_dir("health", nsim_dev->ddir);
+ if (IS_ERR(health->ddir)) {
+ err = PTR_ERR(health->ddir);
+ goto err_dummy_reporter_destroy;
+ }
+
+ health->recovered_break_msg = NULL;
+ debugfs_create_file("break_health", 0200, health->ddir, health,
+ &nsim_dev_health_break_fops);
+ health->binary_len = 16;
+ debugfs_create_u32("binary_len", 0600, health->ddir,
+ &health->binary_len);
+ health->fail_recover = false;
+ debugfs_create_bool("fail_recover", 0600, health->ddir,
+ &health->fail_recover);
+ return 0;
+
+err_dummy_reporter_destroy:
+ devlink_health_reporter_destroy(health->dummy_reporter);
+err_empty_reporter_destroy:
+ devlink_health_reporter_destroy(health->empty_reporter);
+ return err;
+}
+
+void nsim_dev_health_exit(struct nsim_dev *nsim_dev)
+{
+ struct nsim_dev_health *health = &nsim_dev->health;
+
+ debugfs_remove_recursive(health->ddir);
+ kfree(health->recovered_break_msg);
+ devlink_health_reporter_destroy(health->dummy_reporter);
+ devlink_health_reporter_destroy(health->empty_reporter);
+}
diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c
new file mode 100644
index 000000000..b80ed2ffd
--- /dev/null
+++ b/drivers/net/netdevsim/ipsec.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright(c) 2018 Oracle and/or its affiliates. All rights reserved. */
+
+#include <crypto/aead.h>
+#include <linux/debugfs.h>
+#include <net/xfrm.h>
+
+#include "netdevsim.h"
+
+#define NSIM_IPSEC_AUTH_BITS 128
+
+static ssize_t nsim_dbg_netdev_ops_read(struct file *filp,
+ char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ struct netdevsim *ns = filp->private_data;
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+ size_t bufsize;
+ char *buf, *p;
+ int len;
+ int i;
+
+	/* Each SA prints 3 lines of roughly 60 bytes each; allocate 4 lines
+	 * per SA for slack, plus one more line for the header.
+	 */
+ bufsize = (ipsec->count * 4 * 60) + 60;
+ buf = kzalloc(bufsize, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ p = buf;
+ p += scnprintf(p, bufsize - (p - buf),
+ "SA count=%u tx=%u\n",
+ ipsec->count, ipsec->tx);
+
+ for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
+ struct nsim_sa *sap = &ipsec->sa[i];
+
+ if (!sap->used)
+ continue;
+
+ p += scnprintf(p, bufsize - (p - buf),
+ "sa[%i] %cx ipaddr=0x%08x %08x %08x %08x\n",
+ i, (sap->rx ? 'r' : 't'), sap->ipaddr[0],
+ sap->ipaddr[1], sap->ipaddr[2], sap->ipaddr[3]);
+ p += scnprintf(p, bufsize - (p - buf),
+ "sa[%i] spi=0x%08x proto=0x%x salt=0x%08x crypt=%d\n",
+ i, be32_to_cpu(sap->xs->id.spi),
+ sap->xs->id.proto, sap->salt, sap->crypt);
+ p += scnprintf(p, bufsize - (p - buf),
+ "sa[%i] key=0x%08x %08x %08x %08x\n",
+ i, sap->key[0], sap->key[1],
+ sap->key[2], sap->key[3]);
+ }
+
+ len = simple_read_from_buffer(buffer, count, ppos, buf, p - buf);
+
+ kfree(buf);
+ return len;
+}
+
+static const struct file_operations ipsec_dbg_fops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = nsim_dbg_netdev_ops_read,
+};
+
+static int nsim_ipsec_find_empty_idx(struct nsim_ipsec *ipsec)
+{
+ u32 i;
+
+ if (ipsec->count == NSIM_IPSEC_MAX_SA_COUNT)
+ return -ENOSPC;
+
+ /* search sa table */
+ for (i = 0; i < NSIM_IPSEC_MAX_SA_COUNT; i++) {
+ if (!ipsec->sa[i].used)
+ return i;
+ }
+
+ return -ENOSPC;
+}
+
+static int nsim_ipsec_parse_proto_keys(struct xfrm_state *xs,
+ u32 *mykey, u32 *mysalt)
+{
+ const char aes_gcm_name[] = "rfc4106(gcm(aes))";
+ struct net_device *dev = xs->xso.real_dev;
+ unsigned char *key_data;
+ char *alg_name = NULL;
+ int key_len;
+
+ if (!xs->aead) {
+ netdev_err(dev, "Unsupported IPsec algorithm\n");
+ return -EINVAL;
+ }
+
+ if (xs->aead->alg_icv_len != NSIM_IPSEC_AUTH_BITS) {
+ netdev_err(dev, "IPsec offload requires %d bit authentication\n",
+ NSIM_IPSEC_AUTH_BITS);
+ return -EINVAL;
+ }
+
+ key_data = &xs->aead->alg_key[0];
+ key_len = xs->aead->alg_key_len;
+ alg_name = xs->aead->alg_name;
+
+ if (strcmp(alg_name, aes_gcm_name)) {
+ netdev_err(dev, "Unsupported IPsec algorithm - please use %s\n",
+ aes_gcm_name);
+ return -EINVAL;
+ }
+
+ /* 160 accounts for 16 byte key and 4 byte salt */
+ if (key_len > NSIM_IPSEC_AUTH_BITS) {
+ *mysalt = ((u32 *)key_data)[4];
+ } else if (key_len == NSIM_IPSEC_AUTH_BITS) {
+ *mysalt = 0;
+ } else {
+ netdev_err(dev, "IPsec hw offload only supports 128 bit keys with optional 32 bit salt\n");
+ return -EINVAL;
+ }
+ memcpy(mykey, key_data, 16);
+
+ return 0;
+}
+
+static int nsim_ipsec_add_sa(struct xfrm_state *xs)
+{
+ struct nsim_ipsec *ipsec;
+ struct net_device *dev;
+ struct netdevsim *ns;
+ struct nsim_sa sa;
+ u16 sa_idx;
+ int ret;
+
+ dev = xs->xso.real_dev;
+ ns = netdev_priv(dev);
+ ipsec = &ns->ipsec;
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(dev, "Unsupported protocol 0x%04x for ipsec offload\n",
+ xs->id.proto);
+ return -EINVAL;
+ }
+
+ if (xs->calg) {
+ netdev_err(dev, "Compression offload not supported\n");
+ return -EINVAL;
+ }
+
+ /* find the first unused index */
+ ret = nsim_ipsec_find_empty_idx(ipsec);
+ if (ret < 0) {
+ netdev_err(dev, "No space for SA in Rx table!\n");
+ return ret;
+ }
+ sa_idx = (u16)ret;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.used = true;
+ sa.xs = xs;
+
+ if (sa.xs->id.proto & IPPROTO_ESP)
+ sa.crypt = xs->ealg || xs->aead;
+
+ /* get the key and salt */
+ ret = nsim_ipsec_parse_proto_keys(xs, sa.key, &sa.salt);
+ if (ret) {
+ netdev_err(dev, "Failed to get key data for SA table\n");
+ return ret;
+ }
+
+ if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
+ sa.rx = true;
+
+ if (xs->props.family == AF_INET6)
+ memcpy(sa.ipaddr, &xs->id.daddr.a6, 16);
+ else
+ memcpy(&sa.ipaddr[3], &xs->id.daddr.a4, 4);
+ }
+
+ /* the preparations worked, so save the info */
+ memcpy(&ipsec->sa[sa_idx], &sa, sizeof(sa));
+
+ /* the XFRM stack doesn't like offload_handle == 0,
+ * so add a bitflag in case our array index is 0
+ */
+ xs->xso.offload_handle = sa_idx | NSIM_IPSEC_VALID;
+ ipsec->count++;
+
+ return 0;
+}
+
+static void nsim_ipsec_del_sa(struct xfrm_state *xs)
+{
+ struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+ u16 sa_idx;
+
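+	/* offload_handle was stored as (index | NSIM_IPSEC_VALID); mask the
+	 * flag off to recover the SA table index.
+	 */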
+ sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
+ if (!ipsec->sa[sa_idx].used) {
+ netdev_err(ns->netdev, "Invalid SA for delete sa_idx=%d\n",
+ sa_idx);
+ return;
+ }
+
+ memset(&ipsec->sa[sa_idx], 0, sizeof(struct nsim_sa));
+ ipsec->count--;
+}
+
+static bool nsim_ipsec_offload_ok(struct sk_buff *skb, struct xfrm_state *xs)
+{
+ struct netdevsim *ns = netdev_priv(xs->xso.real_dev);
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+
+ ipsec->ok++;
+
+ return true;
+}
+
+static const struct xfrmdev_ops nsim_xfrmdev_ops = {
+ .xdo_dev_state_add = nsim_ipsec_add_sa,
+ .xdo_dev_state_delete = nsim_ipsec_del_sa,
+ .xdo_dev_offload_ok = nsim_ipsec_offload_ok,
+};
+
+bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
+{
+ struct sec_path *sp = skb_sec_path(skb);
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+ struct xfrm_state *xs;
+ struct nsim_sa *tsa;
+ u32 sa_idx;
+
+ /* do we even need to check this packet? */
+ if (!sp)
+ return true;
+
+ if (unlikely(!sp->len)) {
+ netdev_err(ns->netdev, "no xfrm state len = %d\n",
+ sp->len);
+ return false;
+ }
+
+ xs = xfrm_input_state(skb);
+ if (unlikely(!xs)) {
+ netdev_err(ns->netdev, "no xfrm_input_state() xs = %p\n", xs);
+ return false;
+ }
+
+ sa_idx = xs->xso.offload_handle & ~NSIM_IPSEC_VALID;
+ if (unlikely(sa_idx >= NSIM_IPSEC_MAX_SA_COUNT)) {
+ netdev_err(ns->netdev, "bad sa_idx=%d max=%d\n",
+ sa_idx, NSIM_IPSEC_MAX_SA_COUNT);
+ return false;
+ }
+
+ tsa = &ipsec->sa[sa_idx];
+ if (unlikely(!tsa->used)) {
+ netdev_err(ns->netdev, "unused sa_idx=%d\n", sa_idx);
+ return false;
+ }
+
+ if (xs->id.proto != IPPROTO_ESP && xs->id.proto != IPPROTO_AH) {
+ netdev_err(ns->netdev, "unexpected proto=%d\n", xs->id.proto);
+ return false;
+ }
+
+ ipsec->tx++;
+
+ return true;
+}
+
+void nsim_ipsec_init(struct netdevsim *ns)
+{
+ ns->netdev->xfrmdev_ops = &nsim_xfrmdev_ops;
+
+#define NSIM_ESP_FEATURES (NETIF_F_HW_ESP | \
+ NETIF_F_HW_ESP_TX_CSUM | \
+ NETIF_F_GSO_ESP)
+
+ ns->netdev->features |= NSIM_ESP_FEATURES;
+ ns->netdev->hw_enc_features |= NSIM_ESP_FEATURES;
+
+ ns->ipsec.pfile = debugfs_create_file("ipsec", 0400,
+ ns->nsim_dev_port->ddir, ns,
+ &ipsec_dbg_fops);
+}
+
+void nsim_ipsec_teardown(struct netdevsim *ns)
+{
+ struct nsim_ipsec *ipsec = &ns->ipsec;
+
+ if (ipsec->count)
+ netdev_err(ns->netdev, "tearing down IPsec offload with %d SAs left\n",
+ ipsec->count);
+ debugfs_remove_recursive(ipsec->pfile);
+}
diff --git a/drivers/net/netdevsim/netdev.c b/drivers/net/netdevsim/netdev.c
new file mode 100644
index 000000000..4fb0638a5
--- /dev/null
+++ b/drivers/net/netdevsim/netdev.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/etherdevice.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <net/netlink.h>
+#include <net/pkt_cls.h>
+#include <net/rtnetlink.h>
+#include <net/udp_tunnel.h>
+
+#include "netdevsim.h"
+
+static netdev_tx_t nsim_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (!nsim_ipsec_tx(ns, skb))
+ goto out;
+
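+	/* Only packets that pass the IPsec offload sanity checks are counted
+	 * in the tx stats; rejected ones are simply freed below.
+	 */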
+ u64_stats_update_begin(&ns->syncp);
+ ns->tx_packets++;
+ ns->tx_bytes += skb->len;
+ u64_stats_update_end(&ns->syncp);
+
+out:
+ dev_kfree_skb(skb);
+
+ return NETDEV_TX_OK;
+}
+
+static void nsim_set_rx_mode(struct net_device *dev)
+{
+}
+
+static int nsim_change_mtu(struct net_device *dev, int new_mtu)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if (ns->xdp.prog && new_mtu > NSIM_XDP_MAX_MTU)
+ return -EBUSY;
+
+ dev->mtu = new_mtu;
+
+ return 0;
+}
+
+static void
+nsim_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&ns->syncp);
+ stats->tx_bytes = ns->tx_bytes;
+ stats->tx_packets = ns->tx_packets;
+ } while (u64_stats_fetch_retry_irq(&ns->syncp, start));
+}
+
+static int
+nsim_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
+{
+ return nsim_bpf_setup_tc_block_cb(type, type_data, cb_priv);
+}
+
+static int nsim_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+
+ /* Only refuse multicast addresses, zero address can mean unset/any. */
+ if (vf >= nsim_bus_dev->num_vfs || is_multicast_ether_addr(mac))
+ return -EINVAL;
+ memcpy(nsim_bus_dev->vfconfigs[vf].vf_mac, mac, ETH_ALEN);
+
+ return 0;
+}
+
+static int nsim_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+
+ if (vf >= nsim_bus_dev->num_vfs || vlan > 4095 || qos > 7)
+ return -EINVAL;
+
+ nsim_bus_dev->vfconfigs[vf].vlan = vlan;
+ nsim_bus_dev->vfconfigs[vf].qos = qos;
+ nsim_bus_dev->vfconfigs[vf].vlan_proto = vlan_proto;
+
+ return 0;
+}
+
+static int nsim_set_vf_rate(struct net_device *dev, int vf, int min, int max)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+
+ if (vf >= nsim_bus_dev->num_vfs)
+ return -EINVAL;
+
+ nsim_bus_dev->vfconfigs[vf].min_tx_rate = min;
+ nsim_bus_dev->vfconfigs[vf].max_tx_rate = max;
+
+ return 0;
+}
+
+static int nsim_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+
+ if (vf >= nsim_bus_dev->num_vfs)
+ return -EINVAL;
+ nsim_bus_dev->vfconfigs[vf].spoofchk_enabled = val;
+
+ return 0;
+}
+
+static int nsim_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+
+ if (vf >= nsim_bus_dev->num_vfs)
+ return -EINVAL;
+ nsim_bus_dev->vfconfigs[vf].rss_query_enabled = val;
+
+ return 0;
+}
+
+static int nsim_set_vf_trust(struct net_device *dev, int vf, bool val)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+
+ if (vf >= nsim_bus_dev->num_vfs)
+ return -EINVAL;
+ nsim_bus_dev->vfconfigs[vf].trusted = val;
+
+ return 0;
+}
+
+static int
+nsim_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+
+ if (vf >= nsim_bus_dev->num_vfs)
+ return -EINVAL;
+
+ ivi->vf = vf;
+ ivi->linkstate = nsim_bus_dev->vfconfigs[vf].link_state;
+ ivi->min_tx_rate = nsim_bus_dev->vfconfigs[vf].min_tx_rate;
+ ivi->max_tx_rate = nsim_bus_dev->vfconfigs[vf].max_tx_rate;
+ ivi->vlan = nsim_bus_dev->vfconfigs[vf].vlan;
+ ivi->vlan_proto = nsim_bus_dev->vfconfigs[vf].vlan_proto;
+ ivi->qos = nsim_bus_dev->vfconfigs[vf].qos;
+ memcpy(&ivi->mac, nsim_bus_dev->vfconfigs[vf].vf_mac, ETH_ALEN);
+ ivi->spoofchk = nsim_bus_dev->vfconfigs[vf].spoofchk_enabled;
+ ivi->trusted = nsim_bus_dev->vfconfigs[vf].trusted;
+ ivi->rss_query_en = nsim_bus_dev->vfconfigs[vf].rss_query_enabled;
+
+ return 0;
+}
+
+static int nsim_set_vf_link_state(struct net_device *dev, int vf, int state)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct nsim_bus_dev *nsim_bus_dev = ns->nsim_bus_dev;
+
+ if (vf >= nsim_bus_dev->num_vfs)
+ return -EINVAL;
+
+ switch (state) {
+ case IFLA_VF_LINK_STATE_AUTO:
+ case IFLA_VF_LINK_STATE_ENABLE:
+ case IFLA_VF_LINK_STATE_DISABLE:
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ nsim_bus_dev->vfconfigs[vf].link_state = state;
+
+ return 0;
+}
+
+static LIST_HEAD(nsim_block_cb_list);
+
+static int
+nsim_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ switch (type) {
+ case TC_SETUP_BLOCK:
+ return flow_block_cb_setup_simple(type_data,
+ &nsim_block_cb_list,
+ nsim_setup_tc_block_cb,
+ ns, ns, true);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static int
+nsim_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ if ((dev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC))
+ return nsim_bpf_disable_tc(ns);
+
+ return 0;
+}
+
+static struct devlink_port *nsim_get_devlink_port(struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+
+ return &ns->nsim_dev_port->devlink_port;
+}
+
+static const struct net_device_ops nsim_netdev_ops = {
+ .ndo_start_xmit = nsim_start_xmit,
+ .ndo_set_rx_mode = nsim_set_rx_mode,
+ .ndo_set_mac_address = eth_mac_addr,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = nsim_change_mtu,
+ .ndo_get_stats64 = nsim_get_stats64,
+ .ndo_set_vf_mac = nsim_set_vf_mac,
+ .ndo_set_vf_vlan = nsim_set_vf_vlan,
+ .ndo_set_vf_rate = nsim_set_vf_rate,
+ .ndo_set_vf_spoofchk = nsim_set_vf_spoofchk,
+ .ndo_set_vf_trust = nsim_set_vf_trust,
+ .ndo_get_vf_config = nsim_get_vf_config,
+ .ndo_set_vf_link_state = nsim_set_vf_link_state,
+ .ndo_set_vf_rss_query_en = nsim_set_vf_rss_query_en,
+ .ndo_setup_tc = nsim_setup_tc,
+ .ndo_set_features = nsim_set_features,
+ .ndo_bpf = nsim_bpf,
+ .ndo_udp_tunnel_add = udp_tunnel_nic_add_port,
+ .ndo_udp_tunnel_del = udp_tunnel_nic_del_port,
+ .ndo_get_devlink_port = nsim_get_devlink_port,
+};
+
+static void nsim_setup(struct net_device *dev)
+{
+ ether_setup(dev);
+ eth_hw_addr_random(dev);
+
+ dev->tx_queue_len = 0;
+ dev->flags |= IFF_NOARP;
+ dev->flags &= ~IFF_MULTICAST;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE |
+ IFF_NO_QUEUE;
+ dev->features |= NETIF_F_HIGHDMA |
+ NETIF_F_SG |
+ NETIF_F_FRAGLIST |
+ NETIF_F_HW_CSUM |
+ NETIF_F_TSO;
+ dev->hw_features |= NETIF_F_HW_TC;
+ dev->max_mtu = ETH_MAX_MTU;
+}
+
+struct netdevsim *
+nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port)
+{
+ struct net_device *dev;
+ struct netdevsim *ns;
+ int err;
+
+ dev = alloc_netdev(sizeof(*ns), "eth%d", NET_NAME_UNKNOWN, nsim_setup);
+ if (!dev)
+ return ERR_PTR(-ENOMEM);
+
+ dev_net_set(dev, nsim_dev_net(nsim_dev));
+ ns = netdev_priv(dev);
+ ns->netdev = dev;
+ u64_stats_init(&ns->syncp);
+ ns->nsim_dev = nsim_dev;
+ ns->nsim_dev_port = nsim_dev_port;
+ ns->nsim_bus_dev = nsim_dev->nsim_bus_dev;
+ SET_NETDEV_DEV(dev, &ns->nsim_bus_dev->dev);
+ dev->netdev_ops = &nsim_netdev_ops;
+ nsim_ethtool_init(ns);
+
+ err = nsim_udp_tunnels_info_create(nsim_dev, dev);
+ if (err)
+ goto err_free_netdev;
+
+ rtnl_lock();
+ err = nsim_bpf_init(ns);
+ if (err)
+ goto err_utn_destroy;
+
+ nsim_ipsec_init(ns);
+
+ err = register_netdevice(dev);
+ if (err)
+ goto err_ipsec_teardown;
+ rtnl_unlock();
+
+ return ns;
+
+err_ipsec_teardown:
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+err_utn_destroy:
+ rtnl_unlock();
+ nsim_udp_tunnels_info_destroy(dev);
+err_free_netdev:
+ free_netdev(dev);
+ return ERR_PTR(err);
+}
+
+void nsim_destroy(struct netdevsim *ns)
+{
+ struct net_device *dev = ns->netdev;
+
+ rtnl_lock();
+ unregister_netdevice(dev);
+ nsim_ipsec_teardown(ns);
+ nsim_bpf_uninit(ns);
+ rtnl_unlock();
+ nsim_udp_tunnels_info_destroy(dev);
+ free_netdev(dev);
+}
+
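+/* netdevsim netdevs cannot be created with "ip link add"; devices are
+ * instantiated through the netdevsim bus in sysfs instead, so .validate
+ * always rejects rtnetlink creation with a hint.
+ */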
+static int nsim_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ NL_SET_ERR_MSG_MOD(extack, "Please use: echo \"[ID] [PORT_COUNT]\" > /sys/bus/netdevsim/new_device");
+ return -EOPNOTSUPP;
+}
+
+static struct rtnl_link_ops nsim_link_ops __read_mostly = {
+ .kind = DRV_NAME,
+ .validate = nsim_validate,
+};
+
+static int __init nsim_module_init(void)
+{
+ int err;
+
+ err = nsim_dev_init();
+ if (err)
+ return err;
+
+ err = nsim_bus_init();
+ if (err)
+ goto err_dev_exit;
+
+ err = rtnl_link_register(&nsim_link_ops);
+ if (err)
+ goto err_bus_exit;
+
+ return 0;
+
+err_bus_exit:
+ nsim_bus_exit();
+err_dev_exit:
+ nsim_dev_exit();
+ return err;
+}
+
+static void __exit nsim_module_exit(void)
+{
+ rtnl_link_unregister(&nsim_link_ops);
+ nsim_bus_exit();
+ nsim_dev_exit();
+}
+
+module_init(nsim_module_init);
+module_exit(nsim_module_exit);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK(DRV_NAME);
diff --git a/drivers/net/netdevsim/netdevsim.h b/drivers/net/netdevsim/netdevsim.h
new file mode 100644
index 000000000..c4e7ad2a1
--- /dev/null
+++ b/drivers/net/netdevsim/netdevsim.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2017 Netronome Systems, Inc.
+ *
+ * This software is licensed under the GNU General License Version 2,
+ * June 1991 as shown in the file COPYING in the top-level directory of this
+ * source tree.
+ *
+ * THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS"
+ * WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING,
+ * BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE
+ * OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME
+ * THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+ */
+
+#include <linux/debugfs.h>
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/u64_stats_sync.h>
+#include <net/devlink.h>
+#include <net/udp_tunnel.h>
+#include <net/xdp.h>
+
+#define DRV_NAME "netdevsim"
+
+#define NSIM_XDP_MAX_MTU 4000
+
+#define NSIM_EA(extack, msg) NL_SET_ERR_MSG_MOD((extack), msg)
+
+#define NSIM_IPSEC_MAX_SA_COUNT 33
+#define NSIM_IPSEC_VALID BIT(31)
+#define NSIM_UDP_TUNNEL_N_PORTS 4
+
+struct nsim_sa {
+ struct xfrm_state *xs;
+ __be32 ipaddr[4];
+ u32 key[4];
+ u32 salt;
+ bool used;
+ bool crypt;
+ bool rx;
+};
+
+struct nsim_ipsec {
+ struct nsim_sa sa[NSIM_IPSEC_MAX_SA_COUNT];
+ struct dentry *pfile;
+ u32 count;
+ u32 tx;
+ u32 ok;
+};
+
+struct nsim_ethtool {
+ bool rx;
+ bool tx;
+ bool report_stats_rx;
+ bool report_stats_tx;
+};
+
+struct netdevsim {
+ struct net_device *netdev;
+ struct nsim_dev *nsim_dev;
+ struct nsim_dev_port *nsim_dev_port;
+
+ u64 tx_packets;
+ u64 tx_bytes;
+ struct u64_stats_sync syncp;
+
+ struct nsim_bus_dev *nsim_bus_dev;
+
+ struct bpf_prog *bpf_offloaded;
+ u32 bpf_offloaded_id;
+
+ struct xdp_attachment_info xdp;
+ struct xdp_attachment_info xdp_hw;
+
+ bool bpf_tc_accept;
+ bool bpf_tc_non_bound_accept;
+ bool bpf_xdpdrv_accept;
+ bool bpf_xdpoffload_accept;
+
+ bool bpf_map_accept;
+ struct nsim_ipsec ipsec;
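+	/* Mirror of the UDP tunnel ports the "device" has been asked to
+	 * program; each entry packs the port number in the upper 16 bits
+	 * and the tunnel type in the lower bits (see udp_tunnels.c).
+	 */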
+ struct {
+ u32 inject_error;
+ u32 sleep;
+ u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
+ u32 (*ports)[NSIM_UDP_TUNNEL_N_PORTS];
+ struct debugfs_u32_array dfs_ports[2];
+ } udp_ports;
+
+ struct nsim_ethtool ethtool;
+};
+
+struct netdevsim *
+nsim_create(struct nsim_dev *nsim_dev, struct nsim_dev_port *nsim_dev_port);
+void nsim_destroy(struct netdevsim *ns);
+
+void nsim_ethtool_init(struct netdevsim *ns);
+
+void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev);
+int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
+ struct net_device *dev);
+void nsim_udp_tunnels_info_destroy(struct net_device *dev);
+
+#ifdef CONFIG_BPF_SYSCALL
+int nsim_bpf_dev_init(struct nsim_dev *nsim_dev);
+void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev);
+int nsim_bpf_init(struct netdevsim *ns);
+void nsim_bpf_uninit(struct netdevsim *ns);
+int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf);
+int nsim_bpf_disable_tc(struct netdevsim *ns);
+int nsim_bpf_setup_tc_block_cb(enum tc_setup_type type,
+ void *type_data, void *cb_priv);
+#else
+
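+/* Stubs used when CONFIG_BPF_SYSCALL is disabled. */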
+static inline int nsim_bpf_dev_init(struct nsim_dev *nsim_dev)
+{
+ return 0;
+}
+
+static inline void nsim_bpf_dev_exit(struct nsim_dev *nsim_dev)
+{
+}
+
+static inline int nsim_bpf_init(struct netdevsim *ns)
+{
+ return 0;
+}
+
+static inline void nsim_bpf_uninit(struct netdevsim *ns)
+{
+}
+
+static inline int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int nsim_bpf_disable_tc(struct netdevsim *ns)
+{
+ return 0;
+}
+
+static inline int
+nsim_bpf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
+ void *cb_priv)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+enum nsim_resource_id {
+ NSIM_RESOURCE_NONE, /* DEVLINK_RESOURCE_ID_PARENT_TOP */
+ NSIM_RESOURCE_IPV4,
+ NSIM_RESOURCE_IPV4_FIB,
+ NSIM_RESOURCE_IPV4_FIB_RULES,
+ NSIM_RESOURCE_IPV6,
+ NSIM_RESOURCE_IPV6_FIB,
+ NSIM_RESOURCE_IPV6_FIB_RULES,
+};
+
+struct nsim_dev_health {
+ struct devlink_health_reporter *empty_reporter;
+ struct devlink_health_reporter *dummy_reporter;
+ struct dentry *ddir;
+ char *recovered_break_msg;
+ u32 binary_len;
+ bool fail_recover;
+};
+
+int nsim_dev_health_init(struct nsim_dev *nsim_dev, struct devlink *devlink);
+void nsim_dev_health_exit(struct nsim_dev *nsim_dev);
+
+struct nsim_dev_port {
+ struct list_head list;
+ struct devlink_port devlink_port;
+ unsigned int port_index;
+ struct dentry *ddir;
+ struct netdevsim *ns;
+};
+
+struct nsim_dev {
+ struct nsim_bus_dev *nsim_bus_dev;
+ struct nsim_fib_data *fib_data;
+ struct nsim_trap_data *trap_data;
+ struct dentry *ddir;
+ struct dentry *ports_ddir;
+ struct dentry *take_snapshot;
+ struct bpf_offload_dev *bpf_dev;
+ bool bpf_bind_accept;
+ bool bpf_bind_verifier_accept;
+ u32 bpf_bind_verifier_delay;
+ struct dentry *ddir_bpf_bound_progs;
+ u32 prog_id_gen;
+ struct list_head bpf_bound_progs;
+ struct list_head bpf_bound_maps;
+ struct netdev_phys_item_id switch_id;
+ struct list_head port_list;
+ struct mutex port_list_lock; /* protects port list */
+ bool fw_update_status;
+ u32 fw_update_overwrite_mask;
+ u32 max_macs;
+ bool test1;
+ bool dont_allow_reload;
+ bool fail_reload;
+ struct devlink_region *dummy_region;
+ struct nsim_dev_health health;
+ struct flow_action_cookie *fa_cookie;
+ spinlock_t fa_cookie_lock; /* protects fa_cookie */
+ bool fail_trap_group_set;
+ bool fail_trap_policer_set;
+ bool fail_trap_policer_counter_get;
+ struct {
+ struct udp_tunnel_nic_shared utn_shared;
+ u32 __ports[2][NSIM_UDP_TUNNEL_N_PORTS];
+ bool sync_all;
+ bool open_only;
+ bool ipv4_only;
+ bool shared;
+ bool static_iana_vxlan;
+ u32 sleep;
+ } udp_ports;
+};
+
+static inline struct net *nsim_dev_net(struct nsim_dev *nsim_dev)
+{
+ return devlink_net(priv_to_devlink(nsim_dev));
+}
+
+int nsim_dev_init(void);
+void nsim_dev_exit(void);
+int nsim_dev_probe(struct nsim_bus_dev *nsim_bus_dev);
+void nsim_dev_remove(struct nsim_bus_dev *nsim_bus_dev);
+int nsim_dev_port_add(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index);
+int nsim_dev_port_del(struct nsim_bus_dev *nsim_bus_dev,
+ unsigned int port_index);
+
+struct nsim_fib_data *nsim_fib_create(struct devlink *devlink,
+ struct netlink_ext_ack *extack);
+void nsim_fib_destroy(struct devlink *devlink, struct nsim_fib_data *fib_data);
+u64 nsim_fib_get_val(struct nsim_fib_data *fib_data,
+ enum nsim_resource_id res_id, bool max);
+
+#if IS_ENABLED(CONFIG_XFRM_OFFLOAD)
+void nsim_ipsec_init(struct netdevsim *ns);
+void nsim_ipsec_teardown(struct netdevsim *ns);
+bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb);
+#else
+static inline void nsim_ipsec_init(struct netdevsim *ns)
+{
+}
+
+static inline void nsim_ipsec_teardown(struct netdevsim *ns)
+{
+}
+
+static inline bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb)
+{
+ return true;
+}
+#endif
+
+struct nsim_vf_config {
+ int link_state;
+ u16 min_tx_rate;
+ u16 max_tx_rate;
+ u16 vlan;
+ __be16 vlan_proto;
+ u16 qos;
+ u8 vf_mac[ETH_ALEN];
+ bool spoofchk_enabled;
+ bool trusted;
+ bool rss_query_enabled;
+};
+
+struct nsim_bus_dev {
+ struct device dev;
+ struct list_head list;
+ unsigned int port_count;
+	struct net *initial_net; /* Carries the net namespace pointer;
+				  * only valid during probe time.
+				  */
+ unsigned int num_vfs;
+ struct nsim_vf_config *vfconfigs;
+ /* Lock for devlink->reload_enabled in netdevsim module */
+ struct mutex nsim_bus_reload_lock;
+ bool init;
+};
+
+int nsim_bus_init(void);
+void nsim_bus_exit(void);
diff --git a/drivers/net/netdevsim/udp_tunnels.c b/drivers/net/netdevsim/udp_tunnels.c
new file mode 100644
index 000000000..02dc3123e
--- /dev/null
+++ b/drivers/net/netdevsim/udp_tunnels.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (c) 2020 Facebook Inc.
+
+#include <linux/debugfs.h>
+#include <linux/netdevice.h>
+#include <linux/slab.h>
+#include <net/udp_tunnel.h>
+
+#include "netdevsim.h"
+
+static int
+nsim_udp_tunnel_set_port(struct net_device *dev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ int ret;
+
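+	/* The "udp_ports_inject_error" debugfs knob makes the next callback
+	 * fail with the given errno; it is consumed (reset to 0) on use.
+	 */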
+ ret = -ns->udp_ports.inject_error;
+ ns->udp_ports.inject_error = 0;
+
+ if (ns->udp_ports.sleep)
+ msleep(ns->udp_ports.sleep);
+
+ if (!ret) {
+ if (ns->udp_ports.ports[table][entry]) {
+ WARN(1, "entry already in use\n");
+ ret = -EBUSY;
+ } else {
+ ns->udp_ports.ports[table][entry] =
+ be16_to_cpu(ti->port) << 16 | ti->type;
+ }
+ }
+
+ netdev_info(dev, "set [%d, %d] type %d family %d port %d - %d\n",
+ table, entry, ti->type, ti->sa_family, ntohs(ti->port),
+ ret);
+ return ret;
+}
+
+static int
+nsim_udp_tunnel_unset_port(struct net_device *dev, unsigned int table,
+ unsigned int entry, struct udp_tunnel_info *ti)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ int ret;
+
+ ret = -ns->udp_ports.inject_error;
+ ns->udp_ports.inject_error = 0;
+
+ if (ns->udp_ports.sleep)
+ msleep(ns->udp_ports.sleep);
+ if (!ret) {
+ u32 val = be16_to_cpu(ti->port) << 16 | ti->type;
+
+ if (val == ns->udp_ports.ports[table][entry]) {
+ ns->udp_ports.ports[table][entry] = 0;
+ } else {
+ WARN(1, "entry not installed %x vs %x\n",
+ val, ns->udp_ports.ports[table][entry]);
+ ret = -ENOENT;
+ }
+ }
+
+ netdev_info(dev, "unset [%d, %d] type %d family %d port %d - %d\n",
+ table, entry, ti->type, ti->sa_family, ntohs(ti->port),
+ ret);
+ return ret;
+}
+
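+/* Full-table sync: read every entry back from the core with
+ * udp_tunnel_nic_get_port() rather than handling individual add/remove calls.
+ */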
+static int
+nsim_udp_tunnel_sync_table(struct net_device *dev, unsigned int table)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct udp_tunnel_info ti;
+ unsigned int i;
+ int ret;
+
+ ret = -ns->udp_ports.inject_error;
+ ns->udp_ports.inject_error = 0;
+
+ for (i = 0; i < NSIM_UDP_TUNNEL_N_PORTS; i++) {
+ udp_tunnel_nic_get_port(dev, table, i, &ti);
+ ns->udp_ports.ports[table][i] =
+ be16_to_cpu(ti.port) << 16 | ti.type;
+ }
+
+ return ret;
+}
+
+static const struct udp_tunnel_nic_info nsim_udp_tunnel_info = {
+ .set_port = nsim_udp_tunnel_set_port,
+ .unset_port = nsim_udp_tunnel_unset_port,
+ .sync_table = nsim_udp_tunnel_sync_table,
+
+ .tables = {
+ {
+ .n_entries = NSIM_UDP_TUNNEL_N_PORTS,
+ .tunnel_types = UDP_TUNNEL_TYPE_VXLAN,
+ },
+ {
+ .n_entries = NSIM_UDP_TUNNEL_N_PORTS,
+ .tunnel_types = UDP_TUNNEL_TYPE_GENEVE |
+ UDP_TUNNEL_TYPE_VXLAN_GPE,
+ },
+ },
+};
+
+static ssize_t
+nsim_udp_tunnels_info_reset_write(struct file *file, const char __user *data,
+ size_t count, loff_t *ppos)
+{
+ struct net_device *dev = file->private_data;
+ struct netdevsim *ns = netdev_priv(dev);
+
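+	/* Forget all mirrored ports and ask the core to replay its state. */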
+ memset(ns->udp_ports.ports, 0, sizeof(ns->udp_ports.__ports));
+ rtnl_lock();
+ udp_tunnel_nic_reset_ntf(dev);
+ rtnl_unlock();
+
+ return count;
+}
+
+static const struct file_operations nsim_udp_tunnels_info_reset_fops = {
+ .open = simple_open,
+ .write = nsim_udp_tunnels_info_reset_write,
+ .llseek = generic_file_llseek,
+ .owner = THIS_MODULE,
+};
+
+int nsim_udp_tunnels_info_create(struct nsim_dev *nsim_dev,
+ struct net_device *dev)
+{
+ struct netdevsim *ns = netdev_priv(dev);
+ struct udp_tunnel_nic_info *info;
+
+ if (nsim_dev->udp_ports.shared && nsim_dev->udp_ports.open_only) {
+ dev_err(&nsim_dev->nsim_bus_dev->dev,
+ "shared can't be used in conjunction with open_only\n");
+ return -EINVAL;
+ }
+
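+	/* With "udp_ports_shared" the port table lives in the parent nsim_dev
+	 * and is shared by all its ports; otherwise each netdev has its own.
+	 */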
+ if (!nsim_dev->udp_ports.shared)
+ ns->udp_ports.ports = ns->udp_ports.__ports;
+ else
+ ns->udp_ports.ports = nsim_dev->udp_ports.__ports;
+
+ debugfs_create_u32("udp_ports_inject_error", 0600,
+ ns->nsim_dev_port->ddir,
+ &ns->udp_ports.inject_error);
+
+ ns->udp_ports.dfs_ports[0].array = ns->udp_ports.ports[0];
+ ns->udp_ports.dfs_ports[0].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
+ debugfs_create_u32_array("udp_ports_table0", 0400,
+ ns->nsim_dev_port->ddir,
+ &ns->udp_ports.dfs_ports[0]);
+
+ ns->udp_ports.dfs_ports[1].array = ns->udp_ports.ports[1];
+ ns->udp_ports.dfs_ports[1].n_elements = NSIM_UDP_TUNNEL_N_PORTS;
+ debugfs_create_u32_array("udp_ports_table1", 0400,
+ ns->nsim_dev_port->ddir,
+ &ns->udp_ports.dfs_ports[1]);
+
+ debugfs_create_file("udp_ports_reset", 0200, ns->nsim_dev_port->ddir,
+ dev, &nsim_udp_tunnels_info_reset_fops);
+
+	/* Note: it's not normal to allocate the info struct like this!
+	 * Drivers are expected to use a static const one; it is only
+	 * duplicated here so the test driver can tweak it at runtime.
+	 */
+ info = kmemdup(&nsim_udp_tunnel_info, sizeof(nsim_udp_tunnel_info),
+ GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+ ns->udp_ports.sleep = nsim_dev->udp_ports.sleep;
+
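+	/* Advertise either the set/unset_port callbacks or sync_table, so
+	 * only one of the two udp_tunnel_nic programming models is used.
+	 */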
+ if (nsim_dev->udp_ports.sync_all) {
+ info->set_port = NULL;
+ info->unset_port = NULL;
+ } else {
+ info->sync_table = NULL;
+ }
+
+ if (ns->udp_ports.sleep)
+ info->flags |= UDP_TUNNEL_NIC_INFO_MAY_SLEEP;
+ if (nsim_dev->udp_ports.open_only)
+ info->flags |= UDP_TUNNEL_NIC_INFO_OPEN_ONLY;
+ if (nsim_dev->udp_ports.ipv4_only)
+ info->flags |= UDP_TUNNEL_NIC_INFO_IPV4_ONLY;
+ if (nsim_dev->udp_ports.shared)
+ info->shared = &nsim_dev->udp_ports.utn_shared;
+ if (nsim_dev->udp_ports.static_iana_vxlan)
+ info->flags |= UDP_TUNNEL_NIC_INFO_STATIC_IANA_VXLAN;
+
+ dev->udp_tunnel_nic_info = info;
+ return 0;
+}
+
+void nsim_udp_tunnels_info_destroy(struct net_device *dev)
+{
+ kfree(dev->udp_tunnel_nic_info);
+ dev->udp_tunnel_nic_info = NULL;
+}
+
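+/* Device-level debugfs knobs that shape the udp_tunnel_nic_info built for
+ * ports created afterwards (see nsim_udp_tunnels_info_create() above).
+ */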
+void nsim_udp_tunnels_debugfs_create(struct nsim_dev *nsim_dev)
+{
+ debugfs_create_bool("udp_ports_sync_all", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.sync_all);
+ debugfs_create_bool("udp_ports_open_only", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.open_only);
+ debugfs_create_bool("udp_ports_ipv4_only", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.ipv4_only);
+ debugfs_create_bool("udp_ports_shared", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.shared);
+ debugfs_create_bool("udp_ports_static_iana_vxlan", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.static_iana_vxlan);
+ debugfs_create_u32("udp_ports_sleep", 0600, nsim_dev->ddir,
+ &nsim_dev->udp_ports.sleep);
+}