Diffstat (limited to 'drivers/net/ethernet/qualcomm/rmnet')
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/Kconfig             |  14
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/Makefile            |  11
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c      | 523
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h      | 101
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c    | 275
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h    |  16
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h         |  63
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c | 102
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c    | 711
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h     |  18
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c         | 409
 -rw-r--r--  drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h         |  25
12 files changed, 2268 insertions(+), 0 deletions(-)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/Kconfig b/drivers/net/ethernet/qualcomm/rmnet/Kconfig
new file mode 100644
index 0000000000..b4a575eb01
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/Kconfig
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# RMNET MAP driver
+#
+
+menuconfig RMNET
+ tristate "RmNet MAP driver"
+ default n
+ select GRO_CELLS
+ help
+ If you select this, you will enable the RMNET module which is used
+ for handling data in the multiplexing and aggregation protocol (MAP)
+ format in the embedded data path. RMNET devices can be attached to
+ any IP mode physical device.
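In practice an rmnet link is created on top of a MAP-capable real device (for example a WWAN netdev) using iproute2; a hypothetical invocation, with illustrative device names, looks like:

    ip link add link wwan0 name rmnet0 type rmnet mux_id 1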
diff --git a/drivers/net/ethernet/qualcomm/rmnet/Makefile b/drivers/net/ethernet/qualcomm/rmnet/Makefile
new file mode 100644
index 0000000000..8252e40bf5
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the RMNET module
+#
+
+rmnet-y := rmnet_config.o
+rmnet-y += rmnet_vnd.o
+rmnet-y += rmnet_handlers.o
+rmnet-y += rmnet_map_data.o
+rmnet-y += rmnet_map_command.o
+obj-$(CONFIG_RMNET) += rmnet.o
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
new file mode 100644
index 0000000000..5b69b9268c
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -0,0 +1,523 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * RMNET configuration engine
+ */
+
+#include <net/sock.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netdevice.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_vnd.h"
+#include "rmnet_private.h"
+#include "rmnet_map.h"
+
+/* Local Definitions and Declarations */
+
+static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
+ [IFLA_RMNET_MUX_ID] = { .type = NLA_U16 },
+ [IFLA_RMNET_FLAGS] = { .len = sizeof(struct ifla_rmnet_flags) },
+};
+
+static int rmnet_is_real_dev_registered(const struct net_device *real_dev)
+{
+ return rcu_access_pointer(real_dev->rx_handler) == rmnet_rx_handler;
+}
+
+/* Needs rtnl lock */
+struct rmnet_port*
+rmnet_get_port_rtnl(const struct net_device *real_dev)
+{
+ return rtnl_dereference(real_dev->rx_handler_data);
+}
+
+static int rmnet_unregister_real_device(struct net_device *real_dev)
+{
+ struct rmnet_port *port = rmnet_get_port_rtnl(real_dev);
+
+ if (port->nr_rmnet_devs)
+ return -EINVAL;
+
+ rmnet_map_tx_aggregate_exit(port);
+
+ netdev_rx_handler_unregister(real_dev);
+
+ kfree(port);
+
+ netdev_dbg(real_dev, "Removed from rmnet\n");
+ return 0;
+}
+
+static int rmnet_register_real_device(struct net_device *real_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_port *port;
+ int rc, entry;
+
+ ASSERT_RTNL();
+
+ if (rmnet_is_real_dev_registered(real_dev)) {
+ port = rmnet_get_port_rtnl(real_dev);
+ if (port->rmnet_mode != RMNET_EPMODE_VND) {
+ NL_SET_ERR_MSG_MOD(extack, "bridge device already exists");
+ return -EINVAL;
+ }
+
+ return 0;
+ }
+
+ port = kzalloc(sizeof(*port), GFP_KERNEL);
+ if (!port)
+ return -ENOMEM;
+
+ port->dev = real_dev;
+ rc = netdev_rx_handler_register(real_dev, rmnet_rx_handler, port);
+ if (rc) {
+ kfree(port);
+ return -EBUSY;
+ }
+
+ for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
+ INIT_HLIST_HEAD(&port->muxed_ep[entry]);
+
+ rmnet_map_tx_aggregate_init(port);
+
+ netdev_dbg(real_dev, "registered with rmnet\n");
+ return 0;
+}
+
+static void rmnet_unregister_bridge(struct rmnet_port *port)
+{
+ struct net_device *bridge_dev, *real_dev, *rmnet_dev;
+ struct rmnet_port *real_port;
+
+ if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
+ return;
+
+ rmnet_dev = port->rmnet_dev;
+ if (!port->nr_rmnet_devs) {
+ /* bridge device */
+ real_dev = port->bridge_ep;
+ bridge_dev = port->dev;
+
+ real_port = rmnet_get_port_rtnl(real_dev);
+ real_port->bridge_ep = NULL;
+ real_port->rmnet_mode = RMNET_EPMODE_VND;
+ } else {
+ /* real device */
+ bridge_dev = port->bridge_ep;
+
+ port->bridge_ep = NULL;
+ port->rmnet_mode = RMNET_EPMODE_VND;
+ }
+
+ netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
+ rmnet_unregister_real_device(bridge_dev);
+}
+
+static int rmnet_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u32 data_format = RMNET_FLAGS_INGRESS_DEAGGREGATION;
+ struct net_device *real_dev;
+ int mode = RMNET_EPMODE_VND;
+ struct rmnet_endpoint *ep;
+ struct rmnet_port *port;
+ int err = 0;
+ u16 mux_id;
+
+ if (!tb[IFLA_LINK]) {
+ NL_SET_ERR_MSG_MOD(extack, "link not specified");
+ return -EINVAL;
+ }
+
+ real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
+ if (!real_dev) {
+ NL_SET_ERR_MSG_MOD(extack, "link does not exist");
+ return -ENODEV;
+ }
+
+ ep = kzalloc(sizeof(*ep), GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+
+ err = rmnet_register_real_device(real_dev, extack);
+ if (err)
+ goto err0;
+
+ port = rmnet_get_port_rtnl(real_dev);
+ err = rmnet_vnd_newlink(mux_id, dev, port, real_dev, ep, extack);
+ if (err)
+ goto err1;
+
+ err = netdev_upper_dev_link(real_dev, dev, extack);
+ if (err < 0)
+ goto err2;
+
+ port->rmnet_mode = mode;
+ port->rmnet_dev = dev;
+
+ hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
+
+ if (data[IFLA_RMNET_FLAGS]) {
+ struct ifla_rmnet_flags *flags;
+
+ flags = nla_data(data[IFLA_RMNET_FLAGS]);
+ data_format &= ~flags->mask;
+ data_format |= flags->flags & flags->mask;
+ }
+
+ netdev_dbg(dev, "data format [0x%08X]\n", data_format);
+ port->data_format = data_format;
+
+ return 0;
+
+err2:
+ unregister_netdevice(dev);
+ rmnet_vnd_dellink(mux_id, port, ep);
+err1:
+ rmnet_unregister_real_device(real_dev);
+err0:
+ kfree(ep);
+ return err;
+}
+
+static void rmnet_dellink(struct net_device *dev, struct list_head *head)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct net_device *real_dev, *bridge_dev;
+ struct rmnet_port *real_port, *bridge_port;
+ struct rmnet_endpoint *ep;
+ u8 mux_id = priv->mux_id;
+
+ real_dev = priv->real_dev;
+
+ if (!rmnet_is_real_dev_registered(real_dev))
+ return;
+
+ real_port = rmnet_get_port_rtnl(real_dev);
+ bridge_dev = real_port->bridge_ep;
+ if (bridge_dev) {
+ bridge_port = rmnet_get_port_rtnl(bridge_dev);
+ rmnet_unregister_bridge(bridge_port);
+ }
+
+ ep = rmnet_get_endpoint(real_port, mux_id);
+ if (ep) {
+ hlist_del_init_rcu(&ep->hlnode);
+ rmnet_vnd_dellink(mux_id, real_port, ep);
+ kfree(ep);
+ }
+
+ netdev_upper_dev_unlink(real_dev, dev);
+ rmnet_unregister_real_device(real_dev);
+ unregister_netdevice_queue(dev, head);
+}
+
+static void rmnet_force_unassociate_device(struct net_device *real_dev)
+{
+ struct hlist_node *tmp_ep;
+ struct rmnet_endpoint *ep;
+ struct rmnet_port *port;
+ unsigned long bkt_ep;
+ LIST_HEAD(list);
+
+ port = rmnet_get_port_rtnl(real_dev);
+
+ if (port->nr_rmnet_devs) {
+ /* real device */
+ rmnet_unregister_bridge(port);
+ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+ unregister_netdevice_queue(ep->egress_dev, &list);
+ netdev_upper_dev_unlink(real_dev, ep->egress_dev);
+ rmnet_vnd_dellink(ep->mux_id, port, ep);
+ hlist_del_init_rcu(&ep->hlnode);
+ kfree(ep);
+ }
+ rmnet_unregister_real_device(real_dev);
+ unregister_netdevice_many(&list);
+ } else {
+ rmnet_unregister_bridge(port);
+ }
+}
+
+static int rmnet_config_notify_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct net_device *real_dev = netdev_notifier_info_to_dev(data);
+
+ if (!rmnet_is_real_dev_registered(real_dev))
+ return NOTIFY_DONE;
+
+ switch (event) {
+ case NETDEV_UNREGISTER:
+ netdev_dbg(real_dev, "Kernel unregister\n");
+ rmnet_force_unassociate_device(real_dev);
+ break;
+ case NETDEV_CHANGEMTU:
+ if (rmnet_vnd_validate_real_dev_mtu(real_dev))
+ return NOTIFY_BAD;
+ break;
+ default:
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block rmnet_dev_notifier __read_mostly = {
+ .notifier_call = rmnet_config_notify_cb,
+};
+
+static int rmnet_rtnl_validate(struct nlattr *tb[], struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ u16 mux_id;
+
+ if (!data || !data[IFLA_RMNET_MUX_ID]) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID not specified");
+ return -EINVAL;
+ }
+
+ mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+ if (mux_id > (RMNET_MAX_LOGICAL_EP - 1)) {
+ NL_SET_ERR_MSG_MOD(extack, "invalid MUX ID");
+ return -ERANGE;
+ }
+
+ return 0;
+}
+
+static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[],
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct net_device *real_dev;
+ struct rmnet_port *port;
+ u16 mux_id;
+
+ if (!dev)
+ return -ENODEV;
+
+ real_dev = priv->real_dev;
+ if (!rmnet_is_real_dev_registered(real_dev))
+ return -ENODEV;
+
+ port = rmnet_get_port_rtnl(real_dev);
+
+ if (data[IFLA_RMNET_MUX_ID]) {
+ mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+
+ if (mux_id != priv->mux_id) {
+ struct rmnet_endpoint *ep;
+
+ ep = rmnet_get_endpoint(port, priv->mux_id);
+ if (!ep)
+ return -ENODEV;
+
+ if (rmnet_get_endpoint(port, mux_id)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "MUX ID already exists");
+ return -EINVAL;
+ }
+
+ hlist_del_init_rcu(&ep->hlnode);
+ hlist_add_head_rcu(&ep->hlnode,
+ &port->muxed_ep[mux_id]);
+
+ ep->mux_id = mux_id;
+ priv->mux_id = mux_id;
+ }
+ }
+
+ if (data[IFLA_RMNET_FLAGS]) {
+ struct ifla_rmnet_flags *flags;
+ u32 old_data_format;
+
+ old_data_format = port->data_format;
+ flags = nla_data(data[IFLA_RMNET_FLAGS]);
+ port->data_format &= ~flags->mask;
+ port->data_format |= flags->flags & flags->mask;
+
+ if (rmnet_vnd_update_dev_mtu(port, real_dev)) {
+ port->data_format = old_data_format;
+ NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static size_t rmnet_get_size(const struct net_device *dev)
+{
+ return
+ /* IFLA_RMNET_MUX_ID */
+ nla_total_size(2) +
+ /* IFLA_RMNET_FLAGS */
+ nla_total_size(sizeof(struct ifla_rmnet_flags));
+}
+
+static int rmnet_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct net_device *real_dev;
+ struct ifla_rmnet_flags f;
+ struct rmnet_port *port;
+
+ real_dev = priv->real_dev;
+
+ if (nla_put_u16(skb, IFLA_RMNET_MUX_ID, priv->mux_id))
+ goto nla_put_failure;
+
+ if (rmnet_is_real_dev_registered(real_dev)) {
+ port = rmnet_get_port_rtnl(real_dev);
+ f.flags = port->data_format;
+ } else {
+ f.flags = 0;
+ }
+
+ f.mask = ~0;
+
+ if (nla_put(skb, IFLA_RMNET_FLAGS, sizeof(f), &f))
+ goto nla_put_failure;
+
+ return 0;
+
+nla_put_failure:
+ return -EMSGSIZE;
+}
+
+struct rtnl_link_ops rmnet_link_ops __read_mostly = {
+ .kind = "rmnet",
+ .maxtype = IFLA_RMNET_MAX,
+ .priv_size = sizeof(struct rmnet_priv),
+ .setup = rmnet_vnd_setup,
+ .validate = rmnet_rtnl_validate,
+ .newlink = rmnet_newlink,
+ .dellink = rmnet_dellink,
+ .get_size = rmnet_get_size,
+ .changelink = rmnet_changelink,
+ .policy = rmnet_policy,
+ .fill_info = rmnet_fill_info,
+};
+
+struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
+{
+ if (rmnet_is_real_dev_registered(real_dev))
+ return rcu_dereference_bh(real_dev->rx_handler_data);
+ else
+ return NULL;
+}
+
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id)
+{
+ struct rmnet_endpoint *ep;
+
+ hlist_for_each_entry_rcu(ep, &port->muxed_ep[mux_id], hlnode) {
+ if (ep->mux_id == mux_id)
+ return ep;
+ }
+
+ return NULL;
+}
+
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+ struct net_device *slave_dev,
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+ struct net_device *real_dev = priv->real_dev;
+ struct rmnet_port *port, *slave_port;
+ int err;
+
+ port = rmnet_get_port_rtnl(real_dev);
+
+ /* If there is more than one rmnet dev attached, it's probably being
+ * used for muxing. Skip the bridging in that case
+ */
+ if (port->nr_rmnet_devs > 1) {
+ NL_SET_ERR_MSG_MOD(extack, "more than one rmnet dev attached");
+ return -EINVAL;
+ }
+
+ if (port->rmnet_mode != RMNET_EPMODE_VND) {
+ NL_SET_ERR_MSG_MOD(extack, "more than one bridge dev attached");
+ return -EINVAL;
+ }
+
+ if (rmnet_is_real_dev_registered(slave_dev)) {
+ NL_SET_ERR_MSG_MOD(extack,
+ "slave cannot be another rmnet dev");
+
+ return -EBUSY;
+ }
+
+ err = rmnet_register_real_device(slave_dev, extack);
+ if (err)
+ return -EBUSY;
+
+ err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
+ extack);
+ if (err) {
+ rmnet_unregister_real_device(slave_dev);
+ return err;
+ }
+
+ slave_port = rmnet_get_port_rtnl(slave_dev);
+ slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+ slave_port->bridge_ep = real_dev;
+ slave_port->rmnet_dev = rmnet_dev;
+
+ port->rmnet_mode = RMNET_EPMODE_BRIDGE;
+ port->bridge_ep = slave_dev;
+
+ netdev_dbg(slave_dev, "registered with rmnet as slave\n");
+ return 0;
+}
+
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+ struct net_device *slave_dev)
+{
+ struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev);
+
+ rmnet_unregister_bridge(port);
+
+ netdev_dbg(slave_dev, "removed from rmnet as slave\n");
+ return 0;
+}
+
+/* Startup/Shutdown */
+
+static int __init rmnet_init(void)
+{
+ int rc;
+
+ rc = register_netdevice_notifier(&rmnet_dev_notifier);
+ if (rc != 0)
+ return rc;
+
+ rc = rtnl_link_register(&rmnet_link_ops);
+ if (rc != 0) {
+ unregister_netdevice_notifier(&rmnet_dev_notifier);
+ return rc;
+ }
+ return rc;
+}
+
+static void __exit rmnet_exit(void)
+{
+ rtnl_link_unregister(&rmnet_link_ops);
+ unregister_netdevice_notifier(&rmnet_dev_notifier);
+}
+
+module_init(rmnet_init)
+module_exit(rmnet_exit)
+MODULE_ALIAS_RTNL_LINK("rmnet");
+MODULE_LICENSE("GPL v2");
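The IFLA_RMNET_FLAGS handling above follows a flags/mask pattern: only the bits selected by the mask are copied from the request, so userspace can toggle one data-format feature without knowing the rest of the state. A minimal standalone sketch of that update rule (plain C, outside the kernel):

    #include <stdint.h>

    /* Mirrors the update in rmnet_newlink()/rmnet_changelink():
     * bits selected by mask are taken from flags, all other bits
     * of the current data format are preserved.
     */
    static uint32_t apply_rmnet_flags(uint32_t data_format,
                                      uint32_t flags, uint32_t mask)
    {
        data_format &= ~mask;          /* clear the selected bits */
        data_format |= flags & mask;   /* set them from the request */
        return data_format;
    }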
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
new file mode 100644
index 0000000000..ed112d51ac
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -0,0 +1,101 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2014, 2016-2018, 2021 The Linux Foundation.
+ * All rights reserved.
+ *
+ * RMNET Data configuration engine
+ */
+
+#include <linux/skbuff.h>
+#include <linux/time.h>
+#include <net/gro_cells.h>
+
+#ifndef _RMNET_CONFIG_H_
+#define _RMNET_CONFIG_H_
+
+#define RMNET_MAX_LOGICAL_EP 255
+
+struct rmnet_endpoint {
+ u8 mux_id;
+ struct net_device *egress_dev;
+ struct hlist_node hlnode;
+};
+
+struct rmnet_egress_agg_params {
+ u32 bytes;
+ u32 count;
+ u64 time_nsec;
+};
+
+/* One instance of this structure is instantiated for each real_dev associated
+ * with rmnet.
+ */
+struct rmnet_port {
+ struct net_device *dev;
+ u32 data_format;
+ u8 nr_rmnet_devs;
+ u8 rmnet_mode;
+ struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
+ struct net_device *bridge_ep;
+ struct net_device *rmnet_dev;
+
+ /* Egress aggregation information */
+ struct rmnet_egress_agg_params egress_agg_params;
+ /* Protect aggregation related elements */
+ spinlock_t agg_lock;
+ struct sk_buff *skbagg_head;
+ struct sk_buff *skbagg_tail;
+ int agg_state;
+ u8 agg_count;
+ struct timespec64 agg_time;
+ struct timespec64 agg_last;
+ struct hrtimer hrtimer;
+ struct work_struct agg_wq;
+};
+
+extern struct rtnl_link_ops rmnet_link_ops;
+
+struct rmnet_vnd_stats {
+ u64 rx_pkts;
+ u64 rx_bytes;
+ u64 tx_pkts;
+ u64 tx_bytes;
+ u32 tx_drops;
+};
+
+struct rmnet_pcpu_stats {
+ struct rmnet_vnd_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+struct rmnet_priv_stats {
+ u64 csum_ok;
+ u64 csum_ip4_header_bad;
+ u64 csum_valid_unset;
+ u64 csum_validation_failed;
+ u64 csum_err_bad_buffer;
+ u64 csum_err_invalid_ip_version;
+ u64 csum_err_invalid_transport;
+ u64 csum_fragmented_pkt;
+ u64 csum_skipped;
+ u64 csum_sw;
+ u64 csum_hw;
+};
+
+struct rmnet_priv {
+ u8 mux_id;
+ struct net_device *real_dev;
+ struct rmnet_pcpu_stats __percpu *pcpu_stats;
+ struct gro_cells gro_cells;
+ struct rmnet_priv_stats stats;
+};
+
+struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev);
+struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
+int rmnet_add_bridge(struct net_device *rmnet_dev,
+ struct net_device *slave_dev,
+ struct netlink_ext_ack *extack);
+int rmnet_del_bridge(struct net_device *rmnet_dev,
+ struct net_device *slave_dev);
+struct rmnet_port*
+rmnet_get_port_rtnl(const struct net_device *real_dev);
+#endif /* _RMNET_CONFIG_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
new file mode 100644
index 0000000000..9f3479500f
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data ingress/egress handler
+ */
+
+#include <linux/netdevice.h>
+#include <linux/netdev_features.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
+#include "rmnet_private.h"
+#include "rmnet_config.h"
+#include "rmnet_vnd.h"
+#include "rmnet_map.h"
+#include "rmnet_handlers.h"
+
+#define RMNET_IP_VERSION_4 0x40
+#define RMNET_IP_VERSION_6 0x60
+
+/* Helper Functions */
+
+static void rmnet_set_skb_proto(struct sk_buff *skb)
+{
+ switch (skb->data[0] & 0xF0) {
+ case RMNET_IP_VERSION_4:
+ skb->protocol = htons(ETH_P_IP);
+ break;
+ case RMNET_IP_VERSION_6:
+ skb->protocol = htons(ETH_P_IPV6);
+ break;
+ default:
+ skb->protocol = htons(ETH_P_MAP);
+ break;
+ }
+}
+
+/* Generic handler */
+
+static void
+rmnet_deliver_skb(struct sk_buff *skb)
+{
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
+
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ rmnet_vnd_rx_fixup(skb, skb->dev);
+
+ skb->pkt_type = PACKET_HOST;
+ skb_set_mac_header(skb, 0);
+ gro_cells_receive(&priv->gro_cells, skb);
+}
+
+/* MAP handler */
+
+static void
+__rmnet_map_ingress_handler(struct sk_buff *skb,
+ struct rmnet_port *port)
+{
+ struct rmnet_map_header *map_header = (void *)skb->data;
+ struct rmnet_endpoint *ep;
+ u16 len, pad;
+ u8 mux_id;
+
+ if (map_header->flags & MAP_CMD_FLAG) {
+ /* Packet contains a MAP command (not data) */
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_COMMANDS)
+ return rmnet_map_command(skb, port);
+
+ goto free_skb;
+ }
+
+ mux_id = map_header->mux_id;
+ pad = map_header->flags & MAP_PAD_LEN_MASK;
+ len = ntohs(map_header->pkt_len) - pad;
+
+ if (mux_id >= RMNET_MAX_LOGICAL_EP)
+ goto free_skb;
+
+ ep = rmnet_get_endpoint(port, mux_id);
+ if (!ep)
+ goto free_skb;
+
+ skb->dev = ep->egress_dev;
+
+ if ((port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) &&
+ (map_header->flags & MAP_NEXT_HEADER_FLAG)) {
+ if (rmnet_map_process_next_hdr_packet(skb, len))
+ goto free_skb;
+ skb_pull(skb, sizeof(*map_header));
+ rmnet_set_skb_proto(skb);
+ } else {
+ /* Subtract MAP header */
+ skb_pull(skb, sizeof(*map_header));
+ rmnet_set_skb_proto(skb);
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4 &&
+ !rmnet_map_checksum_downlink_packet(skb, len + pad))
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+
+ skb_trim(skb, len);
+ rmnet_deliver_skb(skb);
+ return;
+
+free_skb:
+ kfree_skb(skb);
+}
+
+static void
+rmnet_map_ingress_handler(struct sk_buff *skb,
+ struct rmnet_port *port)
+{
+ struct sk_buff *skbn;
+
+ if (skb->dev->type == ARPHRD_ETHER) {
+ if (pskb_expand_head(skb, ETH_HLEN, 0, GFP_ATOMIC)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ skb_push(skb, ETH_HLEN);
+ }
+
+ if (port->data_format & RMNET_FLAGS_INGRESS_DEAGGREGATION) {
+ while ((skbn = rmnet_map_deaggregate(skb, port)) != NULL)
+ __rmnet_map_ingress_handler(skbn, port);
+
+ consume_skb(skb);
+ } else {
+ __rmnet_map_ingress_handler(skb, port);
+ }
+}
+
+static int rmnet_map_egress_handler(struct sk_buff *skb,
+ struct rmnet_port *port, u8 mux_id,
+ struct net_device *orig_dev)
+{
+ int required_headroom, additional_header_len, csum_type = 0;
+ struct rmnet_map_header *map_header;
+
+ additional_header_len = 0;
+ required_headroom = sizeof(struct rmnet_map_header);
+
+ if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4) {
+ additional_header_len = sizeof(struct rmnet_map_ul_csum_header);
+ csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV4;
+ } else if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5) {
+ additional_header_len = sizeof(struct rmnet_map_v5_csum_header);
+ csum_type = RMNET_FLAGS_EGRESS_MAP_CKSUMV5;
+ }
+
+ required_headroom += additional_header_len;
+
+ if (skb_cow_head(skb, required_headroom) < 0)
+ return -ENOMEM;
+
+ if (csum_type)
+ rmnet_map_checksum_uplink_packet(skb, port, orig_dev,
+ csum_type);
+
+ map_header = rmnet_map_add_map_header(skb, additional_header_len,
+ port, 0);
+ if (!map_header)
+ return -ENOMEM;
+
+ map_header->mux_id = mux_id;
+
+ if (READ_ONCE(port->egress_agg_params.count) > 1) {
+ unsigned int len;
+
+ len = rmnet_map_tx_aggregate(skb, port, orig_dev);
+ if (likely(len)) {
+ rmnet_vnd_tx_fixup_len(len, orig_dev);
+ return -EINPROGRESS;
+ }
+ return -ENOMEM;
+ }
+
+ skb->protocol = htons(ETH_P_MAP);
+ return 0;
+}
+
+static void
+rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
+{
+ if (skb_mac_header_was_set(skb))
+ skb_push(skb, skb->mac_len);
+
+ if (bridge_dev) {
+ skb->dev = bridge_dev;
+ dev_queue_xmit(skb);
+ }
+}
+
+/* Ingress / Egress Entry Points */
+
+/* Processes packet as per ingress data format for receiving device. Logical
+ * endpoint is determined from packet inspection. Packet is then sent to the
+ * egress device listed in the logical endpoint configuration.
+ */
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
+{
+ struct sk_buff *skb = *pskb;
+ struct rmnet_port *port;
+ struct net_device *dev;
+
+ if (!skb)
+ goto done;
+
+ if (skb_linearize(skb)) {
+ kfree_skb(skb);
+ goto done;
+ }
+
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ return RX_HANDLER_PASS;
+
+ dev = skb->dev;
+ port = rmnet_get_port_rcu(dev);
+ if (unlikely(!port)) {
+ dev_core_stats_rx_nohandler_inc(skb->dev);
+ kfree_skb(skb);
+ goto done;
+ }
+
+ switch (port->rmnet_mode) {
+ case RMNET_EPMODE_VND:
+ rmnet_map_ingress_handler(skb, port);
+ break;
+ case RMNET_EPMODE_BRIDGE:
+ rmnet_bridge_handler(skb, port->bridge_ep);
+ break;
+ }
+
+done:
+ return RX_HANDLER_CONSUMED;
+}
+
+/* Modifies packet as per logical endpoint configuration and egress data format
+ * for egress device configured in logical endpoint. Packet is then transmitted
+ * on the egress device.
+ */
+void rmnet_egress_handler(struct sk_buff *skb)
+{
+ struct net_device *orig_dev;
+ struct rmnet_port *port;
+ struct rmnet_priv *priv;
+ u8 mux_id;
+ int err;
+
+ sk_pacing_shift_update(skb->sk, 8);
+
+ orig_dev = skb->dev;
+ priv = netdev_priv(orig_dev);
+ skb->dev = priv->real_dev;
+ mux_id = priv->mux_id;
+
+ port = rmnet_get_port_rcu(skb->dev);
+ if (!port)
+ goto drop;
+
+ err = rmnet_map_egress_handler(skb, port, mux_id, orig_dev);
+ if (err == -ENOMEM)
+ goto drop;
+ else if (err == -EINPROGRESS)
+ return;
+
+ rmnet_vnd_tx_fixup(skb, orig_dev);
+
+ dev_queue_xmit(skb);
+ return;
+
+drop:
+ this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+ kfree_skb(skb);
+}
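The ingress path above loops rmnet_map_deaggregate() until the aggregated skb is exhausted, relying on each packet being framed by a 4-byte MAP header (flags, mux ID, big-endian length). A self-contained sketch of walking such a buffer in user space; the struct and names here are illustrative, and MAPv4 checksum trailers and MAPv5 next headers are ignored:

    #include <stdint.h>
    #include <stddef.h>
    #include <arpa/inet.h>

    struct map_hdr {               /* mirrors struct rmnet_map_header */
        uint8_t  flags;            /* bit 7 cmd, bit 6 next hdr, bits 5:0 pad */
        uint8_t  mux_id;
        uint16_t pkt_len;          /* big endian, payload plus pad */
    };

    static void walk_map_frames(const uint8_t *buf, size_t len)
    {
        while (len >= sizeof(struct map_hdr)) {
            const struct map_hdr *h = (const void *)buf;
            size_t frame = sizeof(*h) + ntohs(h->pkt_len);

            if (!h->pkt_len || frame > len)
                break;             /* runt or truncated frame */

            /* h->mux_id selects the logical endpoint; the IP packet
             * is the frame minus the header and the pad bytes
             */
            buf += frame;
            len -= frame;
        }
    }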
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
new file mode 100644
index 0000000000..c4571dc323
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013, 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data ingress/egress handler
+ */
+
+#ifndef _RMNET_HANDLERS_H_
+#define _RMNET_HANDLERS_H_
+
+#include "rmnet_config.h"
+
+void rmnet_egress_handler(struct sk_buff *skb);
+
+rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb);
+
+#endif /* _RMNET_HANDLERS_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
new file mode 100644
index 0000000000..b702840955
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -0,0 +1,63 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _RMNET_MAP_H_
+#define _RMNET_MAP_H_
+#include <linux/if_rmnet.h>
+
+struct rmnet_map_control_command {
+ u8 command_name;
+ u8 cmd_type:2;
+ u8 reserved:6;
+ u16 reserved2;
+ u32 transaction_id;
+ union {
+ struct {
+ u16 ip_family:2;
+ u16 reserved:14;
+ __be16 flow_control_seq_num;
+ __be32 qos_id;
+ } flow_control;
+ DECLARE_FLEX_ARRAY(u8, data);
+ };
+} __aligned(1);
+
+enum rmnet_map_commands {
+ RMNET_MAP_COMMAND_NONE,
+ RMNET_MAP_COMMAND_FLOW_DISABLE,
+ RMNET_MAP_COMMAND_FLOW_ENABLE,
+ /* These should always be the last 2 elements */
+ RMNET_MAP_COMMAND_UNKNOWN,
+ RMNET_MAP_COMMAND_ENUM_LENGTH
+};
+
+#define RMNET_MAP_COMMAND_REQUEST 0
+#define RMNET_MAP_COMMAND_ACK 1
+#define RMNET_MAP_COMMAND_UNSUPPORTED 2
+#define RMNET_MAP_COMMAND_INVALID 3
+
+#define RMNET_MAP_NO_PAD_BYTES 0
+#define RMNET_MAP_ADD_PAD_BYTES 1
+
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port);
+struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
+ int hdrlen,
+ struct rmnet_port *port,
+ int pad);
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port);
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len);
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev,
+ int csum_type);
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb, u16 len);
+unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
+ struct net_device *orig_dev);
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port);
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port);
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
+ u32 count, u32 time);
+
+#endif /* _RMNET_MAP_H_ */
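A MAP command frame is simply a MAP header with MAP_CMD_FLAG set, followed immediately by the rmnet_map_control_command body declared above (16 bytes through the flow_control member). A hypothetical user-space sketch of composing a flow-disable request, with the constants spelled out since this runs outside the kernel:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    /* Builds a FLOW_DISABLE request for one mux ID into buf;
     * returns the frame length (4-byte MAP header + 16-byte body).
     */
    static size_t build_flow_disable(uint8_t *buf, uint8_t mux_id)
    {
        buf[0] = 0x80;             /* MAP_CMD_FLAG, no pad */
        buf[1] = mux_id;
        buf[2] = 0;                /* pkt_len = htons(16): command body */
        buf[3] = 16;
        buf[4] = 1;                /* RMNET_MAP_COMMAND_FLOW_DISABLE */
        buf[5] = 0;                /* cmd_type = RMNET_MAP_COMMAND_REQUEST */
        memset(buf + 6, 0, 14);    /* reserved, transaction id, flow info */
        return 20;
    }

rmnet_map_command() (next file) delivers such a frame to rmnet_map_do_flow_control() and, on success, echoes it back with cmd_type rewritten to RMNET_MAP_COMMAND_ACK.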
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
new file mode 100644
index 0000000000..add0f5ade2
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -0,0 +1,102 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <linux/netdevice.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+#include "rmnet_private.h"
+#include "rmnet_vnd.h"
+
+static u8 rmnet_map_do_flow_control(struct sk_buff *skb,
+ struct rmnet_port *port,
+ int enable)
+{
+ struct rmnet_map_header *map_header = (void *)skb->data;
+ struct rmnet_endpoint *ep;
+ struct net_device *vnd;
+ u8 mux_id;
+ int r;
+
+ mux_id = map_header->mux_id;
+
+ if (mux_id >= RMNET_MAX_LOGICAL_EP) {
+ kfree_skb(skb);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ ep = rmnet_get_endpoint(port, mux_id);
+ if (!ep) {
+ kfree_skb(skb);
+ return RX_HANDLER_CONSUMED;
+ }
+
+ vnd = ep->egress_dev;
+
+ /* Ignore the IP family and pass the sequence number for both the v4
+ * and v6 cases. User space does not support creating dedicated flows
+ * for the two protocols
+ */
+ r = rmnet_vnd_do_flow_control(vnd, enable);
+ if (r) {
+ kfree_skb(skb);
+ return RMNET_MAP_COMMAND_UNSUPPORTED;
+ } else {
+ return RMNET_MAP_COMMAND_ACK;
+ }
+}
+
+static void rmnet_map_send_ack(struct sk_buff *skb,
+ unsigned char type,
+ struct rmnet_port *port)
+{
+ struct rmnet_map_header *map_header = (void *)skb->data;
+ struct rmnet_map_control_command *cmd;
+ struct net_device *dev = skb->dev;
+
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4)
+ skb_trim(skb,
+ skb->len - sizeof(struct rmnet_map_dl_csum_trailer));
+
+ skb->protocol = htons(ETH_P_MAP);
+
+ /* Command data immediately follows the MAP header */
+ cmd = (struct rmnet_map_control_command *)(map_header + 1);
+ cmd->cmd_type = type & 0x03;
+
+ netif_tx_lock(dev);
+ dev->netdev_ops->ndo_start_xmit(skb, dev);
+ netif_tx_unlock(dev);
+}
+
+/* Process MAP command frame and send N/ACK message as appropriate. Message cmd
+ * name is decoded here and appropriate handler is called.
+ */
+void rmnet_map_command(struct sk_buff *skb, struct rmnet_port *port)
+{
+ struct rmnet_map_header *map_header = (void *)skb->data;
+ struct rmnet_map_control_command *cmd;
+ unsigned char command_name;
+ unsigned char rc = 0;
+
+ /* Command data immediately follows the MAP header */
+ cmd = (struct rmnet_map_control_command *)(map_header + 1);
+ command_name = cmd->command_name;
+
+ switch (command_name) {
+ case RMNET_MAP_COMMAND_FLOW_ENABLE:
+ rc = rmnet_map_do_flow_control(skb, port, 1);
+ break;
+
+ case RMNET_MAP_COMMAND_FLOW_DISABLE:
+ rc = rmnet_map_do_flow_control(skb, port, 0);
+ break;
+
+ default:
+ rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+ kfree_skb(skb);
+ break;
+ }
+ if (rc == RMNET_MAP_COMMAND_ACK)
+ rmnet_map_send_ack(skb, rc, port);
+}
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
new file mode 100644
index 0000000000..a5e3d1a883
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -0,0 +1,711 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, 2021, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data MAP protocol
+ */
+
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/bitfield.h>
+#include "rmnet_config.h"
+#include "rmnet_map.h"
+#include "rmnet_private.h"
+#include "rmnet_vnd.h"
+
+#define RMNET_MAP_DEAGGR_SPACING 64
+#define RMNET_MAP_DEAGGR_HEADROOM (RMNET_MAP_DEAGGR_SPACING / 2)
+
+static __sum16 *rmnet_map_get_csum_field(unsigned char protocol,
+ const void *txporthdr)
+{
+ if (protocol == IPPROTO_TCP)
+ return &((struct tcphdr *)txporthdr)->check;
+
+ if (protocol == IPPROTO_UDP)
+ return &((struct udphdr *)txporthdr)->check;
+
+ return NULL;
+}
+
+static int
+rmnet_map_ipv4_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
+{
+ struct iphdr *ip4h = (struct iphdr *)skb->data;
+ void *txporthdr = skb->data + ip4h->ihl * 4;
+ __sum16 *csum_field, pseudo_csum;
+ __sum16 ip_payload_csum;
+
+ /* Computing the checksum over just the IPv4 header--including its
+ * checksum field--should yield 0. If it doesn't, the IP header
+ * is bad, so return an error and let the IP layer drop it.
+ */
+ if (ip_fast_csum(ip4h, ip4h->ihl)) {
+ priv->stats.csum_ip4_header_bad++;
+ return -EINVAL;
+ }
+
+ /* We don't support checksum offload on IPv4 fragments */
+ if (ip_is_fragment(ip4h)) {
+ priv->stats.csum_fragmented_pkt++;
+ return -EOPNOTSUPP;
+ }
+
+ /* Checksum offload is only supported for UDP and TCP protocols */
+ csum_field = rmnet_map_get_csum_field(ip4h->protocol, txporthdr);
+ if (!csum_field) {
+ priv->stats.csum_err_invalid_transport++;
+ return -EPROTONOSUPPORT;
+ }
+
+ /* RFC 768: UDP checksum is optional for IPv4, and is 0 if unused */
+ if (!*csum_field && ip4h->protocol == IPPROTO_UDP) {
+ priv->stats.csum_skipped++;
+ return 0;
+ }
+
+ /* The checksum value in the trailer is computed over the entire
+ * IP packet, including the IP header and payload. To derive the
+ * transport checksum from this, we first subtract the contribution
+ * of the IP header from the trailer checksum. We then add the
+ * checksum computed over the pseudo header.
+ *
+ * We verified above that the IP header contributes zero to the
+ * trailer checksum. Therefore the checksum in the trailer is
+ * just the checksum computed over the IP payload.
+ *
+ * If the IP payload arrives intact, adding the pseudo header
+ * checksum to the IP payload checksum will yield 0xffff (negative
+ * zero). This means the trailer checksum and the pseudo checksum
+ * are additive inverses of each other. Put another way, the
+ * message passes the checksum test if the trailer checksum value
+ * is the negated pseudo header checksum.
+ *
+ * Knowing this, we don't even need to examine the transport
+ * header checksum value; it is already accounted for in the
+ * checksum value found in the trailer.
+ */
+ ip_payload_csum = csum_trailer->csum_value;
+
+ pseudo_csum = csum_tcpudp_magic(ip4h->saddr, ip4h->daddr,
+ ntohs(ip4h->tot_len) - ip4h->ihl * 4,
+ ip4h->protocol, 0);
+
+ /* The cast is required to ensure only the low 16 bits are examined */
+ if (ip_payload_csum != (__sum16)~pseudo_csum) {
+ priv->stats.csum_validation_failed++;
+ return -EINVAL;
+ }
+
+ priv->stats.csum_ok++;
+ return 0;
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
+{
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)skb->data;
+ void *txporthdr = skb->data + sizeof(*ip6h);
+ __sum16 *csum_field, pseudo_csum;
+ __sum16 ip6_payload_csum;
+ __be16 ip_header_csum;
+
+ /* Checksum offload is only supported for UDP and TCP protocols;
+ * the packet cannot include any IPv6 extension headers
+ */
+ csum_field = rmnet_map_get_csum_field(ip6h->nexthdr, txporthdr);
+ if (!csum_field) {
+ priv->stats.csum_err_invalid_transport++;
+ return -EPROTONOSUPPORT;
+ }
+
+ /* The checksum value in the trailer is computed over the entire
+ * IP packet, including the IP header and payload. To derive the
+ * transport checksum from this, we first subtract the contribution
+ * of the IP header from the trailer checksum. We then add the
+ * checksum computed over the pseudo header.
+ */
+ ip_header_csum = (__force __be16)ip_fast_csum(ip6h, sizeof(*ip6h) / 4);
+ ip6_payload_csum = csum16_sub(csum_trailer->csum_value, ip_header_csum);
+
+ pseudo_csum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ ntohs(ip6h->payload_len),
+ ip6h->nexthdr, 0);
+
+ /* It's sufficient to compare the IP payload checksum with the
+ * negated pseudo checksum to determine whether the packet
+ * checksum was good. (See further explanation in comments
+ * in rmnet_map_ipv4_dl_csum_trailer()).
+ *
+ * The cast is required to ensure only the low 16 bits are
+ * examined.
+ */
+ if (ip6_payload_csum != (__sum16)~pseudo_csum) {
+ priv->stats.csum_validation_failed++;
+ return -EINVAL;
+ }
+
+ priv->stats.csum_ok++;
+ return 0;
+}
+#else
+static int
+rmnet_map_ipv6_dl_csum_trailer(struct sk_buff *skb,
+ struct rmnet_map_dl_csum_trailer *csum_trailer,
+ struct rmnet_priv *priv)
+{
+ return 0;
+}
+#endif
+
+static void rmnet_map_complement_ipv4_txporthdr_csum_field(struct iphdr *ip4h)
+{
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = (void *)ip4h + ip4h->ihl * 4;
+
+ if (ip4h->protocol == IPPROTO_TCP || ip4h->protocol == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip4h->protocol, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv4_ul_csum_header(struct iphdr *iphdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ u16 val;
+
+ val = MAP_CSUM_UL_ENABLED_FLAG;
+ if (iphdr->protocol == IPPROTO_UDP)
+ val |= MAP_CSUM_UL_UDP_FLAG;
+ val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
+
+ ul_header->csum_start_offset = htons(skb_network_header_len(skb));
+ ul_header->csum_info = htons(val);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv4_txporthdr_csum_field(iphdr);
+}
+
+#if IS_ENABLED(CONFIG_IPV6)
+static void
+rmnet_map_complement_ipv6_txporthdr_csum_field(struct ipv6hdr *ip6h)
+{
+ void *txphdr;
+ u16 *csum;
+
+ txphdr = ip6h + 1;
+
+ if (ip6h->nexthdr == IPPROTO_TCP || ip6h->nexthdr == IPPROTO_UDP) {
+ csum = (u16 *)rmnet_map_get_csum_field(ip6h->nexthdr, txphdr);
+ *csum = ~(*csum);
+ }
+}
+
+static void
+rmnet_map_ipv6_ul_csum_header(struct ipv6hdr *ipv6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+ u16 val;
+
+ val = MAP_CSUM_UL_ENABLED_FLAG;
+ if (ipv6hdr->nexthdr == IPPROTO_UDP)
+ val |= MAP_CSUM_UL_UDP_FLAG;
+ val |= skb->csum_offset & MAP_CSUM_UL_OFFSET_MASK;
+
+ ul_header->csum_start_offset = htons(skb_network_header_len(skb));
+ ul_header->csum_info = htons(val);
+
+ skb->ip_summed = CHECKSUM_NONE;
+
+ rmnet_map_complement_ipv6_txporthdr_csum_field(ipv6hdr);
+}
+#else
+static void
+rmnet_map_ipv6_ul_csum_header(void *ip6hdr,
+ struct rmnet_map_ul_csum_header *ul_header,
+ struct sk_buff *skb)
+{
+}
+#endif
+
+static void rmnet_map_v5_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev)
+{
+ struct rmnet_priv *priv = netdev_priv(orig_dev);
+ struct rmnet_map_v5_csum_header *ul_header;
+
+ ul_header = skb_push(skb, sizeof(*ul_header));
+ memset(ul_header, 0, sizeof(*ul_header));
+ ul_header->header_info = u8_encode_bits(RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ void *iph = ip_hdr(skb);
+ __sum16 *check;
+ void *trans;
+ u8 proto;
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ u16 ip_len = ((struct iphdr *)iph)->ihl * 4;
+
+ proto = ((struct iphdr *)iph)->protocol;
+ trans = iph + ip_len;
+ } else if (IS_ENABLED(CONFIG_IPV6) &&
+ skb->protocol == htons(ETH_P_IPV6)) {
+ u16 ip_len = sizeof(struct ipv6hdr);
+
+ proto = ((struct ipv6hdr *)iph)->nexthdr;
+ trans = iph + ip_len;
+ } else {
+ priv->stats.csum_err_invalid_ip_version++;
+ goto sw_csum;
+ }
+
+ check = rmnet_map_get_csum_field(proto, trans);
+ if (check) {
+ skb->ip_summed = CHECKSUM_NONE;
+ /* Ask for checksum offloading */
+ ul_header->csum_info |= MAPV5_CSUMINFO_VALID_FLAG;
+ priv->stats.csum_hw++;
+ return;
+ }
+ }
+
+sw_csum:
+ priv->stats.csum_sw++;
+}
+
+/* Adds MAP header to front of skb->data
+ * Padding is calculated and set appropriately in MAP header. Mux ID is
+ * initialized to 0.
+ */
+struct rmnet_map_header *rmnet_map_add_map_header(struct sk_buff *skb,
+ int hdrlen,
+ struct rmnet_port *port,
+ int pad)
+{
+ struct rmnet_map_header *map_header;
+ u32 padding, map_datalen;
+
+ map_datalen = skb->len - hdrlen;
+ map_header = (struct rmnet_map_header *)
+ skb_push(skb, sizeof(struct rmnet_map_header));
+ memset(map_header, 0, sizeof(struct rmnet_map_header));
+
+ /* Set next_hdr bit for csum offload packets */
+ if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV5)
+ map_header->flags |= MAP_NEXT_HEADER_FLAG;
+
+ if (pad == RMNET_MAP_NO_PAD_BYTES) {
+ map_header->pkt_len = htons(map_datalen);
+ return map_header;
+ }
+
+ BUILD_BUG_ON(MAP_PAD_LEN_MASK < 3);
+ padding = ALIGN(map_datalen, 4) - map_datalen;
+
+ if (padding == 0)
+ goto done;
+
+ if (skb_tailroom(skb) < padding)
+ return NULL;
+
+ skb_put_zero(skb, padding);
+
+done:
+ map_header->pkt_len = htons(map_datalen + padding);
+ /* This is a data packet, so the CMD bit is 0 */
+ map_header->flags = padding & MAP_PAD_LEN_MASK;
+
+ return map_header;
+}
+
+/* Deaggregates a single packet
+ * A whole new buffer is allocated for each portion of an aggregated frame.
+ * Caller should keep calling deaggregate() on the source skb until NULL is
+ * returned, indicating that there are no more packets to deaggregate. Caller
+ * is responsible for freeing the original skb.
+ */
+struct sk_buff *rmnet_map_deaggregate(struct sk_buff *skb,
+ struct rmnet_port *port)
+{
+ struct rmnet_map_v5_csum_header *next_hdr = NULL;
+ struct rmnet_map_header *maph;
+ void *data = skb->data;
+ struct sk_buff *skbn;
+ u8 nexthdr_type;
+ u32 packet_len;
+
+ if (skb->len == 0)
+ return NULL;
+
+ maph = (struct rmnet_map_header *)skb->data;
+ packet_len = ntohs(maph->pkt_len) + sizeof(*maph);
+
+ if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
+ packet_len += sizeof(struct rmnet_map_dl_csum_trailer);
+ } else if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV5) {
+ if (!(maph->flags & MAP_CMD_FLAG)) {
+ packet_len += sizeof(*next_hdr);
+ if (maph->flags & MAP_NEXT_HEADER_FLAG)
+ next_hdr = data + sizeof(*maph);
+ else
+ /* MAPv5 data pkt without csum hdr is invalid */
+ return NULL;
+ }
+ }
+
+ if (((int)skb->len - (int)packet_len) < 0)
+ return NULL;
+
+ /* Some hardware can send us empty frames. Catch them */
+ if (!maph->pkt_len)
+ return NULL;
+
+ if (next_hdr) {
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return NULL;
+ }
+
+ skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
+ if (!skbn)
+ return NULL;
+
+ skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+ skb_put(skbn, packet_len);
+ memcpy(skbn->data, skb->data, packet_len);
+ skb_pull(skb, packet_len);
+
+ return skbn;
+}
+
+/* Validates packet checksums. Function takes a pointer to
+ * the beginning of a buffer which contains the IP payload +
+ * padding + checksum trailer.
+ * Only IPv4 and IPv6 are supported along with TCP & UDP.
+ * Fragmented or tunneled packets are not supported.
+ */
+int rmnet_map_checksum_downlink_packet(struct sk_buff *skb, u16 len)
+{
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
+ struct rmnet_map_dl_csum_trailer *csum_trailer;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ priv->stats.csum_sw++;
+ return -EOPNOTSUPP;
+ }
+
+ csum_trailer = (struct rmnet_map_dl_csum_trailer *)(skb->data + len);
+
+ if (!(csum_trailer->flags & MAP_CSUM_DL_VALID_FLAG)) {
+ priv->stats.csum_valid_unset++;
+ return -EINVAL;
+ }
+
+ if (skb->protocol == htons(ETH_P_IP))
+ return rmnet_map_ipv4_dl_csum_trailer(skb, csum_trailer, priv);
+
+ if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6))
+ return rmnet_map_ipv6_dl_csum_trailer(skb, csum_trailer, priv);
+
+ priv->stats.csum_err_invalid_ip_version++;
+
+ return -EPROTONOSUPPORT;
+}
+
+static void rmnet_map_v4_checksum_uplink_packet(struct sk_buff *skb,
+ struct net_device *orig_dev)
+{
+ struct rmnet_priv *priv = netdev_priv(orig_dev);
+ struct rmnet_map_ul_csum_header *ul_header;
+ void *iphdr;
+
+ ul_header = (struct rmnet_map_ul_csum_header *)
+ skb_push(skb, sizeof(struct rmnet_map_ul_csum_header));
+
+ if (unlikely(!(orig_dev->features &
+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))))
+ goto sw_csum;
+
+ if (skb->ip_summed != CHECKSUM_PARTIAL)
+ goto sw_csum;
+
+ iphdr = (char *)ul_header +
+ sizeof(struct rmnet_map_ul_csum_header);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
+ rmnet_map_ipv4_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
+ return;
+ }
+
+ if (IS_ENABLED(CONFIG_IPV6) && skb->protocol == htons(ETH_P_IPV6)) {
+ rmnet_map_ipv6_ul_csum_header(iphdr, ul_header, skb);
+ priv->stats.csum_hw++;
+ return;
+ }
+
+ priv->stats.csum_err_invalid_ip_version++;
+
+sw_csum:
+ memset(ul_header, 0, sizeof(*ul_header));
+
+ priv->stats.csum_sw++;
+}
+
+/* Generates UL checksum meta info header for IPv4 and IPv6 over TCP and UDP
+ * packets that are supported for UL checksum offload.
+ */
+void rmnet_map_checksum_uplink_packet(struct sk_buff *skb,
+ struct rmnet_port *port,
+ struct net_device *orig_dev,
+ int csum_type)
+{
+ switch (csum_type) {
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV4:
+ rmnet_map_v4_checksum_uplink_packet(skb, orig_dev);
+ break;
+ case RMNET_FLAGS_EGRESS_MAP_CKSUMV5:
+ rmnet_map_v5_checksum_uplink_packet(skb, port, orig_dev);
+ break;
+ default:
+ break;
+ }
+}
+
+/* Process a MAPv5 packet header */
+int rmnet_map_process_next_hdr_packet(struct sk_buff *skb,
+ u16 len)
+{
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
+ struct rmnet_map_v5_csum_header *next_hdr;
+ u8 nexthdr_type;
+
+ next_hdr = (struct rmnet_map_v5_csum_header *)(skb->data +
+ sizeof(struct rmnet_map_header));
+
+ nexthdr_type = u8_get_bits(next_hdr->header_info,
+ MAPV5_HDRINFO_HDR_TYPE_FMASK);
+
+ if (nexthdr_type != RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD)
+ return -EINVAL;
+
+ if (unlikely(!(skb->dev->features & NETIF_F_RXCSUM))) {
+ priv->stats.csum_sw++;
+ } else if (next_hdr->csum_info & MAPV5_CSUMINFO_VALID_FLAG) {
+ priv->stats.csum_ok++;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ } else {
+ priv->stats.csum_valid_unset++;
+ }
+
+ /* Pull csum v5 header */
+ skb_pull(skb, sizeof(*next_hdr));
+
+ return 0;
+}
+
+#define RMNET_AGG_BYPASS_TIME_NSEC 10000000L
+
+static void reset_aggr_params(struct rmnet_port *port)
+{
+ port->skbagg_head = NULL;
+ port->agg_count = 0;
+ port->agg_state = 0;
+ memset(&port->agg_time, 0, sizeof(struct timespec64));
+}
+
+static void rmnet_send_skb(struct rmnet_port *port, struct sk_buff *skb)
+{
+ if (skb_needs_linearize(skb, port->dev->features)) {
+ if (unlikely(__skb_linearize(skb))) {
+ struct rmnet_priv *priv;
+
+ priv = netdev_priv(port->rmnet_dev);
+ this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+ dev_kfree_skb_any(skb);
+ return;
+ }
+ }
+
+ dev_queue_xmit(skb);
+}
+
+static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
+{
+ struct sk_buff *skb = NULL;
+ struct rmnet_port *port;
+
+ port = container_of(work, struct rmnet_port, agg_wq);
+
+ spin_lock_bh(&port->agg_lock);
+ if (likely(port->agg_state == -EINPROGRESS)) {
+ /* Buffer may have already been shipped out */
+ if (likely(port->skbagg_head)) {
+ skb = port->skbagg_head;
+ reset_aggr_params(port);
+ }
+ port->agg_state = 0;
+ }
+
+ spin_unlock_bh(&port->agg_lock);
+ if (skb)
+ rmnet_send_skb(port, skb);
+}
+
+static enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
+{
+ struct rmnet_port *port;
+
+ port = container_of(t, struct rmnet_port, hrtimer);
+
+ schedule_work(&port->agg_wq);
+
+ return HRTIMER_NORESTART;
+}
+
+unsigned int rmnet_map_tx_aggregate(struct sk_buff *skb, struct rmnet_port *port,
+ struct net_device *orig_dev)
+{
+ struct timespec64 diff, last;
+ unsigned int len = skb->len;
+ struct sk_buff *agg_skb;
+ int size;
+
+ spin_lock_bh(&port->agg_lock);
+ memcpy(&last, &port->agg_last, sizeof(struct timespec64));
+ ktime_get_real_ts64(&port->agg_last);
+
+ if (!port->skbagg_head) {
+ /* Check to see if we should agg first. If the traffic is very
+ * sparse, don't aggregate.
+ */
+new_packet:
+ diff = timespec64_sub(port->agg_last, last);
+ size = port->egress_agg_params.bytes - skb->len;
+
+ if (size < 0) {
+ /* dropped */
+ spin_unlock_bh(&port->agg_lock);
+ return 0;
+ }
+
+ if (diff.tv_sec > 0 || diff.tv_nsec > RMNET_AGG_BYPASS_TIME_NSEC ||
+ size == 0)
+ goto no_aggr;
+
+ port->skbagg_head = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
+ if (!port->skbagg_head)
+ goto no_aggr;
+
+ dev_kfree_skb_any(skb);
+ port->skbagg_head->protocol = htons(ETH_P_MAP);
+ port->agg_count = 1;
+ ktime_get_real_ts64(&port->agg_time);
+ skb_frag_list_init(port->skbagg_head);
+ goto schedule;
+ }
+ diff = timespec64_sub(port->agg_last, port->agg_time);
+ size = port->egress_agg_params.bytes - port->skbagg_head->len;
+
+ if (skb->len > size) {
+ agg_skb = port->skbagg_head;
+ reset_aggr_params(port);
+ spin_unlock_bh(&port->agg_lock);
+ hrtimer_cancel(&port->hrtimer);
+ rmnet_send_skb(port, agg_skb);
+ spin_lock_bh(&port->agg_lock);
+ goto new_packet;
+ }
+
+ if (skb_has_frag_list(port->skbagg_head))
+ port->skbagg_tail->next = skb;
+ else
+ skb_shinfo(port->skbagg_head)->frag_list = skb;
+
+ port->skbagg_head->len += skb->len;
+ port->skbagg_head->data_len += skb->len;
+ port->skbagg_head->truesize += skb->truesize;
+ port->skbagg_tail = skb;
+ port->agg_count++;
+
+ if (diff.tv_sec > 0 || diff.tv_nsec > port->egress_agg_params.time_nsec ||
+ port->agg_count >= port->egress_agg_params.count ||
+ port->skbagg_head->len == port->egress_agg_params.bytes) {
+ agg_skb = port->skbagg_head;
+ reset_aggr_params(port);
+ spin_unlock_bh(&port->agg_lock);
+ hrtimer_cancel(&port->hrtimer);
+ rmnet_send_skb(port, agg_skb);
+ return len;
+ }
+
+schedule:
+ if (!hrtimer_active(&port->hrtimer) && port->agg_state != -EINPROGRESS) {
+ port->agg_state = -EINPROGRESS;
+ hrtimer_start(&port->hrtimer,
+ ns_to_ktime(port->egress_agg_params.time_nsec),
+ HRTIMER_MODE_REL);
+ }
+ spin_unlock_bh(&port->agg_lock);
+
+ return len;
+
+no_aggr:
+ spin_unlock_bh(&port->agg_lock);
+ skb->protocol = htons(ETH_P_MAP);
+ dev_queue_xmit(skb);
+
+ return len;
+}
+
+void rmnet_map_update_ul_agg_config(struct rmnet_port *port, u32 size,
+ u32 count, u32 time)
+{
+ spin_lock_bh(&port->agg_lock);
+ port->egress_agg_params.bytes = size;
+ WRITE_ONCE(port->egress_agg_params.count, count);
+ port->egress_agg_params.time_nsec = time * NSEC_PER_USEC;
+ spin_unlock_bh(&port->agg_lock);
+}
+
+void rmnet_map_tx_aggregate_init(struct rmnet_port *port)
+{
+ hrtimer_init(&port->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ port->hrtimer.function = rmnet_map_flush_tx_packet_queue;
+ spin_lock_init(&port->agg_lock);
+ rmnet_map_update_ul_agg_config(port, 4096, 1, 800);
+ INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
+}
+
+void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
+{
+ hrtimer_cancel(&port->hrtimer);
+ cancel_work_sync(&port->agg_wq);
+
+ spin_lock_bh(&port->agg_lock);
+ if (port->agg_state == -EINPROGRESS) {
+ if (port->skbagg_head) {
+ dev_kfree_skb_any(port->skbagg_head);
+ reset_aggr_params(port);
+ }
+
+ port->agg_state = 0;
+ }
+ spin_unlock_bh(&port->agg_lock);
+}
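The long comment in rmnet_map_ipv4_dl_csum_trailer() leans entirely on one's-complement algebra: the sender chooses the transport checksum field so that pseudo header plus payload sums to 0xffff, which is why the driver only needs to compare the trailer value with the negated pseudo header sum. A tiny standalone demonstration of that identity with made-up 16-bit sums:

    #include <assert.h>
    #include <stdint.h>

    /* one's-complement 16-bit add with end-around carry */
    static uint16_t csum16_add(uint16_t a, uint16_t b)
    {
        uint32_t s = (uint32_t)a + b;

        return (uint16_t)((s & 0xffff) + (s >> 16));
    }

    int main(void)
    {
        uint16_t pseudo  = 0x1234;  /* made-up pseudo header sum */
        uint16_t payload = 0xabcd;  /* made-up payload sum, checksum field zeroed */
        uint16_t check   = ~csum16_add(pseudo, payload);  /* transmitted checksum */

        /* Summing the full payload (including the checksum field) with
         * the pseudo header yields negative zero when nothing was
         * corrupted; equivalently the payload sum equals ~pseudo, which
         * is exactly the test the driver applies to the trailer value.
         */
        assert(csum16_add(csum16_add(payload, check), pseudo) == 0xffff);
        return 0;
    }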
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
new file mode 100644
index 0000000000..e1337f164f
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_private.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2014, 2016-2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _RMNET_PRIVATE_H_
+#define _RMNET_PRIVATE_H_
+
+#define RMNET_MAX_PACKET_SIZE 16384
+#define RMNET_DFLT_PACKET_SIZE 1500
+#define RMNET_NEEDED_HEADROOM 16
+#define RMNET_TX_QUEUE_LEN 1000
+
+/* Replace skb->dev to a virtual rmnet device and pass up the stack */
+#define RMNET_EPMODE_VND (1)
+/* Pass the frame directly to another device with dev_queue_xmit() */
+#define RMNET_EPMODE_BRIDGE (2)
+
+#endif /* _RMNET_PRIVATE_H_ */
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
new file mode 100644
index 0000000000..046b5f7d8e
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -0,0 +1,409 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data virtual network driver
+ */
+
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/if_arp.h>
+#include <net/pkt_sched.h>
+#include "rmnet_config.h"
+#include "rmnet_handlers.h"
+#include "rmnet_private.h"
+#include "rmnet_map.h"
+#include "rmnet_vnd.h"
+
+/* RX/TX Fixup */
+
+void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_pcpu_stats *pcpu_ptr;
+
+ pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+ u64_stats_update_begin(&pcpu_ptr->syncp);
+ pcpu_ptr->stats.rx_pkts++;
+ pcpu_ptr->stats.rx_bytes += skb->len;
+ u64_stats_update_end(&pcpu_ptr->syncp);
+}
+
+void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_pcpu_stats *pcpu_ptr;
+
+ pcpu_ptr = this_cpu_ptr(priv->pcpu_stats);
+
+ u64_stats_update_begin(&pcpu_ptr->syncp);
+ pcpu_ptr->stats.tx_pkts++;
+ pcpu_ptr->stats.tx_bytes += len;
+ u64_stats_update_end(&pcpu_ptr->syncp);
+}
+
+void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev)
+{
+ rmnet_vnd_tx_fixup_len(skb->len, dev);
+}
+
+/* Network Device Operations */
+
+static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct rmnet_priv *priv;
+
+ priv = netdev_priv(dev);
+ if (priv->real_dev) {
+ rmnet_egress_handler(skb);
+ } else {
+ this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
+ kfree_skb(skb);
+ }
+ return NETDEV_TX_OK;
+}
+
+static int rmnet_vnd_headroom(struct rmnet_port *port)
+{
+ u32 headroom;
+
+ headroom = sizeof(struct rmnet_map_header);
+
+ if (port->data_format & RMNET_FLAGS_EGRESS_MAP_CKSUMV4)
+ headroom += sizeof(struct rmnet_map_ul_csum_header);
+
+ return headroom;
+}
+
+static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
+{
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+ struct rmnet_port *port;
+ u32 headroom;
+
+ port = rmnet_get_port_rtnl(priv->real_dev);
+
+ headroom = rmnet_vnd_headroom(port);
+
+ if (new_mtu < 0 || new_mtu > RMNET_MAX_PACKET_SIZE ||
+ new_mtu > (priv->real_dev->mtu - headroom))
+ return -EINVAL;
+
+ rmnet_dev->mtu = new_mtu;
+ return 0;
+}
+
+static int rmnet_vnd_get_iflink(const struct net_device *dev)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+
+ return priv->real_dev->ifindex;
+}
+
+static int rmnet_vnd_init(struct net_device *dev)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ int err;
+
+ priv->pcpu_stats = alloc_percpu(struct rmnet_pcpu_stats);
+ if (!priv->pcpu_stats)
+ return -ENOMEM;
+
+ err = gro_cells_init(&priv->gro_cells, dev);
+ if (err) {
+ free_percpu(priv->pcpu_stats);
+ return err;
+ }
+
+ return 0;
+}
+
+static void rmnet_vnd_uninit(struct net_device *dev)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+
+ gro_cells_destroy(&priv->gro_cells);
+ free_percpu(priv->pcpu_stats);
+}
+
+static void rmnet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *s)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_vnd_stats total_stats = { };
+ struct rmnet_pcpu_stats *pcpu_ptr;
+ struct rmnet_vnd_stats snapshot;
+ unsigned int cpu, start;
+
+ for_each_possible_cpu(cpu) {
+ pcpu_ptr = per_cpu_ptr(priv->pcpu_stats, cpu);
+
+ do {
+ start = u64_stats_fetch_begin(&pcpu_ptr->syncp);
+ snapshot = pcpu_ptr->stats; /* struct assignment */
+ } while (u64_stats_fetch_retry(&pcpu_ptr->syncp, start));
+
+ total_stats.rx_pkts += snapshot.rx_pkts;
+ total_stats.rx_bytes += snapshot.rx_bytes;
+ total_stats.tx_pkts += snapshot.tx_pkts;
+ total_stats.tx_bytes += snapshot.tx_bytes;
+ total_stats.tx_drops += snapshot.tx_drops;
+ }
+
+ s->rx_packets = total_stats.rx_pkts;
+ s->rx_bytes = total_stats.rx_bytes;
+ s->tx_packets = total_stats.tx_pkts;
+ s->tx_bytes = total_stats.tx_bytes;
+ s->tx_dropped = total_stats.tx_drops;
+}
+
+static const struct net_device_ops rmnet_vnd_ops = {
+ .ndo_start_xmit = rmnet_vnd_start_xmit,
+ .ndo_change_mtu = rmnet_vnd_change_mtu,
+ .ndo_get_iflink = rmnet_vnd_get_iflink,
+ .ndo_add_slave = rmnet_add_bridge,
+ .ndo_del_slave = rmnet_del_bridge,
+ .ndo_init = rmnet_vnd_init,
+ .ndo_uninit = rmnet_vnd_uninit,
+ .ndo_get_stats64 = rmnet_get_stats64,
+};
+
+static const char rmnet_gstrings_stats[][ETH_GSTRING_LEN] = {
+ "Checksum ok",
+ "Bad IPv4 header checksum",
+ "Checksum valid bit not set",
+ "Checksum validation failed",
+ "Checksum error bad buffer",
+ "Checksum error bad ip version",
+ "Checksum error bad transport",
+ "Checksum skipped on ip fragment",
+ "Checksum skipped",
+ "Checksum computed in software",
+ "Checksum computed in hardware",
+};
+
+static void rmnet_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
+{
+ switch (stringset) {
+ case ETH_SS_STATS:
+ memcpy(buf, &rmnet_gstrings_stats,
+ sizeof(rmnet_gstrings_stats));
+ break;
+ }
+}
+
+static int rmnet_get_sset_count(struct net_device *dev, int sset)
+{
+ switch (sset) {
+ case ETH_SS_STATS:
+ return ARRAY_SIZE(rmnet_gstrings_stats);
+ default:
+ return -EOPNOTSUPP;
+ }
+}
+
+static void rmnet_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_priv_stats *st = &priv->stats;
+
+ if (!data)
+ return;
+
+ memcpy(data, st, ARRAY_SIZE(rmnet_gstrings_stats) * sizeof(u64));
+}
+
+static int rmnet_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_port *port;
+
+ port = rmnet_get_port_rtnl(priv->real_dev);
+
+ memset(kernel_coal, 0, sizeof(*kernel_coal));
+ kernel_coal->tx_aggr_max_bytes = port->egress_agg_params.bytes;
+ kernel_coal->tx_aggr_max_frames = port->egress_agg_params.count;
+ kernel_coal->tx_aggr_time_usecs = div_u64(port->egress_agg_params.time_nsec,
+ NSEC_PER_USEC);
+
+ return 0;
+}
+
+static int rmnet_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *coal,
+ struct kernel_ethtool_coalesce *kernel_coal,
+ struct netlink_ext_ack *extack)
+{
+ struct rmnet_priv *priv = netdev_priv(dev);
+ struct rmnet_port *port;
+
+ port = rmnet_get_port_rtnl(priv->real_dev);
+
+ if (kernel_coal->tx_aggr_max_frames < 1 || kernel_coal->tx_aggr_max_frames > 64)
+ return -EINVAL;
+
+ if (kernel_coal->tx_aggr_max_bytes > 32768)
+ return -EINVAL;
+
+ rmnet_map_update_ul_agg_config(port, kernel_coal->tx_aggr_max_bytes,
+ kernel_coal->tx_aggr_max_frames,
+ kernel_coal->tx_aggr_time_usecs);
+
+ return 0;
+}
+
+static const struct ethtool_ops rmnet_ethtool_ops = {
+ .supported_coalesce_params = ETHTOOL_COALESCE_TX_AGGR,
+ .get_coalesce = rmnet_get_coalesce,
+ .set_coalesce = rmnet_set_coalesce,
+ .get_ethtool_stats = rmnet_get_ethtool_stats,
+ .get_strings = rmnet_get_strings,
+ .get_sset_count = rmnet_get_sset_count,
+};
+
+/* Called by kernel whenever a new rmnet<n> device is created. Sets MTU,
+ * flags, ARP type, needed headroom, etc...
+ */
+void rmnet_vnd_setup(struct net_device *rmnet_dev)
+{
+ rmnet_dev->netdev_ops = &rmnet_vnd_ops;
+ rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE;
+ rmnet_dev->needed_headroom = RMNET_NEEDED_HEADROOM;
+ eth_hw_addr_random(rmnet_dev);
+ rmnet_dev->tx_queue_len = RMNET_TX_QUEUE_LEN;
+
+ /* Raw IP mode */
+ rmnet_dev->header_ops = NULL; /* No header */
+ rmnet_dev->type = ARPHRD_RAWIP;
+ rmnet_dev->hard_header_len = 0;
+ rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
+
+ rmnet_dev->needs_free_netdev = true;
+ rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
+
+ rmnet_dev->features |= NETIF_F_LLTX;
+
+ /* This perm addr will be used as interface identifier by IPv6 */
+ rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(rmnet_dev->perm_addr);
+}
+
+/* Exposed API */
+
+int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
+ struct rmnet_port *port,
+ struct net_device *real_dev,
+ struct rmnet_endpoint *ep,
+ struct netlink_ext_ack *extack)
+
+{
+ struct rmnet_priv *priv = netdev_priv(rmnet_dev);
+ u32 headroom;
+ int rc;
+
+ if (rmnet_get_endpoint(port, id)) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
+ return -EBUSY;
+ }
+
+ rmnet_dev->hw_features = NETIF_F_RXCSUM;
+ rmnet_dev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ rmnet_dev->hw_features |= NETIF_F_SG;
+
+ priv->real_dev = real_dev;
+
+ headroom = rmnet_vnd_headroom(port);
+
+ if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) {
+ NL_SET_ERR_MSG_MOD(extack, "Invalid MTU on real dev");
+ return -EINVAL;
+ }
+
+ rc = register_netdevice(rmnet_dev);
+ if (!rc) {
+ ep->egress_dev = rmnet_dev;
+ ep->mux_id = id;
+ port->nr_rmnet_devs++;
+
+ rmnet_dev->rtnl_link_ops = &rmnet_link_ops;
+
+ priv->mux_id = id;
+
+ netdev_dbg(rmnet_dev, "rmnet dev created\n");
+ }
+
+ return rc;
+}
+
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+ struct rmnet_endpoint *ep)
+{
+ if (id >= RMNET_MAX_LOGICAL_EP || !ep->egress_dev)
+ return -EINVAL;
+
+ ep->egress_dev = NULL;
+ port->nr_rmnet_devs--;
+ return 0;
+}
+
+int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
+{
+ netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
+ /* Although we expect a similar number of enable/disable
+ * commands, optimize for the disable path, which is more
+ * latency sensitive than enable
+ */
+ if (unlikely(enable))
+ netif_wake_queue(rmnet_dev);
+ else
+ netif_stop_queue(rmnet_dev);
+
+ return 0;
+}
+
+int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev)
+{
+ struct hlist_node *tmp_ep;
+ struct rmnet_endpoint *ep;
+ struct rmnet_port *port;
+ unsigned long bkt_ep;
+ u32 headroom;
+
+ port = rmnet_get_port_rtnl(real_dev);
+
+ headroom = rmnet_vnd_headroom(port);
+
+ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+ if (ep->egress_dev->mtu > (real_dev->mtu - headroom))
+ return -1;
+ }
+
+ return 0;
+}
+
+int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
+ struct net_device *real_dev)
+{
+ struct hlist_node *tmp_ep;
+ struct rmnet_endpoint *ep;
+ unsigned long bkt_ep;
+ u32 headroom;
+
+ headroom = rmnet_vnd_headroom(port);
+
+ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+ if (ep->egress_dev->mtu <= (real_dev->mtu - headroom))
+ continue;
+
+ if (rmnet_vnd_change_mtu(ep->egress_dev,
+ real_dev->mtu - headroom))
+ return -1;
+ }
+
+ return 0;
+}
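All three MTU checks in this file reduce to the same arithmetic: an rmnet device must leave enough headroom on the real device for the MAP header, plus the uplink checksum header when MAPv4 egress checksum offload is enabled. A compact restatement, using the 4-byte sizes of both headers from this patch as illustrative constants:

    #include <stdbool.h>
    #include <stdint.h>

    #define MAP_HDR_LEN     4   /* struct rmnet_map_header */
    #define UL_CSUM_HDR_LEN 4   /* struct rmnet_map_ul_csum_header */

    /* mirrors rmnet_vnd_headroom() + rmnet_vnd_change_mtu() */
    static bool rmnet_mtu_ok(uint32_t rmnet_mtu, uint32_t real_mtu,
                             bool cksumv4_egress)
    {
        uint32_t headroom = MAP_HDR_LEN;

        if (cksumv4_egress)
            headroom += UL_CSUM_HDR_LEN;

        if (real_mtu < headroom)
            return false;

        return rmnet_mtu <= real_mtu - headroom;
    }

Note also that the egress aggregation defaults set in rmnet_map_tx_aggregate_init() (4096 bytes, 1 frame, 800 microseconds) leave aggregation effectively disabled, since the egress handler only aggregates when the configured frame count is greater than one; the thresholds are adjustable through the ethtool coalesce tx-aggr parameters registered above.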
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
new file mode 100644
index 0000000000..c2b2baf868
--- /dev/null
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
+ *
+ * RMNET Data Virtual Network Device APIs
+ */
+
+#ifndef _RMNET_VND_H_
+#define _RMNET_VND_H_
+
+int rmnet_vnd_do_flow_control(struct net_device *dev, int enable);
+int rmnet_vnd_newlink(u8 id, struct net_device *rmnet_dev,
+ struct rmnet_port *port,
+ struct net_device *real_dev,
+ struct rmnet_endpoint *ep,
+ struct netlink_ext_ack *extack);
+int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
+ struct rmnet_endpoint *ep);
+void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
+void rmnet_vnd_tx_fixup_len(unsigned int len, struct net_device *dev);
+void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
+void rmnet_vnd_setup(struct net_device *dev);
+int rmnet_vnd_validate_real_dev_mtu(struct net_device *real_dev);
+int rmnet_vnd_update_dev_mtu(struct rmnet_port *port,
+ struct net_device *real_dev);
+#endif /* _RMNET_VND_H_ */