Diffstat
 -rw-r--r--  drivers/net/caif/Kconfig        |  33
 -rw-r--r--  drivers/net/caif/Makefile       |   8
 -rw-r--r--  drivers/net/caif/caif_serial.c  | 452
 -rw-r--r--  drivers/net/caif/caif_virtio.c  | 791
 4 files changed, 1284 insertions(+), 0 deletions(-)
diff --git a/drivers/net/caif/Kconfig b/drivers/net/caif/Kconfig
new file mode 100644
index 000000000..709660cb3
--- /dev/null
+++ b/drivers/net/caif/Kconfig
@@ -0,0 +1,33 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# CAIF physical drivers
+#
+
+menuconfig CAIF_DRIVERS
+	bool "CAIF transport drivers"
+	depends on CAIF
+	help
+	  Enable this to see CAIF physical drivers.
+
+if CAIF_DRIVERS
+
+config CAIF_TTY
+	tristate "CAIF TTY transport driver"
+	depends on CAIF && TTY
+	default n
+	help
+	  The CAIF TTY transport driver is a Line Discipline (ldisc)
+	  identified as N_CAIF. When this ldisc is opened from user space
+	  it will redirect the TTY's traffic into the CAIF stack.
+
+config CAIF_VIRTIO
+	tristate "CAIF virtio transport driver"
+	depends on CAIF && HAS_DMA
+	select VHOST_RING
+	select VIRTIO
+	select GENERIC_ALLOCATOR
+	default n
+	help
+	  The CAIF driver for CAIF over Virtio.
+
+endif # CAIF_DRIVERS
diff --git a/drivers/net/caif/Makefile b/drivers/net/caif/Makefile
new file mode 100644
index 000000000..97f664f80
--- /dev/null
+++ b/drivers/net/caif/Makefile
@@ -0,0 +1,8 @@
+# SPDX-License-Identifier: GPL-2.0
+ccflags-$(CONFIG_CAIF_DEBUG) := -DDEBUG
+
+# Serial interface
+obj-$(CONFIG_CAIF_TTY) += caif_serial.o
+
+# Virtio interface
+obj-$(CONFIG_CAIF_VIRTIO) += caif_virtio.o
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
new file mode 100644
index 000000000..688075859
--- /dev/null
+++ b/drivers/net/caif/caif_serial.c
@@ -0,0 +1,452 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland
+ */
+
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/tty.h>
+#include <linux/file.h>
+#include <linux/if_arp.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/cfcnfg.h>
+#include <linux/err.h>
+#include <linux/debugfs.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sjur Brendeland");
+MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
+MODULE_ALIAS_LDISC(N_CAIF);
+
+#define SEND_QUEUE_LOW 10
+#define SEND_QUEUE_HIGH 100
+#define CAIF_SENDING 1 /* Bit 1 = 0x02 */
+#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
+#define MAX_WRITE_CHUNK 4096
+#define ON 1
+#define OFF 0
+#define CAIF_MAX_MTU 4096
+
+static DEFINE_SPINLOCK(ser_lock);
+static LIST_HEAD(ser_list);
+static LIST_HEAD(ser_release_list);
+
+static bool ser_loop;
+module_param(ser_loop, bool, 0444);
+MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
+
+static bool ser_use_stx = true;
+module_param(ser_use_stx, bool, 0444);
+MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
+
+static bool ser_use_fcs = true;
+
+module_param(ser_use_fcs, bool, 0444);
+MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
+
+static int ser_write_chunk = MAX_WRITE_CHUNK;
+module_param(ser_write_chunk, int, 0444);
+
+MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
+
+static struct dentry *debugfsdir;
+
+static int caif_net_open(struct net_device *dev);
+static int caif_net_close(struct net_device *dev);
+
+struct ser_device {
+	struct caif_dev_common common;
+	struct list_head node;
+	struct net_device *dev;
+	struct sk_buff_head head;
+	struct tty_struct *tty;
+	bool tx_started;
+	unsigned long state;
+#ifdef CONFIG_DEBUG_FS
+	struct dentry *debugfs_tty_dir;
+	struct debugfs_blob_wrapper tx_blob;
+	struct debugfs_blob_wrapper rx_blob;
+	u8 rx_data[128];
+	u8 tx_data[128];
+	u8 tty_status;
+
+#endif
+};
+
+static void caifdev_setup(struct net_device *dev);
+static void ldisc_tx_wakeup(struct tty_struct *tty);
+#ifdef CONFIG_DEBUG_FS
+static inline void update_tty_status(struct ser_device *ser)
+{
+	ser->tty_status =
+		ser->tty->flow.stopped << 5 |
+		ser->tty->flow.tco_stopped << 3 |
+		ser->tty->ctrl.packet << 2;
+}
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+	ser->debugfs_tty_dir = debugfs_create_dir(tty->name, debugfsdir);
+
+	debugfs_create_blob("last_tx_msg", 0400, ser->debugfs_tty_dir,
+			    &ser->tx_blob);
+
+	debugfs_create_blob("last_rx_msg", 0400, ser->debugfs_tty_dir,
+			    &ser->rx_blob);
+
+	debugfs_create_xul("ser_state", 0400, ser->debugfs_tty_dir,
+			   &ser->state);
+
+	debugfs_create_x8("tty_status", 0400, ser->debugfs_tty_dir,
+			  &ser->tty_status);
+
+	ser->tx_blob.data = ser->tx_data;
+	ser->tx_blob.size = 0;
+	ser->rx_blob.data = ser->rx_data;
+	ser->rx_blob.size = 0;
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+	debugfs_remove_recursive(ser->debugfs_tty_dir);
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+	if (size > sizeof(ser->rx_data))
+		size = sizeof(ser->rx_data);
+	memcpy(ser->rx_data, data, size);
+	ser->rx_blob.data = ser->rx_data;
+	ser->rx_blob.size = size;
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+	if (size > sizeof(ser->tx_data))
+		size = sizeof(ser->tx_data);
+	memcpy(ser->tx_data, data, size);
+	ser->tx_blob.data = ser->tx_data;
+	ser->tx_blob.size = size;
+}
+#else
+static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
+{
+}
+
+static inline void debugfs_deinit(struct ser_device *ser)
+{
+}
+
+static inline void update_tty_status(struct ser_device *ser)
+{
+}
+
+static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
+{
+}
+
+#endif
+
+static void ldisc_receive(struct tty_struct *tty, const u8 *data,
+			  const char *flags, int count)
+{
+	struct sk_buff *skb = NULL;
+	struct ser_device *ser;
+	int ret;
+
+	ser = tty->disc_data;
+
+	/*
+	 * NOTE: flags may contain information about break or overrun.
+	 * This is not yet handled.
+	 */
+
+
+	/*
+	 * Workaround for garbage at start of transmission,
+	 * only enable if STX handling is not enabled.
+	 */
+	if (!ser->common.use_stx && !ser->tx_started) {
+		dev_info(&ser->dev->dev,
+			 "Bytes received before initial transmission - "
+			 "bytes discarded.\n");
+		return;
+	}
+
+	BUG_ON(ser->dev == NULL);
+
+	/* Get a suitable caif packet and copy in data. */
+	skb = netdev_alloc_skb(ser->dev, count+1);
+	if (skb == NULL)
+		return;
+	skb_put_data(skb, data, count);
+
+	skb->protocol = htons(ETH_P_CAIF);
+	skb_reset_mac_header(skb);
+	debugfs_rx(ser, data, count);
+	/* Push received packet up the stack. */
+	ret = netif_rx(skb);
+	if (!ret) {
+		ser->dev->stats.rx_packets++;
+		ser->dev->stats.rx_bytes += count;
+	} else
+		++ser->dev->stats.rx_dropped;
+	update_tty_status(ser);
+}
+
+static int handle_tx(struct ser_device *ser)
+{
+	struct tty_struct *tty;
+	struct sk_buff *skb;
+	int tty_wr, len, room;
+
+	tty = ser->tty;
+	ser->tx_started = true;
+
+	/* Enter critical section */
+	if (test_and_set_bit(CAIF_SENDING, &ser->state))
+		return 0;
+
+	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
+	while ((skb = skb_peek(&ser->head)) != NULL) {
+
+		/* Make sure you don't write too much */
+		len = skb->len;
+		room = tty_write_room(tty);
+		if (!room)
+			break;
+		if (room > ser_write_chunk)
+			room = ser_write_chunk;
+		if (len > room)
+			len = room;
+
+		/* Write to tty or loopback */
+		if (!ser_loop) {
+			tty_wr = tty->ops->write(tty, skb->data, len);
+			update_tty_status(ser);
+		} else {
+			tty_wr = len;
+			ldisc_receive(tty, skb->data, NULL, len);
+		}
+		ser->dev->stats.tx_packets++;
+		ser->dev->stats.tx_bytes += tty_wr;
+
+		/* Error on TTY? */
+		if (tty_wr < 0)
+			goto error;
+		/* Reduce buffer written, and discard if empty */
+		skb_pull(skb, tty_wr);
+		if (skb->len == 0) {
+			struct sk_buff *tmp = skb_dequeue(&ser->head);
+			WARN_ON(tmp != skb);
+			dev_consume_skb_any(skb);
+		}
+	}
+	/* Send flow off if queue is empty */
+	if (ser->head.qlen <= SEND_QUEUE_LOW &&
+	    test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+	    ser->common.flowctrl != NULL)
+		ser->common.flowctrl(ser->dev, ON);
+	clear_bit(CAIF_SENDING, &ser->state);
+	return 0;
+error:
+	clear_bit(CAIF_SENDING, &ser->state);
+	return tty_wr;
+}
+
+static netdev_tx_t caif_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ser_device *ser;
+
+	ser = netdev_priv(dev);
+
+	/* Send flow off once, on high water mark */
+	if (ser->head.qlen > SEND_QUEUE_HIGH &&
+	    !test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
+	    ser->common.flowctrl != NULL)
+
+		ser->common.flowctrl(ser->dev, OFF);
+
+	skb_queue_tail(&ser->head, skb);
+	return handle_tx(ser);
+}
+
+
+static void ldisc_tx_wakeup(struct tty_struct *tty)
+{
+	struct ser_device *ser;
+
+	ser = tty->disc_data;
+	BUG_ON(ser == NULL);
+	WARN_ON(ser->tty != tty);
+	handle_tx(ser);
+}
+
+
+static void ser_release(struct work_struct *work)
+{
+	struct list_head list;
+	struct ser_device *ser, *tmp;
+
+	spin_lock(&ser_lock);
+	list_replace_init(&ser_release_list, &list);
+	spin_unlock(&ser_lock);
+
+	if (!list_empty(&list)) {
+		rtnl_lock();
+		list_for_each_entry_safe(ser, tmp, &list, node) {
+			dev_close(ser->dev);
+			unregister_netdevice(ser->dev);
+			debugfs_deinit(ser);
+		}
+		rtnl_unlock();
+	}
+}
+
+static DECLARE_WORK(ser_release_work, ser_release);
+
+static int ldisc_open(struct tty_struct *tty)
+{
+	struct ser_device *ser;
+	struct net_device *dev;
+	char name[64];
+	int result;
+
+	/* No write no play */
+	if (tty->ops->write == NULL)
+		return -EOPNOTSUPP;
+	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
+		return -EPERM;
+
+	/* release devices to avoid name collision */
+	ser_release(NULL);
+
+	result = snprintf(name, sizeof(name), "cf%s", tty->name);
+	if (result >= IFNAMSIZ)
+		return -EINVAL;
+	dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
+			   caifdev_setup);
+	if (!dev)
+		return -ENOMEM;
+
+	ser = netdev_priv(dev);
+	ser->tty = tty_kref_get(tty);
+	ser->dev = dev;
+	debugfs_init(ser, tty);
+	tty->receive_room = N_TTY_BUF_SIZE;
+	tty->disc_data = ser;
+	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+	rtnl_lock();
+	result = register_netdevice(dev);
+	if (result) {
+		tty_kref_put(tty);
+		rtnl_unlock();
+		free_netdev(dev);
+		return -ENODEV;
+	}
+
+	spin_lock(&ser_lock);
+	list_add(&ser->node, &ser_list);
+	spin_unlock(&ser_lock);
+	rtnl_unlock();
+	netif_stop_queue(dev);
+	update_tty_status(ser);
+	return 0;
+}
+
+static void ldisc_close(struct tty_struct *tty)
+{
+	struct ser_device *ser = tty->disc_data;
+
+	tty_kref_put(ser->tty);
+
+	spin_lock(&ser_lock);
+	list_move(&ser->node, &ser_release_list);
+	spin_unlock(&ser_lock);
+	schedule_work(&ser_release_work);
+}
+
+/* The line discipline structure. */
+static struct tty_ldisc_ops caif_ldisc = {
+	.owner = THIS_MODULE,
+	.num = N_CAIF,
+	.name = "n_caif",
+	.open = ldisc_open,
+	.close = ldisc_close,
+	.receive_buf = ldisc_receive,
+	.write_wakeup = ldisc_tx_wakeup
+};
+
+static const struct net_device_ops netdev_ops = {
+	.ndo_open = caif_net_open,
+	.ndo_stop = caif_net_close,
+	.ndo_start_xmit = caif_xmit
+};
+
+static void caifdev_setup(struct net_device *dev)
+{
+	struct ser_device *serdev = netdev_priv(dev);
+
+	dev->features = 0;
+	dev->netdev_ops = &netdev_ops;
+	dev->type = ARPHRD_CAIF;
+	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	dev->mtu = CAIF_MAX_MTU;
+	dev->priv_flags |= IFF_NO_QUEUE;
+	dev->needs_free_netdev = true;
+	skb_queue_head_init(&serdev->head);
+	serdev->common.link_select = CAIF_LINK_LOW_LATENCY;
+	serdev->common.use_frag = true;
+	serdev->common.use_stx = ser_use_stx;
+	serdev->common.use_fcs = ser_use_fcs;
+	serdev->dev = dev;
+}
+
+
+static int caif_net_open(struct net_device *dev)
+{
+	netif_wake_queue(dev);
+	return 0;
+}
+
+static int caif_net_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static int __init caif_ser_init(void)
+{
+	int ret;
+
+	ret = tty_register_ldisc(&caif_ldisc);
+	if (ret < 0)
+		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF, ret);
+
+	debugfsdir = debugfs_create_dir("caif_serial", NULL);
+	return ret;
+}
+
+static void __exit caif_ser_exit(void)
+{
+	spin_lock(&ser_lock);
+	list_splice(&ser_list, &ser_release_list);
+	spin_unlock(&ser_lock);
+	ser_release(NULL);
+	cancel_work_sync(&ser_release_work);
+	tty_unregister_ldisc(&caif_ldisc);
+	debugfs_remove_recursive(debugfsdir);
+}
+
+module_init(caif_ser_init);
+module_exit(caif_ser_exit);
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
new file mode 100644
index 000000000..0b0f234b0
--- /dev/null
+++ b/drivers/net/caif/caif_virtio.c
@@ -0,0 +1,791 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) ST-Ericsson AB 2013
+ * Authors: Vicram Arv
+ *	    Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
+ *	    Sjur Brendeland
+ */
+#include <linux/module.h>
+#include <linux/if_arp.h>
+#include <linux/virtio.h>
+#include <linux/vringh.h>
+#include <linux/debugfs.h>
+#include <linux/spinlock.h>
+#include <linux/genalloc.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/virtio_ids.h>
+#include <linux/virtio_caif.h>
+#include <linux/virtio_ring.h>
+#include <linux/dma-mapping.h>
+#include <net/caif/caif_dev.h>
+#include <linux/virtio_config.h>
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Vicram Arv");
+MODULE_AUTHOR("Sjur Brendeland");
+MODULE_DESCRIPTION("Virtio CAIF Driver");
+
+/* NAPI schedule quota */
+#define CFV_DEFAULT_QUOTA 32
+
+/* Defaults used if virtio config space is unavailable */
+#define CFV_DEF_MTU_SIZE 4096
+#define CFV_DEF_HEADROOM 32
+#define CFV_DEF_TAILROOM 32
+
+/* Required IP header alignment */
+#define IP_HDR_ALIGN 4
+
+/* struct cfv_napi_context - NAPI context info
+ * @riov: IOV holding data read from the ring. Note that riov may
+ *	  still hold data when cfv_rx_poll() returns.
+ * @head: Last descriptor ID we received from vringh_getdesc_kern.
+ *	  We use this to put the descriptor back on the used ring. USHRT_MAX is
+ *	  used to indicate an invalid head-id.
+ */
+struct cfv_napi_context {
+	struct vringh_kiov riov;
+	unsigned short head;
+};
+
+/* struct cfv_stats - statistics for debugfs
+ * @rx_napi_complete: Number of NAPI completions (RX)
+ * @rx_napi_resched: Number of calls where the full quota was used (RX)
+ * @rx_nomem: Number of SKB alloc failures (RX)
+ * @rx_kicks: Number of RX kicks
+ * @tx_full_ring: Number of times the TX ring was full
+ * @tx_no_mem: Number of times TX went out of memory
+ * @tx_flow_on: Number of flow on (TX)
+ * @tx_kicks: Number of TX kicks
+ */
+struct cfv_stats {
+	u32 rx_napi_complete;
+	u32 rx_napi_resched;
+	u32 rx_nomem;
+	u32 rx_kicks;
+	u32 tx_full_ring;
+	u32 tx_no_mem;
+	u32 tx_flow_on;
+	u32 tx_kicks;
+};
+
+/* struct cfv_info - CAIF Virtio control structure
+ * @cfdev: caif common header
+ * @vdev: Associated virtio device
+ * @vr_rx: rx/downlink host vring
+ * @vq_tx: tx/uplink virtqueue
+ * @ndev: CAIF link layer device
+ * @watermark_tx: indicates number of free descriptors we need
+ *	to reopen the tx-queues after overload.
+ * @tx_lock: protects vq_tx from concurrent use
+ * @tx_release_tasklet: Tasklet for freeing consumed TX buffers
+ * @napi: NAPI context used in cfv_rx_poll()
+ * @ctx: Context data used in cfv_rx_poll()
+ * @tx_hr: transmit headroom
+ * @rx_hr: receive headroom
+ * @tx_tr: transmit tail room
+ * @rx_tr: receive tail room
+ * @mtu: transmit max size
+ * @mru: receive max size
+ * @allocsz: size of dma memory reserved for TX buffers
+ * @alloc_addr: virtual address to dma memory for TX buffers
+ * @alloc_dma: dma address to dma memory for TX buffers
+ * @genpool: Gen Pool used for allocating TX buffers
+ * @reserved_mem: Pointer to memory reserve allocated from genpool
+ * @reserved_size: Size of memory reserve allocated from genpool
+ * @stats: Statistics exposed in debugfs
+ * @debugfs: Debugfs dentry for statistic counters
+ */
+struct cfv_info {
+	struct caif_dev_common cfdev;
+	struct virtio_device *vdev;
+	struct vringh *vr_rx;
+	struct virtqueue *vq_tx;
+	struct net_device *ndev;
+	unsigned int watermark_tx;
+	/* Protect access to vq_tx */
+	spinlock_t tx_lock;
+	struct tasklet_struct tx_release_tasklet;
+	struct napi_struct napi;
+	struct cfv_napi_context ctx;
+	u16 tx_hr;
+	u16 rx_hr;
+	u16 tx_tr;
+	u16 rx_tr;
+	u32 mtu;
+	u32 mru;
+	size_t allocsz;
+	void *alloc_addr;
+	dma_addr_t alloc_dma;
+	struct gen_pool *genpool;
+	unsigned long reserved_mem;
+	size_t reserved_size;
+	struct cfv_stats stats;
+	struct dentry *debugfs;
+};
+
+/* struct buf_info - maintains transmit buffer data handle
+ * @size: size of transmit buffer
+ * @vaddr: virtual address mapping to allocated memory area
+ */
+struct buf_info {
+	size_t size;
+	u8 *vaddr;
+};
+
+/* Called from virtio device, in IRQ context */
+static void cfv_release_cb(struct virtqueue *vq_tx)
+{
+	struct cfv_info *cfv = vq_tx->vdev->priv;
+
+	++cfv->stats.tx_kicks;
+	tasklet_schedule(&cfv->tx_release_tasklet);
+}
+
+static void free_buf_info(struct cfv_info *cfv, struct buf_info *buf_info)
+{
+	if (!buf_info)
+		return;
+	gen_pool_free(cfv->genpool, (unsigned long) buf_info->vaddr,
+		      buf_info->size);
+	kfree(buf_info);
+}
+
+/* This is invoked whenever the remote processor completed processing
+ * a TX msg we just sent, and the buffer is put back to the used ring.
+ */
+static void cfv_release_used_buf(struct virtqueue *vq_tx)
+{
+	struct cfv_info *cfv = vq_tx->vdev->priv;
+	unsigned long flags;
+
+	BUG_ON(vq_tx != cfv->vq_tx);
+
+	for (;;) {
+		unsigned int len;
+		struct buf_info *buf_info;
+
+		/* Get used buffer from used ring to recycle used descriptors */
+		spin_lock_irqsave(&cfv->tx_lock, flags);
+		buf_info = virtqueue_get_buf(vq_tx, &len);
+		spin_unlock_irqrestore(&cfv->tx_lock, flags);
+
+		/* Stop looping if there are no more buffers to free */
+		if (!buf_info)
+			break;
+
+		free_buf_info(cfv, buf_info);
+
+		/* watermark_tx indicates if we previously stopped the tx
+		 * queues. If we have enough free slots in the virtio ring,
+		 * re-establish the memory reserve and open up tx queues.
+		 */
+		if (cfv->vq_tx->num_free <= cfv->watermark_tx)
+			continue;
+
+		/* Re-establish the memory reserve */
+		if (cfv->reserved_mem == 0 && cfv->genpool)
+			cfv->reserved_mem =
+				gen_pool_alloc(cfv->genpool,
+					       cfv->reserved_size);
+
+		/* Open up the tx queues */
+		if (cfv->reserved_mem) {
+			cfv->watermark_tx =
+				virtqueue_get_vring_size(cfv->vq_tx);
+			netif_tx_wake_all_queues(cfv->ndev);
+			/* Buffers are recycled in cfv_netdev_tx, so
+			 * disable notifications when queues are opened.
+			 */
+			virtqueue_disable_cb(cfv->vq_tx);
+			++cfv->stats.tx_flow_on;
+		} else {
+			/* if no memory reserve, wait for more free slots */
+			WARN_ON(cfv->watermark_tx >
+				virtqueue_get_vring_size(cfv->vq_tx));
+			cfv->watermark_tx +=
+				virtqueue_get_vring_size(cfv->vq_tx) / 4;
+		}
+	}
+}
+
+/* Allocate a SKB and copy packet data to it */
+static struct sk_buff *cfv_alloc_and_copy_skb(int *err,
+					      struct cfv_info *cfv,
+					      u8 *frm, u32 frm_len)
+{
+	struct sk_buff *skb;
+	u32 cfpkt_len, pad_len;
+
+	*err = 0;
+	/* Verify the frame length against the MRU and the down-link head/tailroom */
+	if (frm_len > cfv->mru || frm_len <= cfv->rx_hr + cfv->rx_tr) {
+		netdev_err(cfv->ndev,
+			   "Invalid frmlen:%u mtu:%u hr:%d tr:%d\n",
+			   frm_len, cfv->mru, cfv->rx_hr,
+			   cfv->rx_tr);
+		*err = -EPROTO;
+		return NULL;
+	}
+
+	cfpkt_len = frm_len - (cfv->rx_hr + cfv->rx_tr);
+	pad_len = (unsigned long)(frm + cfv->rx_hr) & (IP_HDR_ALIGN - 1);
+
+	skb = netdev_alloc_skb(cfv->ndev, frm_len + pad_len);
+	if (!skb) {
+		*err = -ENOMEM;
+		return NULL;
+	}
+
+	skb_reserve(skb, cfv->rx_hr + pad_len);
+
+	skb_put_data(skb, frm + cfv->rx_hr, cfpkt_len);
+	return skb;
+}
+
+/* Get packets from the host vring */
+static int cfv_rx_poll(struct napi_struct *napi, int quota)
+{
+	struct cfv_info *cfv = container_of(napi, struct cfv_info, napi);
+	int rxcnt = 0;
+	int err = 0;
+	void *buf;
+	struct sk_buff *skb;
+	struct vringh_kiov *riov = &cfv->ctx.riov;
+	unsigned int skb_len;
+
+	do {
+		skb = NULL;
+
+		/* Put the previous iovec back on the used ring and
+		 * fetch a new iovec if we have processed all elements.
+		 */
+		if (riov->i == riov->used) {
+			if (cfv->ctx.head != USHRT_MAX) {
+				vringh_complete_kern(cfv->vr_rx,
+						     cfv->ctx.head,
+						     0);
+				cfv->ctx.head = USHRT_MAX;
+			}
+
+			err = vringh_getdesc_kern(
+				cfv->vr_rx,
+				riov,
+				NULL,
+				&cfv->ctx.head,
+				GFP_ATOMIC);
+
+			if (err <= 0)
+				goto exit;
+		}
+
+		buf = phys_to_virt((unsigned long) riov->iov[riov->i].iov_base);
+		/* TODO: Add check on valid buffer address */
+
+		skb = cfv_alloc_and_copy_skb(&err, cfv, buf,
+					     riov->iov[riov->i].iov_len);
+		if (unlikely(err))
+			goto exit;
+
+		/* Push received packet up the stack. */
+		skb_len = skb->len;
+		skb->protocol = htons(ETH_P_CAIF);
+		skb_reset_mac_header(skb);
+		skb->dev = cfv->ndev;
+		err = netif_receive_skb(skb);
+		if (unlikely(err)) {
+			++cfv->ndev->stats.rx_dropped;
+		} else {
+			++cfv->ndev->stats.rx_packets;
+			cfv->ndev->stats.rx_bytes += skb_len;
+		}
+
+		++riov->i;
+		++rxcnt;
+	} while (rxcnt < quota);
+
+	++cfv->stats.rx_napi_resched;
+	goto out;
+
+exit:
+	switch (err) {
+	case 0:
+		++cfv->stats.rx_napi_complete;
+
+		/* Really out of packets? (stolen from virtio_net) */
+		napi_complete(napi);
+		if (unlikely(!vringh_notify_enable_kern(cfv->vr_rx)) &&
+		    napi_schedule_prep(napi)) {
+			vringh_notify_disable_kern(cfv->vr_rx);
+			__napi_schedule(napi);
+		}
+		break;
+
+	case -ENOMEM:
+		++cfv->stats.rx_nomem;
+		dev_kfree_skb(skb);
+		/* Stop NAPI poll on OOM, we hope to be polled later */
+		napi_complete(napi);
+		vringh_notify_enable_kern(cfv->vr_rx);
+		break;
+
+	default:
+		/* We're doomed, any modem fault is fatal */
+		netdev_warn(cfv->ndev, "Bad ring, disable device\n");
+		cfv->ndev->stats.rx_dropped = riov->used - riov->i;
+		napi_complete(napi);
+		vringh_notify_disable_kern(cfv->vr_rx);
+		netif_carrier_off(cfv->ndev);
+		break;
+	}
+out:
+	if (rxcnt && vringh_need_notify_kern(cfv->vr_rx) > 0)
+		vringh_notify(cfv->vr_rx);
+	return rxcnt;
+}
+
+static void cfv_recv(struct virtio_device *vdev, struct vringh *vr_rx)
+{
+	struct cfv_info *cfv = vdev->priv;
+
+	++cfv->stats.rx_kicks;
+	vringh_notify_disable_kern(cfv->vr_rx);
+	napi_schedule(&cfv->napi);
+}
+
+static void cfv_destroy_genpool(struct cfv_info *cfv)
+{
+	if (cfv->alloc_addr)
+		dma_free_coherent(cfv->vdev->dev.parent->parent,
+				  cfv->allocsz, cfv->alloc_addr,
+				  cfv->alloc_dma);
+
+	if (!cfv->genpool)
+		return;
+	gen_pool_free(cfv->genpool, cfv->reserved_mem,
+		      cfv->reserved_size);
+	gen_pool_destroy(cfv->genpool);
+	cfv->genpool = NULL;
+}
+
+static int cfv_create_genpool(struct cfv_info *cfv)
+{
+	int err;
+
+	/* dma_alloc can only allocate whole pages, and we need a more
+	 * fine-grained allocation so we use genpool. We ask for space needed
+	 * by IP and a full ring. If the dma allocation fails we retry with a
+	 * smaller allocation size.
+	 */
+	err = -ENOMEM;
+	cfv->allocsz = (virtqueue_get_vring_size(cfv->vq_tx) *
+			(ETH_DATA_LEN + cfv->tx_hr + cfv->tx_tr) * 11)/10;
+	if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu)
+		return -EINVAL;
+
+	for (;;) {
+		if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) {
+			netdev_info(cfv->ndev, "Not enough device memory\n");
+			return -ENOMEM;
+		}
+
+		cfv->alloc_addr = dma_alloc_coherent(
+			cfv->vdev->dev.parent->parent,
+			cfv->allocsz, &cfv->alloc_dma,
+			GFP_ATOMIC);
+		if (cfv->alloc_addr)
+			break;
+
+		cfv->allocsz = (cfv->allocsz * 3) >> 2;
+	}
+
+	netdev_dbg(cfv->ndev, "Allocated %zd bytes from dma-memory\n",
+		   cfv->allocsz);
+
+	/* Allocate on 128 bytes boundaries (1 << 7) */
+	cfv->genpool = gen_pool_create(7, -1);
+	if (!cfv->genpool)
+		goto err;
+
+	err = gen_pool_add_virt(cfv->genpool, (unsigned long)cfv->alloc_addr,
+				(phys_addr_t)virt_to_phys(cfv->alloc_addr),
+				cfv->allocsz, -1);
+	if (err)
+		goto err;
+
+	/* Reserve some memory for low memory situations. If we hit the roof
+	 * in the memory pool, we stop TX flow and release the reserve.
+	 */
+	cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu;
+	cfv->reserved_mem = gen_pool_alloc(cfv->genpool,
+					   cfv->reserved_size);
+	if (!cfv->reserved_mem) {
+		err = -ENOMEM;
+		goto err;
+	}
+
+	cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx);
+	return 0;
+err:
+	cfv_destroy_genpool(cfv);
+	return err;
+}
+
+/* Enable the CAIF interface and allocate the memory-pool */
+static int cfv_netdev_open(struct net_device *netdev)
+{
+	struct cfv_info *cfv = netdev_priv(netdev);
+
+	if (cfv_create_genpool(cfv))
+		return -ENOMEM;
+
+	netif_carrier_on(netdev);
+	napi_enable(&cfv->napi);
+
+	/* Schedule NAPI to read any pending packets */
+	napi_schedule(&cfv->napi);
+	return 0;
+}
+
+/* Disable the CAIF interface and free the memory-pool */
+static int cfv_netdev_close(struct net_device *netdev)
+{
+	struct cfv_info *cfv = netdev_priv(netdev);
+	unsigned long flags;
+	struct buf_info *buf_info;
+
+	/* Disable interrupts, queues and NAPI polling */
+	netif_carrier_off(netdev);
+	virtqueue_disable_cb(cfv->vq_tx);
+	vringh_notify_disable_kern(cfv->vr_rx);
+	napi_disable(&cfv->napi);
+
+	/* Release any TX buffers on both used and available rings */
+	cfv_release_used_buf(cfv->vq_tx);
+	spin_lock_irqsave(&cfv->tx_lock, flags);
+	while ((buf_info = virtqueue_detach_unused_buf(cfv->vq_tx)))
+		free_buf_info(cfv, buf_info);
+	spin_unlock_irqrestore(&cfv->tx_lock, flags);
+
+	/* Release all dma allocated memory and destroy the pool */
+	cfv_destroy_genpool(cfv);
+	return 0;
+}
+
+/* Allocate a buffer in dma-memory and copy skb to it */
+static struct buf_info *cfv_alloc_and_copy_to_shm(struct cfv_info *cfv,
+						  struct sk_buff *skb,
+						  struct scatterlist *sg)
+{
+	struct caif_payload_info *info = (void *)&skb->cb;
+	struct buf_info *buf_info = NULL;
+	u8 pad_len, hdr_ofs;
+
+	if (!cfv->genpool)
+		goto err;
+
+	if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) {
+		netdev_warn(cfv->ndev, "Invalid packet len (%d > %d)\n",
+			    cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu);
+		goto err;
+	}
+
+	buf_info = kmalloc(sizeof(struct buf_info), GFP_ATOMIC);
+	if (unlikely(!buf_info))
+		goto err;
+
+	/* Make the IP header aligned in the buffer */
+	hdr_ofs = cfv->tx_hr + info->hdr_len;
+	pad_len = hdr_ofs & (IP_HDR_ALIGN - 1);
+	buf_info->size = cfv->tx_hr + skb->len + cfv->tx_tr + pad_len;
+
+	/* allocate dma memory buffer */
+	buf_info->vaddr = (void *)gen_pool_alloc(cfv->genpool, buf_info->size);
+	if (unlikely(!buf_info->vaddr))
+		goto err;
+
+	/* copy skb contents to send buffer */
+	skb_copy_bits(skb, 0, buf_info->vaddr + cfv->tx_hr + pad_len, skb->len);
+	sg_init_one(sg, buf_info->vaddr + pad_len,
+		    skb->len + cfv->tx_hr + cfv->rx_hr);
+
+	return buf_info;
+err:
+	kfree(buf_info);
+	return NULL;
+}
+
+/* Put the CAIF packet on the virtio ring and kick the receiver */
+static netdev_tx_t cfv_netdev_tx(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct cfv_info *cfv = netdev_priv(netdev);
+	struct buf_info *buf_info;
+	struct scatterlist sg;
+	unsigned long flags;
+	bool flow_off = false;
+	int ret;
+
+	/* garbage collect released buffers */
+	cfv_release_used_buf(cfv->vq_tx);
+	spin_lock_irqsave(&cfv->tx_lock, flags);
+
+	/* Flow-off check takes into account number of cpus to make sure
+	 * virtqueue will not be overfilled in any possible smp conditions.
+	 *
+	 * Flow-on is triggered when sufficient buffers are freed
+	 */
+	if (unlikely(cfv->vq_tx->num_free <= num_present_cpus())) {
+		flow_off = true;
+		cfv->stats.tx_full_ring++;
+	}
+
+	/* If we run out of memory, we release the memory reserve and retry
+	 * allocation.
+	 */
+	buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
+	if (unlikely(!buf_info)) {
+		cfv->stats.tx_no_mem++;
+		flow_off = true;
+
+		if (cfv->reserved_mem && cfv->genpool) {
+			gen_pool_free(cfv->genpool, cfv->reserved_mem,
+				      cfv->reserved_size);
+			cfv->reserved_mem = 0;
+			buf_info = cfv_alloc_and_copy_to_shm(cfv, skb, &sg);
+		}
+	}
+
+	if (unlikely(flow_off)) {
+		/* Turn flow on when a 1/4 of the descriptors are released */
+		cfv->watermark_tx = virtqueue_get_vring_size(cfv->vq_tx) / 4;
+		/* Enable notifications of recycled TX buffers */
+		virtqueue_enable_cb(cfv->vq_tx);
+		netif_tx_stop_all_queues(netdev);
+	}
+
+	if (unlikely(!buf_info)) {
+		/* If the memory reserve does its job, this shouldn't happen */
+		netdev_warn(cfv->ndev, "Out of gen_pool memory\n");
+		goto err;
+	}
+
+	ret = virtqueue_add_outbuf(cfv->vq_tx, &sg, 1, buf_info, GFP_ATOMIC);
+	if (unlikely(ret < 0)) {
+		/* If flow control works, this shouldn't happen */
+		netdev_warn(cfv->ndev, "Failed adding buffer to TX vring:%d\n",
+			    ret);
+		goto err;
+	}
+
+	/* update netdev statistics */
+	cfv->ndev->stats.tx_packets++;
+	cfv->ndev->stats.tx_bytes += skb->len;
+	spin_unlock_irqrestore(&cfv->tx_lock, flags);
+
+	/* tell the remote processor it has a pending message to read */
+	virtqueue_kick(cfv->vq_tx);
+
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+err:
+	spin_unlock_irqrestore(&cfv->tx_lock, flags);
+	cfv->ndev->stats.tx_dropped++;
+	free_buf_info(cfv, buf_info);
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static void cfv_tx_release_tasklet(struct tasklet_struct *t)
+{
+	struct cfv_info *cfv = from_tasklet(cfv, t, tx_release_tasklet);
+	cfv_release_used_buf(cfv->vq_tx);
+}
+
+static const struct net_device_ops cfv_netdev_ops = {
+	.ndo_open = cfv_netdev_open,
+	.ndo_stop = cfv_netdev_close,
+	.ndo_start_xmit = cfv_netdev_tx,
+};
+
+static void cfv_netdev_setup(struct net_device *netdev)
+{
+	netdev->netdev_ops = &cfv_netdev_ops;
+	netdev->type = ARPHRD_CAIF;
+	netdev->tx_queue_len = 100;
+	netdev->flags = IFF_POINTOPOINT | IFF_NOARP;
+	netdev->mtu = CFV_DEF_MTU_SIZE;
+	netdev->needs_free_netdev = true;
+}
+
+/* Create debugfs counters for the device */
+static inline void debugfs_init(struct cfv_info *cfv)
+{
+	cfv->debugfs = debugfs_create_dir(netdev_name(cfv->ndev), NULL);
+
+	debugfs_create_u32("rx-napi-complete", 0400, cfv->debugfs,
+			   &cfv->stats.rx_napi_complete);
+	debugfs_create_u32("rx-napi-resched", 0400, cfv->debugfs,
+			   &cfv->stats.rx_napi_resched);
+	debugfs_create_u32("rx-nomem", 0400, cfv->debugfs,
+			   &cfv->stats.rx_nomem);
+	debugfs_create_u32("rx-kicks", 0400, cfv->debugfs,
+			   &cfv->stats.rx_kicks);
+	debugfs_create_u32("tx-full-ring", 0400, cfv->debugfs,
+			   &cfv->stats.tx_full_ring);
+	debugfs_create_u32("tx-no-mem", 0400, cfv->debugfs,
+			   &cfv->stats.tx_no_mem);
+	debugfs_create_u32("tx-kicks", 0400, cfv->debugfs,
+			   &cfv->stats.tx_kicks);
+	debugfs_create_u32("tx-flow-on", 0400, cfv->debugfs,
+			   &cfv->stats.tx_flow_on);
+}
+
+/* Set up CAIF for a virtio device */
+static int cfv_probe(struct virtio_device *vdev)
+{
+	vq_callback_t *vq_cbs = cfv_release_cb;
+	vrh_callback_t *vrh_cbs = cfv_recv;
+	const char *names = "output";
+	const char *cfv_netdev_name = "cfvrt";
+	struct net_device *netdev;
+	struct cfv_info *cfv;
+	int err;
+
+	netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
+			      NET_NAME_UNKNOWN, cfv_netdev_setup);
+	if (!netdev)
+		return -ENOMEM;
+
+	cfv = netdev_priv(netdev);
+	cfv->vdev = vdev;
+	cfv->ndev = netdev;
+
+	spin_lock_init(&cfv->tx_lock);
+
+	/* Get the RX virtio ring. This is a "host side vring". */
+	err = -ENODEV;
+	if (!vdev->vringh_config || !vdev->vringh_config->find_vrhs)
+		goto err;
+
+	err = vdev->vringh_config->find_vrhs(vdev, 1, &cfv->vr_rx, &vrh_cbs);
+	if (err)
+		goto err;
+
+	/* Get the TX virtio ring. This is a "guest side vring". */
+	err = virtio_find_vqs(vdev, 1, &cfv->vq_tx, &vq_cbs, &names, NULL);
+	if (err)
+		goto err;
+
+	/* Get the CAIF configuration from virtio config space, if available */
+	if (vdev->config->get) {
+		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+			     &cfv->tx_hr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, headroom,
+			     &cfv->rx_hr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+			     &cfv->tx_tr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, tailroom,
+			     &cfv->rx_tr);
+		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+			     &cfv->mtu);
+		virtio_cread(vdev, struct virtio_caif_transf_config, mtu,
+			     &cfv->mru);
+	} else {
+		cfv->tx_hr = CFV_DEF_HEADROOM;
+		cfv->rx_hr = CFV_DEF_HEADROOM;
+		cfv->tx_tr = CFV_DEF_TAILROOM;
+		cfv->rx_tr = CFV_DEF_TAILROOM;
+		cfv->mtu = CFV_DEF_MTU_SIZE;
+		cfv->mru = CFV_DEF_MTU_SIZE;
+	}
+
+	netdev->needed_headroom = cfv->tx_hr;
+	netdev->needed_tailroom = cfv->tx_tr;
+
+	/* Disable buffer release interrupts unless we have stopped TX queues */
+	virtqueue_disable_cb(cfv->vq_tx);
+
+	netdev->mtu = cfv->mtu - cfv->tx_tr;
+	vdev->priv = cfv;
+
+	/* Initialize NAPI poll context data */
+	vringh_kiov_init(&cfv->ctx.riov, NULL, 0);
+	cfv->ctx.head = USHRT_MAX;
+	netif_napi_add_weight(netdev, &cfv->napi, cfv_rx_poll,
+			      CFV_DEFAULT_QUOTA);
+
+	tasklet_setup(&cfv->tx_release_tasklet, cfv_tx_release_tasklet);
+
+	/* Carrier is off until netdevice is opened */
+	netif_carrier_off(netdev);
+
+	/* serialize netdev register + virtio_device_ready() with ndo_open() */
+	rtnl_lock();
+
+	/* register Netdev */
+	err = register_netdevice(netdev);
+	if (err) {
+		rtnl_unlock();
+		dev_err(&vdev->dev, "Unable to register netdev (%d)\n", err);
+		goto err;
+	}
+
+	virtio_device_ready(vdev);
+
+	rtnl_unlock();
+
+	debugfs_init(cfv);
+
+	return 0;
+err:
+	netdev_warn(cfv->ndev, "CAIF Virtio probe failed:%d\n", err);
+
+	if (cfv->vr_rx)
+		vdev->vringh_config->del_vrhs(cfv->vdev);
+	if (cfv->vdev)
+		vdev->config->del_vqs(cfv->vdev);
+	free_netdev(netdev);
+	return err;
+}
+
+static void cfv_remove(struct virtio_device *vdev)
+{
+	struct cfv_info *cfv = vdev->priv;
+
+	rtnl_lock();
+	dev_close(cfv->ndev);
+	rtnl_unlock();
+
+	tasklet_kill(&cfv->tx_release_tasklet);
+	debugfs_remove_recursive(cfv->debugfs);
+
+	vringh_kiov_cleanup(&cfv->ctx.riov);
+	virtio_reset_device(vdev);
+	vdev->vringh_config->del_vrhs(cfv->vdev);
+	cfv->vr_rx = NULL;
+	vdev->config->del_vqs(cfv->vdev);
+	unregister_netdev(cfv->ndev);
+}
+
+static struct virtio_device_id id_table[] = {
+	{ VIRTIO_ID_CAIF, VIRTIO_DEV_ANY_ID },
+	{ 0 },
+};
+
+static unsigned int features[] = {
+};
+
+static struct virtio_driver caif_virtio_driver = {
+	.feature_table = features,
+	.feature_table_size = ARRAY_SIZE(features),
+	.driver.name = KBUILD_MODNAME,
+	.driver.owner = THIS_MODULE,
+	.id_table = id_table,
+	.probe = cfv_probe,
+	.remove = cfv_remove,
+};
+
+module_virtio_driver(caif_virtio_driver);
+MODULE_DEVICE_TABLE(virtio, id_table);
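
For reference, the CAIF_TTY help text above says the N_CAIF line discipline is "opened from user space"; the standard mechanism for that is the TIOCSETD ioctl. A minimal user-space sketch of that step, assuming root privileges, a kernel built with CONFIG_CAIF_TTY, and a CAIF-capable modem behind the hypothetical port /dev/ttyS1 (the path is a placeholder, not something this commit defines):

/* Attach the N_CAIF line discipline to a serial port. While the fd
 * stays open, ldisc_open() above has created a "cf<ttyname>" CAIF
 * network interface; closing the fd detaches the ldisc and, via
 * ldisc_close(), schedules removal of that interface.
 */
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/tty.h>		/* N_CAIF */

int main(void)
{
	int ldisc = N_CAIF;
	int fd = open("/dev/ttyS1", O_RDWR | O_NOCTTY);	/* hypothetical port */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, TIOCSETD, &ldisc) < 0) {	/* needs CAP_SYS_ADMIN */
		perror("TIOCSETD");
		close(fd);
		return 1;
	}
	pause();	/* keep the line discipline attached until killed */
	return 0;
}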
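
The comment in cfv_create_genpool() explains the layering: dma_alloc_coherent() can only hand out whole pages, so the driver puts one coarse coherent block into a genpool and sub-allocates 128-byte-aligned TX buffers from it. A self-contained sketch of that pattern, assuming a hypothetical struct device *dev and a 64 KiB backing block (error unwinding trimmed for brevity; the virt_to_phys() call on the coherent block mirrors what cfv_create_genpool() itself does):

#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/sizes.h>
#include <linux/io.h>

static int example_pool_setup(struct device *dev)
{
	struct gen_pool *pool;
	unsigned long buf;
	dma_addr_t dma;
	void *block;
	int err;

	/* One coarse, page-granular coherent allocation... */
	block = dma_alloc_coherent(dev, SZ_64K, &dma, GFP_KERNEL);
	if (!block)
		return -ENOMEM;

	/* ...backing a pool with 2^7 = 128-byte allocation granules */
	pool = gen_pool_create(7, -1);
	if (!pool)
		return -ENOMEM;

	err = gen_pool_add_virt(pool, (unsigned long)block,
				virt_to_phys(block), SZ_64K, -1);
	if (err)
		return err;

	/* Fine-grained sub-allocation, as done per TX buffer above */
	buf = gen_pool_alloc(pool, 512);
	if (buf)
		gen_pool_free(pool, buf, 512);

	gen_pool_destroy(pool);
	dma_free_coherent(dev, SZ_64K, block, dma);
	return 0;
}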