author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-08 04:15:07 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-08 04:15:07 +0000
commit    7fd92ba31ac1c688b59aa93cc03b748f920df8fe (patch)
tree      baa580b97c260c790730e2525483eb1953f3c39d /drivers/net/xen-netback/netback.c
parent    Adding upstream version 4.19.269. (diff)
Adding upstream version 4.19.282. (tag: upstream/4.19.282)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/xen-netback/netback.c')
 drivers/net/xen-netback/netback.c | 31 +++++++++++++++++++++++++------
 1 file changed, 25 insertions(+), 6 deletions(-)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index fc389f2bb..d2b79d7c0 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -327,6 +327,7 @@ static int xenvif_count_requests(struct xenvif_queue *queue,
struct xenvif_tx_cb {
u16 copy_pending_idx[XEN_NETBK_LEGACY_SLOTS_MAX + 1];
u8 copy_count;
+ u32 split_mask;
};
#define XENVIF_TX_CB(skb) ((struct xenvif_tx_cb *)(skb)->cb)
@@ -354,6 +355,8 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
struct sk_buff *skb =
alloc_skb(size + NET_SKB_PAD + NET_IP_ALIGN,
GFP_ATOMIC | __GFP_NOWARN);
+
+ BUILD_BUG_ON(sizeof(*XENVIF_TX_CB(skb)) > sizeof(skb->cb));
if (unlikely(skb == NULL))
return NULL;
@@ -389,11 +392,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
nr_slots = shinfo->nr_frags + 1;
copy_count(skb) = 0;
+ XENVIF_TX_CB(skb)->split_mask = 0;
/* Create copy ops for exactly data_len bytes into the skb head. */
__skb_put(skb, data_len);
while (data_len > 0) {
int amount = data_len > txp->size ? txp->size : data_len;
+ bool split = false;
cop->source.u.ref = txp->gref;
cop->source.domid = queue->vif->domid;
@@ -406,6 +411,13 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
cop->dest.u.gmfn = virt_to_gfn(skb->data + skb_headlen(skb)
- data_len);
+ /* Don't cross local page boundary! */
+ if (cop->dest.offset + amount > XEN_PAGE_SIZE) {
+ amount = XEN_PAGE_SIZE - cop->dest.offset;
+ XENVIF_TX_CB(skb)->split_mask |= 1U << copy_count(skb);
+ split = true;
+ }
+
cop->len = amount;
cop->flags = GNTCOPY_source_gref;
@@ -413,7 +425,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
pending_idx = queue->pending_ring[index];
callback_param(queue, pending_idx).ctx = NULL;
copy_pending_idx(skb, copy_count(skb)) = pending_idx;
- copy_count(skb)++;
+ if (!split)
+ copy_count(skb)++;
cop++;
data_len -= amount;
@@ -434,7 +447,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
nr_slots--;
} else {
/* The copy op partially covered the tx_request.
- * The remainder will be mapped.
+ * The remainder will be mapped or copied in the next
+ * iteration.
*/
txp->offset += amount;
txp->size -= amount;
@@ -532,6 +546,13 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
pending_idx = copy_pending_idx(skb, i);
newerr = (*gopp_copy)->status;
+
+ /* Split copies need to be handled together. */
+ if (XENVIF_TX_CB(skb)->split_mask & (1U << i)) {
+ (*gopp_copy)++;
+ if (!newerr)
+ newerr = (*gopp_copy)->status;
+ }
if (likely(!newerr)) {
/* The first frag might still have this slot mapped */
if (i < copy_count(skb) - 1 || !sharedslot)
@@ -968,10 +989,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
/* No crossing a page as the payload mustn't fragment. */
if (unlikely((txreq.offset + txreq.size) > XEN_PAGE_SIZE)) {
- netdev_err(queue->vif->dev,
- "txreq.offset: %u, size: %u, end: %lu\n",
- txreq.offset, txreq.size,
- (unsigned long)(txreq.offset&~XEN_PAGE_MASK) + txreq.size);
+ netdev_err(queue->vif->dev, "Cross page boundary, txreq.offset: %u, size: %u\n",
+ txreq.offset, txreq.size);
xenvif_fatal_tx_err(queue->vif);
break;
}
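
The change works in two halves: xenvif_get_requests() cuts any copy whose destination would cross a local page boundary into two grant-copy ops for the same pending slot, recording the split in the new split_mask, and xenvif_tx_check_gop() later consumes both ops for such a slot, failing it if either half failed. Below is a minimal standalone sketch of that bookkeeping, not kernel code: the names (copy_op, build_ops, slot_status, PAGE_SZ) are illustrative stand-ins, and it assumes, as the real code already guarantees via the page-boundary check in xenvif_tx_build_gops(), that a single request never covers more than one page of data.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u	/* stand-in for XEN_PAGE_SIZE */

struct copy_op {
	unsigned int dest_off;	/* destination offset within its page */
	unsigned int len;
	int status;		/* 0 on success, as with GNTST_okay */
};

/*
 * Emit copy ops for `len` bytes landing at absolute destination offset
 * `dest`. An op that would cross a page boundary is cut at the boundary;
 * the slot's bit in *split_mask marks that the next op is its second
 * half, and the slot counter (the analogue of copy_count(skb)) only
 * advances once a slot's data is complete.
 */
static unsigned int build_ops(struct copy_op *ops, unsigned int dest,
			      unsigned int len, uint32_t *split_mask)
{
	unsigned int n = 0, slot = 0;

	while (len > 0) {
		/* Each request covers at most one page of source data. */
		unsigned int amount = len > PAGE_SZ ? PAGE_SZ : len;
		unsigned int off = dest % PAGE_SZ;

		if (off + amount > PAGE_SZ) {
			amount = PAGE_SZ - off;		/* stop at the boundary */
			*split_mask |= 1u << slot;	/* second half follows */
		} else {
			slot++;				/* slot is complete */
		}
		ops[n].dest_off = off;
		ops[n].len = amount;
		ops[n].status = 0;
		n++;
		dest += amount;
		len -= amount;
	}
	return n;
}

/*
 * Mirror of the status check: a split slot consumes two consecutive ops
 * and fails if either half failed (the first error wins).
 */
static int slot_status(const struct copy_op **gop, uint32_t split_mask,
		       unsigned int slot)
{
	int err = (*gop)->status;

	if (split_mask & (1u << slot)) {
		(*gop)++;
		if (!err)
			err = (*gop)->status;
	}
	(*gop)++;
	return err;
}

int main(void)
{
	struct copy_op ops[8];
	uint32_t split_mask = 0;
	const struct copy_op *gop = ops;
	/* 3000 bytes landing 2000 bytes into a page cross one boundary,
	 * so a single slot is backed by two ops (2096 + 904 bytes). */
	unsigned int n = build_ops(ops, 2000, 3000, &split_mask);

	printf("%u ops, split_mask=%#x, slot 0 status=%d\n",
	       n, (unsigned int)split_mask,
	       slot_status(&gop, split_mask, 0));
	return 0;
}

Built with any C compiler, the sketch prints "2 ops, split_mask=0x1, slot 0 status=0": the 3000 bytes become a 2096-byte op up to the boundary plus a 904-byte op on the next page, both accounted to slot 0, just as the patch keeps both halves under one pending_idx.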