Diffstat (limited to 'drivers/net/ethernet/google/gve')
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h          |  8
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c   | 88
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h   |  3
-rw-r--r--  drivers/net/ethernet/google/gve/gve_dqo.h      |  3
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c  |  2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c     | 17
-rw-r--r--  drivers/net/ethernet/google/gve/gve_register.h |  9
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c       | 17
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c       |  2
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c   | 37
10 files changed, 112 insertions(+), 74 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 0d1e681be2..b803491546 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -8,6 +8,7 @@
#define _GVE_H_
#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>
@@ -41,12 +42,16 @@
#define NIC_TX_STATS_REPORT_NUM 0
#define NIC_RX_STATS_REPORT_NUM 4
+#define GVE_ADMINQ_BUFFER_SIZE 4096
+
#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
-#define GVE_RX_BUFFER_SIZE_DQO 2048
+#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
+
+#define GVE_DEFAULT_RX_BUFFER_OFFSET 2048
#define GVE_XDP_ACTIONS 5
@@ -672,6 +677,7 @@ struct gve_priv {
/* Admin queue - see gve_adminq.h*/
union gve_adminq_command *adminq;
dma_addr_t adminq_bus_addr;
+ struct dma_pool *adminq_pool;
u32 adminq_mask; /* masks prod_cnt to adminq size */
u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
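
A note on the sizing above: with the admin queue pinned to a 4096-byte buffer, the ring geometry no longer depends on PAGE_SIZE. Assuming the 64-byte sizeof(union gve_adminq_command) that gve_adminq.h asserts, the mask arithmetic works out as in this sketch (names here are illustrative, not gve identifiers):

/* Sketch only: mirrors the mask math in gve_adminq_alloc(); the 64-byte
 * command size is taken from the static_assert in gve_adminq.h.
 */
enum {
	EXAMPLE_ADMINQ_BUFFER_SIZE = 4096,
	EXAMPLE_ADMINQ_CMD_SIZE    = 64,
	EXAMPLE_ADMINQ_SLOTS = EXAMPLE_ADMINQ_BUFFER_SIZE /
			       EXAMPLE_ADMINQ_CMD_SIZE,   /* 64 slots */
	EXAMPLE_ADMINQ_MASK  = EXAMPLE_ADMINQ_SLOTS - 1,  /* mask of 63 */
};
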
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index 79db7a6d42..12fbd723ec 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -194,12 +194,19 @@ gve_process_device_options(struct gve_priv *priv,
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
- priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
- &priv->adminq_bus_addr, GFP_KERNEL);
- if (unlikely(!priv->adminq))
+ priv->adminq_pool = dma_pool_create("adminq_pool", dev,
+ GVE_ADMINQ_BUFFER_SIZE, 0, 0);
+ if (unlikely(!priv->adminq_pool))
return -ENOMEM;
+ priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
+ &priv->adminq_bus_addr);
+ if (unlikely(!priv->adminq)) {
+ dma_pool_destroy(priv->adminq_pool);
+ return -ENOMEM;
+ }
- priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
+ priv->adminq_mask =
+ (GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1;
priv->adminq_prod_cnt = 0;
priv->adminq_cmd_fail = 0;
priv->adminq_timeouts = 0;
@@ -218,9 +225,20 @@ int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
priv->adminq_get_ptype_map_cnt = 0;
/* Setup Admin queue with the device */
- iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
- &priv->reg_bar0->adminq_pfn);
-
+ if (priv->pdev->revision < 0x1) {
+ iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
+ &priv->reg_bar0->adminq_pfn);
+ } else {
+ iowrite16be(GVE_ADMINQ_BUFFER_SIZE,
+ &priv->reg_bar0->adminq_length);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ iowrite32be(priv->adminq_bus_addr >> 32,
+ &priv->reg_bar0->adminq_base_address_hi);
+#endif
+ iowrite32be(priv->adminq_bus_addr,
+ &priv->reg_bar0->adminq_base_address_lo);
+ iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
+ }
gve_set_admin_queue_ok(priv);
return 0;
}
@@ -230,16 +248,27 @@ void gve_adminq_release(struct gve_priv *priv)
int i = 0;
/* Tell the device the adminq is leaving */
- iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
- while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
- /* If this is reached the device is unrecoverable and still
- * holding memory. Continue looping to avoid memory corruption,
- * but WARN so it is visible what is going on.
- */
- if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
- WARN(1, "Unrecoverable platform error!");
- i++;
- msleep(GVE_ADMINQ_SLEEP_LEN);
+ if (priv->pdev->revision < 0x1) {
+ iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
+ while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
+ /* If this is reached the device is unrecoverable and still
+ * holding memory. Continue looping to avoid memory corruption,
+ * but WARN so it is visible what is going on.
+ */
+ if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
+ WARN(1, "Unrecoverable platform error!");
+ i++;
+ msleep(GVE_ADMINQ_SLEEP_LEN);
+ }
+ } else {
+ iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
+ while (!(ioread32be(&priv->reg_bar0->device_status)
+ & GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
+ if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
+ WARN(1, "Unrecoverable platform error!");
+ i++;
+ msleep(GVE_ADMINQ_SLEEP_LEN);
+ }
}
gve_clear_device_rings_ok(priv);
gve_clear_device_resources_ok(priv);
@@ -251,7 +280,8 @@ void gve_adminq_free(struct device *dev, struct gve_priv *priv)
if (!gve_get_admin_queue_ok(priv))
return;
gve_adminq_release(priv);
- dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
+ dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
+ dma_pool_destroy(priv->adminq_pool);
gve_clear_admin_queue_ok(priv);
}
@@ -697,18 +727,7 @@ static int gve_set_desc_cnt(struct gve_priv *priv,
struct gve_device_descriptor *descriptor)
{
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
- if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
- dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
- priv->tx_desc_cnt);
- return -EINVAL;
- }
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
- < PAGE_SIZE) {
- dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
- priv->rx_desc_cnt);
- return -EINVAL;
- }
return 0;
}
@@ -778,8 +797,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
u16 mtu;
memset(&cmd, 0, sizeof(cmd));
- descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
- &descriptor_bus, GFP_KERNEL);
+ descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
+ &descriptor_bus);
if (!descriptor)
return -ENOMEM;
cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
@@ -787,7 +806,8 @@ int gve_adminq_describe_device(struct gve_priv *priv)
cpu_to_be64(descriptor_bus);
cmd.describe_device.device_descriptor_version =
cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
- cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);
+ cmd.describe_device.available_length =
+ cpu_to_be32(GVE_ADMINQ_BUFFER_SIZE);
err = gve_adminq_execute_cmd(priv, &cmd);
if (err)
@@ -868,8 +888,7 @@ int gve_adminq_describe_device(struct gve_priv *priv)
dev_op_jumbo_frames, dev_op_dqo_qpl);
free_device_descriptor:
- dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
- descriptor_bus);
+ dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
return err;
}
@@ -898,6 +917,7 @@ int gve_adminq_register_page_list(struct gve_priv *priv,
.page_list_id = cpu_to_be32(qpl->id),
.num_pages = cpu_to_be32(num_entries),
.page_address_list_addr = cpu_to_be64(page_list_bus),
+ .page_size = cpu_to_be64(PAGE_SIZE),
};
err = gve_adminq_execute_cmd(priv, &cmd);
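
The adminq changes above follow the standard dmapool lifecycle: one pool of fixed-size coherent buffers now backs both the command ring and one-off allocations such as the device descriptor. A minimal sketch of the create/alloc/free/destroy pairing, with "dev" standing in for the gve PCI device and the 4096-byte size mirroring GVE_ADMINQ_BUFFER_SIZE:

#include <linux/dmapool.h>
#include <linux/errno.h>
#include <linux/gfp.h>

/* Sketch, not gve code: round-trip one buffer through a DMA pool. */
static int example_adminq_roundtrip(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t bus;
	void *buf;

	pool = dma_pool_create("example_pool", dev, 4096, 0, 0);
	if (!pool)
		return -ENOMEM;

	buf = dma_pool_alloc(pool, GFP_KERNEL, &bus);
	if (!buf) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... hand "bus" to the device, touch "buf" from the CPU ... */

	dma_pool_free(pool, buf, bus);	/* every allocation is returned... */
	dma_pool_destroy(pool);		/* ...before the pool is torn down */
	return 0;
}

This is why gve_adminq_describe_device() can drop its private dma_alloc_coherent() call: the descriptor buffer is just another 4096-byte allocation from the same pool.
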
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 38a22279e8..5865ccdccb 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -219,9 +219,10 @@ struct gve_adminq_register_page_list {
__be32 page_list_id;
__be32 num_pages;
__be64 page_address_list_addr;
+ __be64 page_size;
};
-static_assert(sizeof(struct gve_adminq_register_page_list) == 16);
+static_assert(sizeof(struct gve_adminq_register_page_list) == 24);
struct gve_adminq_unregister_page_list {
__be32 page_list_id;
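
The updated assertion above is consistent with the new layout: two __be32 fields plus two __be64 fields give 4 + 4 + 8 + 8 = 24 bytes, with the 8-byte members naturally aligned at offsets 8 and 16, so no padding is introduced.
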
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index 1eb4d5fd85..c36b93f0de 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -33,6 +33,9 @@
#define GVE_DEALLOCATE_COMPL_TIMEOUT 60
netdev_tx_t gve_tx_dqo(struct sk_buff *skb, struct net_device *dev);
+netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
bool gve_tx_poll_dqo(struct gve_notify_block *block, bool do_clean);
int gve_rx_poll_dqo(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings_dqo(struct gve_priv *priv);
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 233e594690..e5397aa1e4 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -519,7 +519,7 @@ static int gve_set_tunable(struct net_device *netdev,
case ETHTOOL_RX_COPYBREAK:
{
u32 max_copybreak = gve_is_gqi(priv) ?
- (PAGE_SIZE / 2) : priv->data_buffer_size_dqo;
+ GVE_DEFAULT_RX_BUFFER_SIZE : priv->data_buffer_size_dqo;
len = *(u32 *)value;
if (len > max_copybreak)
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 2d42e73383..619bf63ec9 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -79,6 +79,18 @@ static int gve_verify_driver_compatibility(struct gve_priv *priv)
return err;
}
+static netdev_features_t gve_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+
+ if (!gve_is_gqi(priv))
+ return gve_features_check_dqo(skb, dev, features);
+
+ return features;
+}
+
static netdev_tx_t gve_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct gve_priv *priv = netdev_priv(dev);
@@ -1316,7 +1328,7 @@ static int gve_open(struct net_device *dev)
/* Hard code this for now. This may be tuned in the future for
* performance.
*/
- priv->data_buffer_size_dqo = GVE_RX_BUFFER_SIZE_DQO;
+ priv->data_buffer_size_dqo = GVE_DEFAULT_RX_BUFFER_SIZE;
}
err = gve_create_rings(priv);
if (err)
@@ -1652,7 +1664,7 @@ static int verify_xdp_configuration(struct net_device *dev)
return -EOPNOTSUPP;
}
- if (dev->mtu > (PAGE_SIZE / 2) - sizeof(struct ethhdr) - GVE_RX_PAD) {
+ if (dev->mtu > GVE_DEFAULT_RX_BUFFER_SIZE - sizeof(struct ethhdr) - GVE_RX_PAD) {
netdev_warn(dev, "XDP is not supported for mtu %d.\n",
dev->mtu);
return -EOPNOTSUPP;
@@ -1879,6 +1891,7 @@ err:
static const struct net_device_ops gve_netdev_ops = {
.ndo_start_xmit = gve_start_xmit,
+ .ndo_features_check = gve_features_check,
.ndo_open = gve_open,
.ndo_stop = gve_close,
.ndo_get_stats64 = gve_get_stats,
diff --git a/drivers/net/ethernet/google/gve/gve_register.h b/drivers/net/ethernet/google/gve/gve_register.h
index fb655463c3..8e72b97008 100644
--- a/drivers/net/ethernet/google/gve/gve_register.h
+++ b/drivers/net/ethernet/google/gve/gve_register.h
@@ -18,11 +18,20 @@ struct gve_registers {
__be32 adminq_event_counter;
u8 reserved[3];
u8 driver_version;
+ __be32 adminq_base_address_hi;
+ __be32 adminq_base_address_lo;
+ __be16 adminq_length;
};
enum gve_device_status_flags {
GVE_DEVICE_STATUS_RESET_MASK = BIT(1),
GVE_DEVICE_STATUS_LINK_STATUS_MASK = BIT(2),
GVE_DEVICE_STATUS_REPORT_STATS_MASK = BIT(3),
+ GVE_DEVICE_STATUS_DEVICE_IS_RESET = BIT(4),
+};
+
+enum gve_driver_status_flags {
+ GVE_DRIVER_STATUS_RUN_MASK = BIT(0),
+ GVE_DRIVER_STATUS_RESET_MASK = BIT(1),
};
#endif /* _GVE_REGISTER_H_ */
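
The new hi/lo register pair is the usual way to hand a 64-bit DMA address to a device through 32-bit BAR registers. A hedged sketch of that split, where "reg_hi" and "reg_lo" are hypothetical iomem pointers rather than gve names:

#include <linux/io.h>
#include <linux/kernel.h>

/* Sketch: split a 64-bit bus address across two big-endian 32-bit
 * registers, as gve_adminq_alloc() now does for revision >= 0x1 devices.
 */
static void example_write_split_addr(void __iomem *reg_hi,
				     void __iomem *reg_lo, dma_addr_t addr)
{
	iowrite32be(upper_32_bits(addr), reg_hi);
	iowrite32be(lower_32_bits(addr), reg_lo);
}

Using upper_32_bits() is an alternative to the patch's CONFIG_ARCH_DMA_ADDR_T_64BIT guard: it avoids the undefined ">> 32" on kernels where dma_addr_t is only 32 bits wide.
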
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 93ff7c8ec9..76615d47e0 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -211,9 +211,9 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
{
struct gve_rx_ring *rx = &priv->rx[idx];
struct device *hdev = &priv->pdev->dev;
- u32 slots, npages;
int filled_pages;
size_t bytes;
+ u32 slots;
int err;
netif_dbg(priv, drv, priv->dev, "allocating rx ring\n");
@@ -270,12 +270,6 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
/* alloc rx desc ring */
bytes = sizeof(struct gve_rx_desc) * priv->rx_desc_cnt;
- npages = bytes / PAGE_SIZE;
- if (npages * PAGE_SIZE != bytes) {
- err = -EIO;
- goto abort_with_q_resources;
- }
-
rx->desc.desc_ring = dma_alloc_coherent(hdev, bytes, &rx->desc.bus,
GFP_KERNEL);
if (!rx->desc.desc_ring) {
@@ -289,7 +283,7 @@ static int gve_rx_alloc_ring(struct gve_priv *priv, int idx)
/* Allocating half-page buffers allows page-flipping which is faster
* than copying or allocating new pages.
*/
- rx->packet_buffer_size = PAGE_SIZE / 2;
+ rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
gve_rx_add_to_block(priv, idx);
@@ -405,10 +399,10 @@ static struct sk_buff *gve_rx_add_frags(struct napi_struct *napi,
static void gve_rx_flip_buff(struct gve_rx_slot_page_info *page_info, __be64 *slot_addr)
{
- const __be64 offset = cpu_to_be64(PAGE_SIZE / 2);
+ const __be64 offset = cpu_to_be64(GVE_DEFAULT_RX_BUFFER_OFFSET);
/* "flip" to other packet buffer on this page */
- page_info->page_offset ^= PAGE_SIZE / 2;
+ page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
*(slot_addr) ^= offset;
}
@@ -513,8 +507,7 @@ static struct sk_buff *gve_rx_copy_to_pool(struct gve_rx_ring *rx,
return NULL;
gve_dec_pagecnt_bias(copy_page_info);
- copy_page_info->page_offset += rx->packet_buffer_size;
- copy_page_info->page_offset &= (PAGE_SIZE - 1);
+ copy_page_info->page_offset ^= GVE_DEFAULT_RX_BUFFER_OFFSET;
if (copy_page_info->can_flip) {
/* We have used both halves of this copy page, it
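
The buffer-flip changes above are plain XOR arithmetic: within one 4096-byte page the offset only ever takes the values 0 and 2048, so XORing with GVE_DEFAULT_RX_BUFFER_OFFSET toggles between the two halves. A tiny sketch, assuming the 2048-byte default:

/* Sketch of the flip in gve_rx_flip_buff(). */
static unsigned int example_flip(unsigned int page_offset)
{
	return page_offset ^ 2048;	/* 0 -> 2048, 2048 -> 0 */
}

The same reasoning explains the gve_rx_copy_to_pool() change: the old "+= size; &= (PAGE_SIZE - 1)" form only lands back on {0, 2048} when PAGE_SIZE is exactly 4096, while the XOR keeps the copy offset toggling between the same two 2048-byte slots regardless of page size, in line with the PAGE_SIZE decoupling elsewhere in the patch.
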
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 9f6ffc4a54..07ba124780 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -819,7 +819,7 @@ int gve_xdp_xmit_one(struct gve_priv *priv, struct gve_tx_ring *tx,
return 0;
}
-#define GVE_TX_START_THRESH PAGE_SIZE
+#define GVE_TX_START_THRESH 4096
static int gve_clean_tx_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do, bool try_to_wake)
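
Pinning GVE_TX_START_THRESH to a literal 4096 keeps its historical value on 4 KiB-page kernels while preventing it from silently growing on architectures built with larger pages (for example 64 KiB ARM64 configurations), which appears to be the motivation behind replacing PAGE_SIZE throughout this series.
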
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index 1e19b834a6..f59c4710f1 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -843,6 +843,16 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
return true;
}
+netdev_features_t gve_features_check_dqo(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
+{
+ if (skb_is_gso(skb) && !gve_can_send_tso(skb))
+ return features & ~NETIF_F_GSO_MASK;
+
+ return features;
+}
+
/* Attempt to transmit specified SKB.
*
* Returns 0 if the SKB was transmitted or dropped.
@@ -854,11 +864,10 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
int num_buffer_descs;
int total_num_descs;
- if (tx->dqo.qpl) {
- if (skb_is_gso(skb))
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto drop;
+ if (skb_is_gso(skb) && unlikely(ipv6_hopopt_jumbo_remove(skb)))
+ goto drop;
+ if (tx->dqo.qpl) {
/* We do not need to verify the number of buffers used per
* packet or per segment in case of TSO as with 2K size buffers
* none of the TX packet rules would be violated.
@@ -868,24 +877,8 @@ static int gve_try_tx_skb(struct gve_priv *priv, struct gve_tx_ring *tx,
*/
num_buffer_descs = DIV_ROUND_UP(skb->len, GVE_TX_BUF_SIZE_DQO);
} else {
- if (skb_is_gso(skb)) {
- /* If TSO doesn't meet HW requirements, attempt to linearize the
- * packet.
- */
- if (unlikely(!gve_can_send_tso(skb) &&
- skb_linearize(skb) < 0)) {
- net_err_ratelimited("%s: Failed to transmit TSO packet\n",
- priv->dev->name);
- goto drop;
- }
-
- if (unlikely(ipv6_hopopt_jumbo_remove(skb)))
- goto drop;
-
- num_buffer_descs = gve_num_buffer_descs_needed(skb);
- } else {
- num_buffer_descs = gve_num_buffer_descs_needed(skb);
-
+ num_buffer_descs = gve_num_buffer_descs_needed(skb);
+ if (!skb_is_gso(skb)) {
if (unlikely(num_buffer_descs > GVE_TX_MAX_DATA_DESCS)) {
if (unlikely(skb_linearize(skb) < 0))
goto drop;
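
For context on the new callback: the networking core invokes ndo_features_check per skb just before transmit, and clearing NETIF_F_GSO_MASK from the returned mask makes the stack software-segment the packet instead of handing the device a TSO request it cannot honor. A minimal sketch of such a callback, where can_hw_tso() is a hypothetical predicate standing in for gve_can_send_tso() and its 64-segment rule is arbitrary:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Sketch only: arbitrary stand-in rule for the example. */
static bool can_hw_tso(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_segs <= 64;
}

/* Per-skb feature check: report which features may be used for this skb. */
static netdev_features_t example_features_check(struct sk_buff *skb,
						struct net_device *dev,
						netdev_features_t features)
{
	if (skb_is_gso(skb) && !can_hw_tso(skb))
		return features & ~NETIF_F_GSO_MASK; /* fall back to SW GSO */

	return features;
}

With the callback wired up through .ndo_features_check, GSO skbs that fail gve_can_send_tso() are segmented by the stack before they ever reach the driver, which is why the skb_linearize() fallback for TSO could be dropped from gve_try_tx_skb() above.
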