Diffstat (limited to 'drivers/net/ethernet/google')
-rw-r--r--  drivers/net/ethernet/google/gve/gve.h          |  97
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.c   | 229
-rw-r--r--  drivers/net/ethernet/google/gve/gve_adminq.h   |  50
-rw-r--r--  drivers/net/ethernet/google/gve/gve_dqo.h      |   6
-rw-r--r--  drivers/net/ethernet/google/gve/gve_ethtool.c  | 164
-rw-r--r--  drivers/net/ethernet/google/gve/gve_main.c     | 631
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx.c       | 138
-rw-r--r--  drivers/net/ethernet/google/gve/gve_rx_dqo.c   | 148
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx.c       |  36
-rw-r--r--  drivers/net/ethernet/google/gve/gve_tx_dqo.c   |  64
10 files changed, 902 insertions(+), 661 deletions(-)
diff --git a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h
index 4814c96d5f..ae1e21c9b0 100644
--- a/drivers/net/ethernet/google/gve/gve.h
+++ b/drivers/net/ethernet/google/gve/gve.h
@@ -50,6 +50,10 @@
/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES 1024
+/* Default minimum ring size */
+#define GVE_DEFAULT_MIN_TX_RING_SIZE 256
+#define GVE_DEFAULT_MIN_RX_RING_SIZE 512
+
#define GVE_DEFAULT_RX_BUFFER_SIZE 2048
#define GVE_MAX_RX_BUFFER_SIZE 4096
@@ -63,7 +67,6 @@
#define GVE_DEFAULT_HEADER_BUFFER_SIZE 128
#define DQO_QPL_DEFAULT_TX_PAGES 512
-#define DQO_QPL_DEFAULT_RX_PAGES 2048
/* Maximum TSO size supported on DQO */
#define GVE_DQO_TX_MAX 0x3FFFF
@@ -607,6 +610,7 @@ struct gve_notify_block {
struct gve_priv *priv;
struct gve_tx_ring *tx; /* tx rings on this block */
struct gve_rx_ring *rx; /* rx rings on this block */
+ u32 irq;
};
/* Tracks allowed and current queue settings */
@@ -621,11 +625,6 @@ struct gve_qpl_config {
unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};
-struct gve_options_dqo_rda {
- u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
- u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
-};
-
struct gve_irq_db {
__be32 index;
} ____cacheline_aligned;
@@ -639,28 +638,10 @@ struct gve_ptype_lut {
struct gve_ptype ptypes[GVE_NUM_PTYPES];
};
-/* Parameters for allocating queue page lists */
-struct gve_qpls_alloc_cfg {
- struct gve_qpl_config *qpl_cfg;
- struct gve_queue_config *tx_cfg;
- struct gve_queue_config *rx_cfg;
-
- u16 num_xdp_queues;
- bool raw_addressing;
- bool is_gqi;
-
- /* Allocated resources are returned here */
- struct gve_queue_page_list *qpls;
-};
-
/* Parameters for allocating resources for tx queues */
struct gve_tx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
- /* qpls and qpl_cfg must already be allocated */
- struct gve_queue_page_list *qpls;
- struct gve_qpl_config *qpl_cfg;
-
u16 ring_size;
u16 start_idx;
u16 num_rings;
@@ -676,10 +657,6 @@ struct gve_rx_alloc_rings_cfg {
struct gve_queue_config *qcfg;
struct gve_queue_config *qcfg_tx;
- /* qpls and qpl_cfg must already be allocated */
- struct gve_queue_page_list *qpls;
- struct gve_qpl_config *qpl_cfg;
-
u16 ring_size;
u16 packet_buffer_size;
bool raw_addressing;
@@ -705,7 +682,6 @@ struct gve_priv {
struct net_device *dev;
struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
- struct gve_queue_page_list *qpls; /* array of num qpls */
struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
dma_addr_t irq_db_indices_bus;
@@ -718,9 +694,13 @@ struct gve_priv {
u16 num_event_counters;
u16 tx_desc_cnt; /* num desc per ring */
u16 rx_desc_cnt; /* num desc per ring */
+ u16 max_tx_desc_cnt;
+ u16 max_rx_desc_cnt;
+ u16 min_tx_desc_cnt;
+ u16 min_rx_desc_cnt;
+ bool modify_ring_size_enabled;
+ bool default_min_ring_size;
u16 tx_pages_per_qpl; /* Suggested number of pages per qpl for TX queues by NIC */
- u16 rx_pages_per_qpl; /* Suggested number of pages per qpl for RX queues by NIC */
- u16 rx_data_slot_cnt; /* rx buffer length */
u64 max_registered_pages;
u64 num_registered_pages; /* num pages registered with NIC */
struct bpf_prog *xdp_prog; /* XDP BPF program */
@@ -730,7 +710,6 @@ struct gve_priv {
u16 num_xdp_queues;
struct gve_queue_config tx_cfg;
struct gve_queue_config rx_cfg;
- struct gve_qpl_config qpl_cfg; /* map used QPL ids */
u32 num_ntfy_blks; /* split between TX and RX so must be even */
struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
@@ -792,7 +771,6 @@ struct gve_priv {
u64 link_speed;
bool up_before_suspend; /* True if dev was up before suspend */
- struct gve_options_dqo_rda options_dqo_rda;
struct gve_ptype_lut *ptype_lut_dqo;
/* Must be a power of two. */
@@ -1027,7 +1005,6 @@ static inline u32 gve_rx_qpl_id(struct gve_priv *priv, int rx_qid)
return priv->tx_cfg.max_queues + rx_qid;
}
-/* Returns the index into priv->qpls where a certain rx queue's QPL resides */
static inline u32 gve_get_rx_qpl_id(const struct gve_queue_config *tx_cfg, int rx_qid)
{
return tx_cfg->max_queues + rx_qid;
@@ -1038,41 +1015,17 @@ static inline u32 gve_tx_start_qpl_id(struct gve_priv *priv)
return gve_tx_qpl_id(priv, 0);
}
-/* Returns the index into priv->qpls where the first rx queue's QPL resides */
static inline u32 gve_rx_start_qpl_id(const struct gve_queue_config *tx_cfg)
{
return gve_get_rx_qpl_id(tx_cfg, 0);
}
-/* Returns a pointer to the next available tx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_tx_alloc_rings_cfg *cfg,
- int tx_qid)
-{
- /* QPL already in use */
- if (test_bit(tx_qid, cfg->qpl_cfg->qpl_id_map))
- return NULL;
- set_bit(tx_qid, cfg->qpl_cfg->qpl_id_map);
- return &cfg->qpls[tx_qid];
-}
-
-/* Returns a pointer to the next available rx qpl in the list of qpls */
-static inline
-struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_rx_alloc_rings_cfg *cfg,
- int rx_qid)
+static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
- int id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx_qid);
- /* QPL already in use */
- if (test_bit(id, cfg->qpl_cfg->qpl_id_map))
- return NULL;
- set_bit(id, cfg->qpl_cfg->qpl_id_map);
- return &cfg->qpls[id];
-}
-
-/* Unassigns the qpl with the given id */
-static inline void gve_unassign_qpl(struct gve_qpl_config *qpl_cfg, int id)
-{
- clear_bit(id, qpl_cfg->qpl_id_map);
+	/* For DQO, the page count must exceed the ring size to absorb
+	 * out-of-order completions. Set it to twice the ring size.
+	 */
+ return 2 * rx_desc_cnt;
}
/* Returns the correct dma direction for tx and rx qpls */
@@ -1115,6 +1068,12 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
enum dma_data_direction);
+/* qpls */
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+ u32 id, int pages);
+void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
int gve_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
@@ -1137,6 +1096,12 @@ bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
int gve_rx_poll(struct gve_notify_block *block, int budget);
bool gve_rx_work_pending(struct gve_rx_ring *rx);
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings(struct gve_priv *priv);
int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
@@ -1150,6 +1115,12 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg);
int gve_adjust_queues(struct gve_priv *priv,
struct gve_queue_config new_rx_config,
struct gve_queue_config new_tx_config);
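For orientation, a minimal sketch of how the new per-ring QPL pieces above compose. example_rx_qpl_setup() is hypothetical and not part of this patch; gve_rx_qpl_id(), gve_get_rx_pages_per_qpl_dqo(), gve_alloc_queue_page_list() and gve_free_queue_page_list() are the helpers declared in the hunks above:

static int example_rx_qpl_setup(struct gve_priv *priv, int rx_qid)
{
	u32 id = gve_rx_qpl_id(priv, rx_qid);
	/* DQO sizes the QPL at twice the ring size so out-of-order
	 * completions never starve the buffer queue.
	 */
	int pages = gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt);
	struct gve_queue_page_list *qpl;

	qpl = gve_alloc_queue_page_list(priv, id, pages);
	if (!qpl)
		return -ENOMEM;
	/* The ring owns the QPL from here; teardown pairs with
	 * gve_free_queue_page_list(priv, qpl, id).
	 */
	return 0;
}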
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c
index ae12ac38e1..8ca0def176 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.c
+++ b/drivers/net/ethernet/google/gve/gve_adminq.c
@@ -32,6 +32,8 @@ struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *desc
return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}
+#define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE 8
+
static
void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_descriptor *device_descriptor,
@@ -41,7 +43,8 @@ void gve_parse_device_option(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
u16 option_length = be16_to_cpu(option->option_length);
@@ -165,6 +168,27 @@ void gve_parse_device_option(struct gve_priv *priv,
"Buffer Sizes");
*dev_op_buffer_sizes = (void *)(option + 1);
break;
+ case GVE_DEV_OPT_ID_MODIFY_RING:
+ if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
+ req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
+ dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
+ "Modify Ring", (int)sizeof(**dev_op_modify_ring),
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
+ option_length, req_feat_mask);
+ break;
+ }
+
+ if (option_length > sizeof(**dev_op_modify_ring)) {
+ dev_warn(&priv->pdev->dev,
+ GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
+ }
+
+ *dev_op_modify_ring = (void *)(option + 1);
+
+ /* device has not provided min ring size */
+ if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
+ priv->default_min_ring_size = true;
+ break;
default:
/* If we don't recognize the option just continue
* without doing anything.
@@ -183,7 +207,8 @@ gve_process_device_options(struct gve_priv *priv,
struct gve_device_option_dqo_rda **dev_op_dqo_rda,
struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
- struct gve_device_option_buffer_sizes **dev_op_buffer_sizes)
+ struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
+ struct gve_device_option_modify_ring **dev_op_modify_ring)
{
const int num_options = be16_to_cpu(descriptor->num_device_options);
struct gve_device_option *dev_opt;
@@ -204,7 +229,8 @@ gve_process_device_options(struct gve_priv *priv,
gve_parse_device_option(priv, descriptor, dev_opt,
dev_op_gqi_rda, dev_op_gqi_qpl,
dev_op_dqo_rda, dev_op_jumbo_frames,
- dev_op_dqo_qpl, dev_op_buffer_sizes);
+ dev_op_dqo_qpl, dev_op_buffer_sizes,
+ dev_op_modify_ring);
dev_opt = next_opt;
}
@@ -565,6 +591,7 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cpu_to_be64(tx->q_resources_bus),
.tx_ring_addr = cpu_to_be64(tx->bus),
.ntfy_id = cpu_to_be32(tx->ntfy_id),
+ .tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
};
if (gve_is_gqi(priv)) {
@@ -573,24 +600,17 @@ static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
} else {
- u16 comp_ring_size;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- comp_ring_size =
- priv->options_dqo_rda.tx_comp_ring_entries;
- } else {
+ else
qpl_id = tx->dqo.qpl->id;
- comp_ring_size = priv->tx_desc_cnt;
- }
cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_tx_queue.tx_ring_size =
- cpu_to_be16(priv->tx_desc_cnt);
cmd.create_tx_queue.tx_comp_ring_addr =
cpu_to_be64(tx->complq_bus_dqo);
cmd.create_tx_queue.tx_comp_ring_size =
- cpu_to_be16(comp_ring_size);
+ cpu_to_be16(priv->tx_desc_cnt);
}
return gve_adminq_issue_cmd(priv, &cmd);
@@ -610,63 +630,73 @@ int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_que
return gve_adminq_kick_and_wait(priv);
}
-static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
+ union gve_adminq_command *cmd,
+ u32 queue_index)
{
struct gve_rx_ring *rx = &priv->rx[queue_index];
- union gve_adminq_command cmd;
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
- cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
+ cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) {
.queue_id = cpu_to_be32(queue_index),
.ntfy_id = cpu_to_be32(rx->ntfy_id),
.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
+ .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
};
if (gve_is_gqi(priv)) {
u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
- cmd.create_rx_queue.rx_desc_ring_addr =
- cpu_to_be64(rx->desc.bus),
- cmd.create_rx_queue.rx_data_ring_addr =
- cpu_to_be64(rx->data.data_bus),
- cmd.create_rx_queue.index = cpu_to_be32(queue_index);
- cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
+ cmd->create_rx_queue.rx_desc_ring_addr =
+ cpu_to_be64(rx->desc.bus);
+ cmd->create_rx_queue.rx_data_ring_addr =
+ cpu_to_be64(rx->data.data_bus);
+ cmd->create_rx_queue.index = cpu_to_be32(queue_index);
+ cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
} else {
- u16 rx_buff_ring_entries;
u32 qpl_id = 0;
- if (priv->queue_format == GVE_DQO_RDA_FORMAT) {
+ if (priv->queue_format == GVE_DQO_RDA_FORMAT)
qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
- rx_buff_ring_entries =
- priv->options_dqo_rda.rx_buff_ring_entries;
- } else {
+ else
qpl_id = rx->dqo.qpl->id;
- rx_buff_ring_entries = priv->rx_desc_cnt;
- }
- cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
- cmd.create_rx_queue.rx_ring_size =
- cpu_to_be16(priv->rx_desc_cnt);
- cmd.create_rx_queue.rx_desc_ring_addr =
+ cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
+ cmd->create_rx_queue.rx_desc_ring_addr =
cpu_to_be64(rx->dqo.complq.bus);
- cmd.create_rx_queue.rx_data_ring_addr =
+ cmd->create_rx_queue.rx_data_ring_addr =
cpu_to_be64(rx->dqo.bufq.bus);
- cmd.create_rx_queue.packet_buffer_size =
+ cmd->create_rx_queue.packet_buffer_size =
cpu_to_be16(priv->data_buffer_size_dqo);
- cmd.create_rx_queue.rx_buff_ring_size =
- cpu_to_be16(rx_buff_ring_entries);
- cmd.create_rx_queue.enable_rsc =
+ cmd->create_rx_queue.rx_buff_ring_size =
+ cpu_to_be16(priv->rx_desc_cnt);
+ cmd->create_rx_queue.enable_rsc =
!!(priv->dev->features & NETIF_F_LRO);
if (priv->header_split_enabled)
- cmd.create_rx_queue.header_buffer_size =
+ cmd->create_rx_queue.header_buffer_size =
cpu_to_be16(priv->header_buf_size);
}
+}
+
+static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+ gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
return gve_adminq_issue_cmd(priv, &cmd);
}
+/* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
+int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
+
+ gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
+ return gve_adminq_execute_cmd(priv, &cmd);
+}
+
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
int err;
@@ -713,22 +743,31 @@ int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_qu
return gve_adminq_kick_and_wait(priv);
}
+static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
+ u32 queue_index)
+{
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
+ cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
+ .queue_id = cpu_to_be32(queue_index),
+ };
+}
+
static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
union gve_adminq_command cmd;
- int err;
- memset(&cmd, 0, sizeof(cmd));
- cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
- cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
- .queue_id = cpu_to_be32(queue_index),
- };
+ gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
+ return gve_adminq_issue_cmd(priv, &cmd);
+}
- err = gve_adminq_issue_cmd(priv, &cmd);
- if (err)
- return err;
+/* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
+int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
+{
+ union gve_adminq_command cmd;
- return 0;
+ gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
+ return gve_adminq_execute_cmd(priv, &cmd);
}
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
@@ -745,31 +784,17 @@ int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
return gve_adminq_kick_and_wait(priv);
}
-static int gve_set_desc_cnt(struct gve_priv *priv,
- struct gve_device_descriptor *descriptor)
-{
- priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
- priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- return 0;
-}
-
-static int
-gve_set_desc_cnt_dqo(struct gve_priv *priv,
- const struct gve_device_descriptor *descriptor,
- const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
+static void gve_set_default_desc_cnt(struct gve_priv *priv,
+ const struct gve_device_descriptor *descriptor)
{
priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
- if (priv->queue_format == GVE_DQO_QPL_FORMAT)
- return 0;
-
- priv->options_dqo_rda.tx_comp_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
- priv->options_dqo_rda.rx_buff_ring_entries =
- be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);
-
- return 0;
+ /* set default ranges */
+ priv->max_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->max_rx_desc_cnt = priv->rx_desc_cnt;
+ priv->min_tx_desc_cnt = priv->tx_desc_cnt;
+ priv->min_rx_desc_cnt = priv->rx_desc_cnt;
}
static void gve_enable_supported_features(struct gve_priv *priv,
@@ -779,7 +804,9 @@ static void gve_enable_supported_features(struct gve_priv *priv,
const struct gve_device_option_dqo_qpl
*dev_op_dqo_qpl,
const struct gve_device_option_buffer_sizes
- *dev_op_buffer_sizes)
+ *dev_op_buffer_sizes,
+ const struct gve_device_option_modify_ring
+ *dev_op_modify_ring)
{
/* Before control reaches this point, the page-size-capped max MTU from
* the gve_device_descriptor field has already been stored in
@@ -796,12 +823,8 @@ static void gve_enable_supported_features(struct gve_priv *priv,
if (dev_op_dqo_qpl) {
priv->tx_pages_per_qpl =
be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
- priv->rx_pages_per_qpl =
- be16_to_cpu(dev_op_dqo_qpl->rx_pages_per_qpl);
if (priv->tx_pages_per_qpl == 0)
priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
- if (priv->rx_pages_per_qpl == 0)
- priv->rx_pages_per_qpl = DQO_QPL_DEFAULT_RX_PAGES;
}
if (dev_op_buffer_sizes &&
@@ -814,12 +837,33 @@ static void gve_enable_supported_features(struct gve_priv *priv,
"BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
priv->max_rx_buffer_size, priv->header_buf_size);
}
+
+ /* Read and store ring size ranges given by device */
+ if (dev_op_modify_ring &&
+ (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
+ priv->modify_ring_size_enabled = true;
+
+ /* max ring size for DQO QPL should not be overwritten because of device limit */
+ if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
+ priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
+ priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
+ }
+ if (priv->default_min_ring_size) {
+ /* If device hasn't provided minimums, use default minimums */
+ priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
+ priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
+ } else {
+ priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
+ priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
+ }
+ }
}
int gve_adminq_describe_device(struct gve_priv *priv)
{
struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
+ struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
@@ -851,9 +895,9 @@ int gve_adminq_describe_device(struct gve_priv *priv)
err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
&dev_op_gqi_qpl, &dev_op_dqo_rda,
- &dev_op_jumbo_frames,
- &dev_op_dqo_qpl,
- &dev_op_buffer_sizes);
+ &dev_op_jumbo_frames, &dev_op_dqo_qpl,
+ &dev_op_buffer_sizes,
+ &dev_op_modify_ring);
if (err)
goto free_device_descriptor;
@@ -888,15 +932,13 @@ int gve_adminq_describe_device(struct gve_priv *priv)
dev_info(&priv->pdev->dev,
"Driver is running with GQI QPL queue format.\n");
}
- if (gve_is_gqi(priv)) {
- err = gve_set_desc_cnt(priv, descriptor);
- } else {
- /* DQO supports LRO. */
+
+ /* set default descriptor counts */
+ gve_set_default_desc_cnt(priv, descriptor);
+
+ /* DQO supports LRO. */
+ if (!gve_is_gqi(priv))
priv->dev->hw_features |= NETIF_F_LRO;
- err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
- }
- if (err)
- goto free_device_descriptor;
priv->max_registered_pages =
be64_to_cpu(descriptor->max_registered_pages);
@@ -912,18 +954,11 @@ int gve_adminq_describe_device(struct gve_priv *priv)
mac = descriptor->mac;
dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
- priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);
-
- if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
- dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
- priv->rx_data_slot_cnt);
- priv->rx_desc_cnt = priv->rx_data_slot_cnt;
- }
priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
gve_enable_supported_features(priv, supported_features_mask,
dev_op_jumbo_frames, dev_op_dqo_qpl,
- dev_op_buffer_sizes);
+ dev_op_buffer_sizes, dev_op_modify_ring);
free_device_descriptor:
dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
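As a standalone restatement of the Modify Ring length check in gve_parse_device_option() above (the helper below is hypothetical, not part of the patch): an 8-byte option carries only the feature mask and the max ring sizes, while the full 12-byte struct gve_device_option_modify_ring also carries the minimums:

static bool example_modify_ring_has_min_sizes(u16 option_length)
{
	/* option_length == 8 (GVE_DEVICE_OPTION_NO_MIN_RING_SIZE): legacy
	 * layout without minimums; the driver then falls back to
	 * GVE_DEFAULT_MIN_TX_RING_SIZE / GVE_DEFAULT_MIN_RX_RING_SIZE.
	 * option_length >= 12: full layout including min ring sizes.
	 */
	return option_length > GVE_DEVICE_OPTION_NO_MIN_RING_SIZE;
}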
diff --git a/drivers/net/ethernet/google/gve/gve_adminq.h b/drivers/net/ethernet/google/gve/gve_adminq.h
index 5ac972e45f..e64f0dbe74 100644
--- a/drivers/net/ethernet/google/gve/gve_adminq.h
+++ b/drivers/net/ethernet/google/gve/gve_adminq.h
@@ -103,8 +103,7 @@ static_assert(sizeof(struct gve_device_option_gqi_qpl) == 4);
struct gve_device_option_dqo_rda {
__be32 supported_features_mask;
- __be16 tx_comp_ring_entries;
- __be16 rx_buff_ring_entries;
+ __be32 reserved;
};
static_assert(sizeof(struct gve_device_option_dqo_rda) == 8);
@@ -134,6 +133,16 @@ struct gve_device_option_buffer_sizes {
static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
+struct gve_device_option_modify_ring {
+ __be32 supported_featured_mask;
+ __be16 max_rx_ring_size;
+ __be16 max_tx_ring_size;
+ __be16 min_rx_ring_size;
+ __be16 min_tx_ring_size;
+};
+
+static_assert(sizeof(struct gve_device_option_modify_ring) == 12);
+
/* Terminology:
*
* RDA - Raw DMA Addressing - Buffers associated with SKBs are directly DMA
@@ -143,28 +152,31 @@ static_assert(sizeof(struct gve_device_option_buffer_sizes) == 8);
* the device for read/write and data is copied from/to SKBs.
*/
enum gve_dev_opt_id {
- GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
- GVE_DEV_OPT_ID_GQI_RDA = 0x2,
- GVE_DEV_OPT_ID_GQI_QPL = 0x3,
- GVE_DEV_OPT_ID_DQO_RDA = 0x4,
- GVE_DEV_OPT_ID_DQO_QPL = 0x7,
- GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
- GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
+ GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING = 0x1,
+ GVE_DEV_OPT_ID_GQI_RDA = 0x2,
+ GVE_DEV_OPT_ID_GQI_QPL = 0x3,
+ GVE_DEV_OPT_ID_DQO_RDA = 0x4,
+ GVE_DEV_OPT_ID_MODIFY_RING = 0x6,
+ GVE_DEV_OPT_ID_DQO_QPL = 0x7,
+ GVE_DEV_OPT_ID_JUMBO_FRAMES = 0x8,
+ GVE_DEV_OPT_ID_BUFFER_SIZES = 0xa,
};
enum gve_dev_opt_req_feat_mask {
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
- GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES = 0x0,
+ GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING = 0x0,
};
enum gve_sup_feature_mask {
- GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
- GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
+ GVE_SUP_MODIFY_RING_MASK = 1 << 0,
+ GVE_SUP_JUMBO_FRAMES_MASK = 1 << 2,
+ GVE_SUP_BUFFER_SIZES_MASK = 1 << 4,
};
#define GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING 0x0
@@ -439,7 +451,9 @@ int gve_adminq_configure_device_resources(struct gve_priv *priv,
int gve_adminq_deconfigure_device_resources(struct gve_priv *priv);
int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues);
+int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues);
+int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index);
int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 queue_id);
int gve_adminq_register_page_list(struct gve_priv *priv,
struct gve_queue_page_list *qpl);
diff --git a/drivers/net/ethernet/google/gve/gve_dqo.h b/drivers/net/ethernet/google/gve/gve_dqo.h
index b81584829c..e83773fb89 100644
--- a/drivers/net/ethernet/google/gve/gve_dqo.h
+++ b/drivers/net/ethernet/google/gve/gve_dqo.h
@@ -44,6 +44,12 @@ void gve_tx_free_rings_dqo(struct gve_priv *priv,
struct gve_tx_alloc_rings_cfg *cfg);
void gve_tx_start_ring_dqo(struct gve_priv *priv, int idx);
void gve_tx_stop_ring_dqo(struct gve_priv *priv, int idx);
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx);
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg);
int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg);
void gve_rx_free_rings_dqo(struct gve_priv *priv,
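The per-ring prototypes above pair with the new single-queue adminq commands so one RX queue can be torn down and rebuilt without disturbing the others. A hedged sketch of that flow; example_recreate_rx_queue() is invented, while the functions it calls are all added by this patch:

static int example_recreate_rx_queue(struct gve_priv *priv,
				     struct gve_rx_alloc_rings_cfg *cfg,
				     struct gve_rx_ring *rx, int idx)
{
	int err;

	err = gve_adminq_destroy_single_rx_queue(priv, idx);
	if (err)
		return err;
	gve_rx_free_ring_dqo(priv, rx, cfg);

	err = gve_rx_alloc_ring_dqo(priv, cfg, rx, idx);
	if (err)
		return err;
	/* Unlike the batched create path, this rings the doorbell
	 * and waits for completion.
	 */
	return gve_adminq_create_single_rx_queue(priv, idx);
}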
diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c
index 9aebfb843d..cf816ede05 100644
--- a/drivers/net/ethernet/google/gve/gve_ethtool.c
+++ b/drivers/net/ethernet/google/gve/gve_ethtool.c
@@ -8,6 +8,7 @@
#include "gve.h"
#include "gve_adminq.h"
#include "gve_dqo.h"
+#include "gve_utils.h"
static void gve_get_drvinfo(struct net_device *netdev,
struct ethtool_drvinfo *info)
@@ -73,7 +74,7 @@ static const char gve_gstrings_adminq_stats[][ETH_GSTRING_LEN] = {
"adminq_create_tx_queue_cnt", "adminq_create_rx_queue_cnt",
"adminq_destroy_tx_queue_cnt", "adminq_destroy_rx_queue_cnt",
"adminq_dcfg_device_resources_cnt", "adminq_set_driver_parameter_cnt",
- "adminq_report_stats_cnt", "adminq_report_link_speed_cnt"
+ "adminq_report_stats_cnt", "adminq_report_link_speed_cnt", "adminq_get_ptype_map_cnt"
};
static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
@@ -89,42 +90,34 @@ static const char gve_gstrings_priv_flags[][ETH_GSTRING_LEN] = {
static void gve_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
struct gve_priv *priv = netdev_priv(netdev);
- char *s = (char *)data;
+ u8 *s = (char *)data;
int num_tx_queues;
int i, j;
num_tx_queues = gve_num_tx_queues(priv);
switch (stringset) {
case ETH_SS_STATS:
- memcpy(s, *gve_gstrings_main_stats,
- sizeof(gve_gstrings_main_stats));
- s += sizeof(gve_gstrings_main_stats);
-
- for (i = 0; i < priv->rx_cfg.num_queues; i++) {
- for (j = 0; j < NUM_GVE_RX_CNTS; j++) {
- snprintf(s, ETH_GSTRING_LEN,
- gve_gstrings_rx_stats[j], i);
- s += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_main_stats); i++)
+ ethtool_puts(&s, gve_gstrings_main_stats[i]);
- for (i = 0; i < num_tx_queues; i++) {
- for (j = 0; j < NUM_GVE_TX_CNTS; j++) {
- snprintf(s, ETH_GSTRING_LEN,
- gve_gstrings_tx_stats[j], i);
- s += ETH_GSTRING_LEN;
- }
- }
+ for (i = 0; i < priv->rx_cfg.num_queues; i++)
+ for (j = 0; j < NUM_GVE_RX_CNTS; j++)
+ ethtool_sprintf(&s, gve_gstrings_rx_stats[j],
+ i);
+
+ for (i = 0; i < num_tx_queues; i++)
+ for (j = 0; j < NUM_GVE_TX_CNTS; j++)
+ ethtool_sprintf(&s, gve_gstrings_tx_stats[j],
+ i);
+
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_adminq_stats); i++)
+ ethtool_puts(&s, gve_gstrings_adminq_stats[i]);
- memcpy(s, *gve_gstrings_adminq_stats,
- sizeof(gve_gstrings_adminq_stats));
- s += sizeof(gve_gstrings_adminq_stats);
break;
case ETH_SS_PRIV_FLAGS:
- memcpy(s, *gve_gstrings_priv_flags,
- sizeof(gve_gstrings_priv_flags));
- s += sizeof(gve_gstrings_priv_flags);
+ for (i = 0; i < ARRAY_SIZE(gve_gstrings_priv_flags); i++)
+ ethtool_puts(&s, gve_gstrings_priv_flags[i]);
break;
default:
@@ -165,6 +158,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
struct stats *report_stats;
int *rx_qid_to_stats_idx;
int *tx_qid_to_stats_idx;
+ int num_stopped_rxqs = 0;
+ int num_stopped_txqs = 0;
struct gve_priv *priv;
bool skip_nic_stats;
unsigned int start;
@@ -181,12 +176,23 @@ gve_get_ethtool_stats(struct net_device *netdev,
sizeof(int), GFP_KERNEL);
if (!rx_qid_to_stats_idx)
return;
+ for (ring = 0; ring < priv->rx_cfg.num_queues; ring++) {
+ rx_qid_to_stats_idx[ring] = -1;
+ if (!gve_rx_was_added_to_block(priv, ring))
+ num_stopped_rxqs++;
+ }
tx_qid_to_stats_idx = kmalloc_array(num_tx_queues,
sizeof(int), GFP_KERNEL);
if (!tx_qid_to_stats_idx) {
kfree(rx_qid_to_stats_idx);
return;
}
+ for (ring = 0; ring < num_tx_queues; ring++) {
+ tx_qid_to_stats_idx[ring] = -1;
+ if (!gve_tx_was_added_to_block(priv, ring))
+ num_stopped_txqs++;
+ }
+
for (rx_pkts = 0, rx_bytes = 0, rx_hsplit_pkt = 0,
rx_skb_alloc_fail = 0, rx_buf_alloc_fail = 0,
rx_desc_err_dropped_pkt = 0, rx_hsplit_unsplit_pkt = 0,
@@ -260,7 +266,13 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For rx cross-reporting stats, start from nic rx stats in report */
base_stats_idx = GVE_TX_STATS_REPORT_NUM * num_tx_queues +
GVE_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues;
- max_stats_idx = NIC_RX_STATS_REPORT_NUM * priv->rx_cfg.num_queues +
+ /* The boundary between driver stats and NIC stats shifts if there are
+ * stopped queues.
+ */
+ base_stats_idx += NIC_RX_STATS_REPORT_NUM * num_stopped_rxqs +
+ NIC_TX_STATS_REPORT_NUM * num_stopped_txqs;
+ max_stats_idx = NIC_RX_STATS_REPORT_NUM *
+ (priv->rx_cfg.num_queues - num_stopped_rxqs) +
base_stats_idx;
/* Preprocess the stats report for rx, map queue id to start index */
skip_nic_stats = false;
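For concreteness, a worked example with invented values: suppose 4 TX and 4 RX queues, one stopped RX queue, no stopped TX queues, and per-queue report sizes GVE_TX_STATS_REPORT_NUM = 5, GVE_RX_STATS_REPORT_NUM = 2, NIC_RX_STATS_REPORT_NUM = 4 (illustrative numbers only, not the driver's real constants). The NIC RX region then starts at 5*4 + 2*4 + 4*1 = 32 rather than the unshifted 28, and max_stats_idx becomes 32 + 4*(4 - 1) = 44: the region shrinks from the front by exactly the entries the stopped queue never reported.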
@@ -274,6 +286,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
+ if (queue_id < 0 || queue_id >= priv->rx_cfg.num_queues) {
+ net_err_ratelimited("Invalid rxq id in NIC stats\n");
+ continue;
+ }
rx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk RX rings */
@@ -308,11 +324,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = rx->rx_copybreak_pkt;
data[i++] = rx->rx_copied_pkt;
/* stats from NIC */
- if (skip_nic_stats) {
+ stats_idx = rx_qid_to_stats_idx[ring];
+ if (skip_nic_stats || stats_idx < 0) {
/* skip NIC rx stats */
i += NIC_RX_STATS_REPORT_NUM;
} else {
- stats_idx = rx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_RX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
@@ -338,7 +354,8 @@ gve_get_ethtool_stats(struct net_device *netdev,
/* For tx cross-reporting stats, start from nic tx stats in report */
base_stats_idx = max_stats_idx;
- max_stats_idx = NIC_TX_STATS_REPORT_NUM * num_tx_queues +
+ max_stats_idx = NIC_TX_STATS_REPORT_NUM *
+ (num_tx_queues - num_stopped_txqs) +
max_stats_idx;
/* Preprocess the stats report for tx, map queue id to start index */
skip_nic_stats = false;
@@ -352,6 +369,10 @@ gve_get_ethtool_stats(struct net_device *netdev,
skip_nic_stats = true;
break;
}
+ if (queue_id < 0 || queue_id >= num_tx_queues) {
+ net_err_ratelimited("Invalid txq id in NIC stats\n");
+ continue;
+ }
tx_qid_to_stats_idx[queue_id] = stats_idx;
}
/* walk TX rings */
@@ -383,11 +404,11 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = gve_tx_load_event_counter(priv, tx);
data[i++] = tx->dma_mapping_error;
/* stats from NIC */
- if (skip_nic_stats) {
+ stats_idx = tx_qid_to_stats_idx[ring];
+ if (skip_nic_stats || stats_idx < 0) {
/* skip NIC tx stats */
i += NIC_TX_STATS_REPORT_NUM;
} else {
- stats_idx = tx_qid_to_stats_idx[ring];
for (j = 0; j < NIC_TX_STATS_REPORT_NUM; j++) {
u64 value =
be64_to_cpu(report_stats[stats_idx + j].value);
@@ -428,6 +449,7 @@ gve_get_ethtool_stats(struct net_device *netdev,
data[i++] = priv->adminq_set_driver_parameter_cnt;
data[i++] = priv->adminq_report_stats_cnt;
data[i++] = priv->adminq_report_link_speed_cnt;
+ data[i++] = priv->adminq_get_ptype_map_cnt;
}
static void gve_get_channels(struct net_device *netdev,
@@ -470,7 +492,7 @@ static int gve_set_channels(struct net_device *netdev,
return -EINVAL;
}
- if (!netif_carrier_ok(netdev)) {
+ if (!netif_running(netdev)) {
priv->tx_cfg.num_queues = new_tx;
priv->rx_cfg.num_queues = new_rx;
return 0;
@@ -489,8 +511,8 @@ static void gve_get_ringparam(struct net_device *netdev,
{
struct gve_priv *priv = netdev_priv(netdev);
- cmd->rx_max_pending = priv->rx_desc_cnt;
- cmd->tx_max_pending = priv->tx_desc_cnt;
+ cmd->rx_max_pending = priv->max_rx_desc_cnt;
+ cmd->tx_max_pending = priv->max_tx_desc_cnt;
cmd->rx_pending = priv->rx_desc_cnt;
cmd->tx_pending = priv->tx_desc_cnt;
@@ -502,20 +524,81 @@ static void gve_get_ringparam(struct net_device *netdev,
kernel_cmd->tcp_data_split = ETHTOOL_TCP_DATA_SPLIT_DISABLED;
}
+static int gve_adjust_ring_sizes(struct gve_priv *priv,
+ u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt)
+{
+ struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
+ struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
+ int err;
+
+ /* get current queue configuration */
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+
+ /* copy over the new ring_size from ethtool */
+ tx_alloc_cfg.ring_size = new_tx_desc_cnt;
+ rx_alloc_cfg.ring_size = new_rx_desc_cnt;
+
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ if (err)
+ return err;
+ }
+
+ /* Set new ring_size for the next up */
+ priv->tx_desc_cnt = new_tx_desc_cnt;
+ priv->rx_desc_cnt = new_rx_desc_cnt;
+
+ return 0;
+}
+
+static int gve_validate_req_ring_size(struct gve_priv *priv, u16 new_tx_desc_cnt,
+ u16 new_rx_desc_cnt)
+{
+ /* check for valid range */
+ if (new_tx_desc_cnt < priv->min_tx_desc_cnt ||
+ new_tx_desc_cnt > priv->max_tx_desc_cnt ||
+ new_rx_desc_cnt < priv->min_rx_desc_cnt ||
+ new_rx_desc_cnt > priv->max_rx_desc_cnt) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count out of range\n");
+ return -EINVAL;
+ }
+
+ if (!is_power_of_2(new_tx_desc_cnt) || !is_power_of_2(new_rx_desc_cnt)) {
+ dev_err(&priv->pdev->dev, "Requested descriptor count has to be a power of 2\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static int gve_set_ringparam(struct net_device *netdev,
struct ethtool_ringparam *cmd,
struct kernel_ethtool_ringparam *kernel_cmd,
struct netlink_ext_ack *extack)
{
struct gve_priv *priv = netdev_priv(netdev);
+ u16 new_tx_cnt, new_rx_cnt;
+ int err;
- if (priv->tx_desc_cnt != cmd->tx_pending ||
- priv->rx_desc_cnt != cmd->rx_pending) {
- dev_info(&priv->pdev->dev, "Modify ring size is not supported.\n");
+ err = gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ if (err)
+ return err;
+
+ if (cmd->tx_pending == priv->tx_desc_cnt && cmd->rx_pending == priv->rx_desc_cnt)
+ return 0;
+
+ if (!priv->modify_ring_size_enabled) {
+ dev_err(&priv->pdev->dev, "Modify ring size is not supported.\n");
return -EOPNOTSUPP;
}
- return gve_set_hsplit_config(priv, kernel_cmd->tcp_data_split);
+ new_tx_cnt = cmd->tx_pending;
+ new_rx_cnt = cmd->rx_pending;
+
+ if (gve_validate_req_ring_size(priv, new_tx_cnt, new_rx_cnt))
+ return -EINVAL;
+
+ return gve_adjust_ring_sizes(priv, new_tx_cnt, new_rx_cnt);
}
static int gve_user_reset(struct net_device *netdev, u32 *flags)
@@ -710,5 +793,6 @@ const struct ethtool_ops gve_ethtool_ops = {
.set_tunable = gve_set_tunable,
.get_priv_flags = gve_get_priv_flags,
.set_priv_flags = gve_set_priv_flags,
- .get_link_ksettings = gve_get_link_ksettings
+ .get_link_ksettings = gve_get_link_ksettings,
+ .get_ts_info = ethtool_op_get_ts_info,
};
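The accepted ring sizes follow directly from gve_validate_req_ring_size() above: both counts must be powers of two inside the device-reported range. A minimal equivalent predicate (hypothetical helper, same checks):

static bool example_ring_size_ok(const struct gve_priv *priv, u16 tx, u16 rx)
{
	return is_power_of_2(tx) && is_power_of_2(rx) &&
	       tx >= priv->min_tx_desc_cnt && tx <= priv->max_tx_desc_cnt &&
	       rx >= priv->min_rx_desc_cnt && rx <= priv->max_rx_desc_cnt;
}

With, say, a minimum of 256 and a maximum of 1024 (illustrative values), the only TX sizes that pass are 256, 512 and 1024.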
diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c
index 166bd827a6..8b14efd14a 100644
--- a/drivers/net/ethernet/google/gve/gve_main.c
+++ b/drivers/net/ethernet/google/gve/gve_main.c
@@ -9,6 +9,7 @@
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/sched.h>
@@ -16,6 +17,7 @@
#include <linux/workqueue.h>
#include <linux/utsname.h>
#include <linux/version.h>
+#include <net/netdev_queues.h>
#include <net/sch_generic.h>
#include <net/xdp_sock_drv.h>
#include "gve.h"
@@ -253,6 +255,18 @@ static irqreturn_t gve_intr_dqo(int irq, void *arg)
return IRQ_HANDLED;
}
+static int gve_is_napi_on_home_cpu(struct gve_priv *priv, u32 irq)
+{
+ int cpu_curr = smp_processor_id();
+ const struct cpumask *aff_mask;
+
+ aff_mask = irq_get_effective_affinity_mask(irq);
+ if (unlikely(!aff_mask))
+ return 1;
+
+ return cpumask_test_cpu(cpu_curr, aff_mask);
+}
+
int gve_napi_poll(struct napi_struct *napi, int budget)
{
struct gve_notify_block *block;
@@ -322,8 +336,21 @@ int gve_napi_poll_dqo(struct napi_struct *napi, int budget)
reschedule |= work_done == budget;
}
- if (reschedule)
- return budget;
+ if (reschedule) {
+ /* Reschedule by returning budget only if already on the correct
+ * cpu.
+ */
+ if (likely(gve_is_napi_on_home_cpu(priv, block->irq)))
+ return budget;
+
+ /* If not on the cpu with which this queue's irq has affinity
+ * with, we avoid rescheduling napi and arm the irq instead so
+ * that napi gets rescheduled back eventually onto the right
+ * cpu.
+ */
+ if (work_done == budget)
+ work_done--;
+ }
if (likely(napi_complete_done(napi, work_done))) {
/* Enable interrupts again.
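The decrement above leans on the napi contract: a poll returning the full budget is treated as unfinished and stays scheduled on the current cpu, while completing with work_done < budget lets napi_complete_done() succeed so the re-armed irq fires on its affine cpu and pulls polling back home. A condensed restatement (hypothetical helper, same logic as the hunk above):

static int example_poll_tail(struct gve_priv *priv,
			     struct gve_notify_block *block,
			     int work_done, int budget)
{
	/* Still work left: keep polling only on the irq's home cpu. */
	if (work_done == budget) {
		if (gve_is_napi_on_home_cpu(priv, block->irq))
			return budget;	/* napi stays scheduled here */
		work_done--;		/* force the completion path */
	}
	/* work_done < budget: napi_complete_done() succeeds, the irq is
	 * re-armed, and the next interrupt reschedules napi on its
	 * affine cpu.
	 */
	return work_done;
}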
@@ -428,6 +455,7 @@ static int gve_alloc_notify_blocks(struct gve_priv *priv)
"Failed to receive msix vector %d\n", i);
goto abort_with_some_ntfy_blocks;
}
+ block->irq = priv->msix_vectors[msix_idx].vector;
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
get_cpu_mask(i % active_cpus));
block->irq_db_index = &priv->irq_db_indices[i].index;
@@ -441,6 +469,7 @@ abort_with_some_ntfy_blocks:
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
+ block->irq = 0;
}
kvfree(priv->ntfy_blocks);
priv->ntfy_blocks = NULL;
@@ -474,6 +503,7 @@ static void gve_free_notify_blocks(struct gve_priv *priv)
irq_set_affinity_hint(priv->msix_vectors[msix_idx].vector,
NULL);
free_irq(priv->msix_vectors[msix_idx].vector, block);
+ block->irq = 0;
}
free_irq(priv->msix_vectors[priv->mgmt_msix_idx].vector, priv);
kvfree(priv->ntfy_blocks);
@@ -582,37 +612,36 @@ static void gve_teardown_device_resources(struct gve_priv *priv)
gve_clear_device_resources_ok(priv);
}
-static int gve_unregister_qpl(struct gve_priv *priv, u32 i)
+static int gve_unregister_qpl(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl)
{
int err;
- err = gve_adminq_unregister_page_list(priv, priv->qpls[i].id);
+ if (!qpl)
+ return 0;
+
+ err = gve_adminq_unregister_page_list(priv, qpl->id);
if (err) {
netif_err(priv, drv, priv->dev,
"Failed to unregister queue page list %d\n",
- priv->qpls[i].id);
+ qpl->id);
return err;
}
- priv->num_registered_pages -= priv->qpls[i].num_entries;
+ priv->num_registered_pages -= qpl->num_entries;
return 0;
}
-static int gve_register_qpl(struct gve_priv *priv, u32 i)
+static int gve_register_qpl(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl)
{
- int num_rx_qpls;
int pages;
int err;
- /* Rx QPLs succeed Tx QPLs in the priv->qpls array. */
- num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
- if (i >= gve_rx_start_qpl_id(&priv->tx_cfg) + num_rx_qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot register nonexisting QPL at index %d\n", i);
- return -EINVAL;
- }
+ if (!qpl)
+ return 0;
- pages = priv->qpls[i].num_entries;
+ pages = qpl->num_entries;
if (pages + priv->num_registered_pages > priv->max_registered_pages) {
netif_err(priv, drv, priv->dev,
@@ -622,14 +651,11 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return -EINVAL;
}
- err = gve_adminq_register_page_list(priv, &priv->qpls[i]);
+ err = gve_adminq_register_page_list(priv, qpl);
if (err) {
netif_err(priv, drv, priv->dev,
"failed to register queue page list %d\n",
- priv->qpls[i].id);
- /* This failure will trigger a reset - no need to clean
- * up
- */
+ qpl->id);
return err;
}
@@ -637,6 +663,26 @@ static int gve_register_qpl(struct gve_priv *priv, u32 i)
return 0;
}
+static struct gve_queue_page_list *gve_tx_get_qpl(struct gve_priv *priv, int idx)
+{
+ struct gve_tx_ring *tx = &priv->tx[idx];
+
+ if (gve_is_gqi(priv))
+ return tx->tx_fifo.qpl;
+ else
+ return tx->dqo.qpl;
+}
+
+static struct gve_queue_page_list *gve_rx_get_qpl(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+
+ if (gve_is_gqi(priv))
+ return rx->data.qpl;
+ else
+ return rx->dqo.qpl;
+}
+
static int gve_register_xdp_qpls(struct gve_priv *priv)
{
int start_id;
@@ -645,7 +691,7 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_register_qpl(priv, i);
+ err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean up */
if (err)
return err;
@@ -656,7 +702,6 @@ static int gve_register_xdp_qpls(struct gve_priv *priv)
static int gve_register_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
- int start_id;
int err;
int i;
@@ -665,15 +710,13 @@ static int gve_register_qpls(struct gve_priv *priv)
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
- err = gve_register_qpl(priv, i);
+ err = gve_register_qpl(priv, gve_tx_get_qpl(priv, i));
if (err)
return err;
}
- /* there might be a gap between the tx and rx qpl ids */
- start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
- err = gve_register_qpl(priv, start_id + i);
+ err = gve_register_qpl(priv, gve_rx_get_qpl(priv, i));
if (err)
return err;
}
@@ -689,7 +732,7 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
start_id = gve_xdp_tx_start_queue_id(priv);
for (i = start_id; i < start_id + gve_num_xdp_qpls(priv); i++) {
- err = gve_unregister_qpl(priv, i);
+ err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
@@ -700,7 +743,6 @@ static int gve_unregister_xdp_qpls(struct gve_priv *priv)
static int gve_unregister_qpls(struct gve_priv *priv)
{
int num_tx_qpls, num_rx_qpls;
- int start_id;
int err;
int i;
@@ -709,15 +751,14 @@ static int gve_unregister_qpls(struct gve_priv *priv)
num_rx_qpls = gve_num_rx_qpls(&priv->rx_cfg, gve_is_qpl(priv));
for (i = 0; i < num_tx_qpls; i++) {
- err = gve_unregister_qpl(priv, i);
+ err = gve_unregister_qpl(priv, gve_tx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
}
- start_id = gve_rx_start_qpl_id(&priv->tx_cfg);
for (i = 0; i < num_rx_qpls; i++) {
- err = gve_unregister_qpl(priv, start_id + i);
+ err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, i));
/* This failure will trigger a reset - no need to clean */
if (err)
return err;
@@ -828,8 +869,6 @@ static void gve_tx_get_curr_alloc_cfg(struct gve_priv *priv,
{
cfg->qcfg = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
- cfg->qpls = priv->qpls;
- cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->tx_desc_cnt;
cfg->start_idx = 0;
cfg->num_rings = gve_num_tx_queues(priv);
@@ -886,9 +925,9 @@ static int gve_alloc_xdp_rings(struct gve_priv *priv)
return 0;
}
-static int gve_alloc_rings(struct gve_priv *priv,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static int gve_queues_mem_alloc(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
@@ -974,9 +1013,9 @@ static void gve_free_xdp_rings(struct gve_priv *priv)
}
}
-static void gve_free_rings(struct gve_priv *priv,
- struct gve_tx_alloc_rings_cfg *tx_cfg,
- struct gve_rx_alloc_rings_cfg *rx_cfg)
+static void gve_queues_mem_free(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_cfg)
{
if (gve_is_gqi(priv)) {
gve_tx_free_rings_gqi(priv, tx_cfg);
@@ -1005,35 +1044,41 @@ int gve_alloc_page(struct gve_priv *priv, struct device *dev,
return 0;
}
-static int gve_alloc_queue_page_list(struct gve_priv *priv,
- struct gve_queue_page_list *qpl,
- u32 id, int pages)
+struct gve_queue_page_list *gve_alloc_queue_page_list(struct gve_priv *priv,
+ u32 id, int pages)
{
+ struct gve_queue_page_list *qpl;
int err;
int i;
+ qpl = kvzalloc(sizeof(*qpl), GFP_KERNEL);
+ if (!qpl)
+ return NULL;
+
qpl->id = id;
qpl->num_entries = 0;
qpl->pages = kvcalloc(pages, sizeof(*qpl->pages), GFP_KERNEL);
- /* caller handles clean up */
if (!qpl->pages)
- return -ENOMEM;
+ goto abort;
+
qpl->page_buses = kvcalloc(pages, sizeof(*qpl->page_buses), GFP_KERNEL);
- /* caller handles clean up */
if (!qpl->page_buses)
- return -ENOMEM;
+ goto abort;
for (i = 0; i < pages; i++) {
err = gve_alloc_page(priv, &priv->pdev->dev, &qpl->pages[i],
&qpl->page_buses[i],
gve_qpl_dma_dir(priv, id), GFP_KERNEL);
- /* caller handles clean up */
if (err)
- return -ENOMEM;
+ goto abort;
qpl->num_entries++;
}
- return 0;
+ return qpl;
+
+abort:
+ gve_free_queue_page_list(priv, qpl, id);
+ return NULL;
}
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
@@ -1045,14 +1090,16 @@ void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
put_page(page);
}
-static void gve_free_queue_page_list(struct gve_priv *priv,
- struct gve_queue_page_list *qpl,
- int id)
+void gve_free_queue_page_list(struct gve_priv *priv,
+ struct gve_queue_page_list *qpl,
+ u32 id)
{
int i;
- if (!qpl->pages)
+ if (!qpl)
return;
+ if (!qpl->pages)
+ goto free_qpl;
if (!qpl->page_buses)
goto free_pages;
@@ -1065,120 +1112,8 @@ static void gve_free_queue_page_list(struct gve_priv *priv,
free_pages:
kvfree(qpl->pages);
qpl->pages = NULL;
-}
-
-static void gve_free_n_qpls(struct gve_priv *priv,
- struct gve_queue_page_list *qpls,
- int start_id,
- int num_qpls)
-{
- int i;
-
- for (i = start_id; i < start_id + num_qpls; i++)
- gve_free_queue_page_list(priv, &qpls[i], i);
-}
-
-static int gve_alloc_n_qpls(struct gve_priv *priv,
- struct gve_queue_page_list *qpls,
- int page_count,
- int start_id,
- int num_qpls)
-{
- int err;
- int i;
-
- for (i = start_id; i < start_id + num_qpls; i++) {
- err = gve_alloc_queue_page_list(priv, &qpls[i], i, page_count);
- if (err)
- goto free_qpls;
- }
-
- return 0;
-
-free_qpls:
- /* Must include the failing QPL too for gve_alloc_queue_page_list fails
- * without cleaning up.
- */
- gve_free_n_qpls(priv, qpls, start_id, i - start_id + 1);
- return err;
-}
-
-static int gve_alloc_qpls(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
- int rx_start_id, tx_num_qpls, rx_num_qpls;
- struct gve_queue_page_list *qpls;
- int page_count;
- int err;
-
- if (cfg->raw_addressing)
- return 0;
-
- qpls = kvcalloc(max_queues, sizeof(*qpls), GFP_KERNEL);
- if (!qpls)
- return -ENOMEM;
-
- cfg->qpl_cfg->qpl_map_size = BITS_TO_LONGS(max_queues) *
- sizeof(unsigned long) * BITS_PER_BYTE;
- cfg->qpl_cfg->qpl_id_map = kvcalloc(BITS_TO_LONGS(max_queues),
- sizeof(unsigned long), GFP_KERNEL);
- if (!cfg->qpl_cfg->qpl_id_map) {
- err = -ENOMEM;
- goto free_qpl_array;
- }
-
- /* Allocate TX QPLs */
- page_count = priv->tx_pages_per_qpl;
- tx_num_qpls = gve_num_tx_qpls(cfg->tx_cfg, cfg->num_xdp_queues,
- gve_is_qpl(priv));
- err = gve_alloc_n_qpls(priv, qpls, page_count, 0, tx_num_qpls);
- if (err)
- goto free_qpl_map;
-
- /* Allocate RX QPLs */
- rx_start_id = gve_rx_start_qpl_id(cfg->tx_cfg);
- /* For GQI_QPL number of pages allocated have 1:1 relationship with
- * number of descriptors. For DQO, number of pages required are
- * more than descriptors (because of out of order completions).
- */
- page_count = cfg->is_gqi ? priv->rx_data_slot_cnt : priv->rx_pages_per_qpl;
- rx_num_qpls = gve_num_rx_qpls(cfg->rx_cfg, gve_is_qpl(priv));
- err = gve_alloc_n_qpls(priv, qpls, page_count, rx_start_id, rx_num_qpls);
- if (err)
- goto free_tx_qpls;
-
- cfg->qpls = qpls;
- return 0;
-
-free_tx_qpls:
- gve_free_n_qpls(priv, qpls, 0, tx_num_qpls);
-free_qpl_map:
- kvfree(cfg->qpl_cfg->qpl_id_map);
- cfg->qpl_cfg->qpl_id_map = NULL;
-free_qpl_array:
- kvfree(qpls);
- return err;
-}
-
-static void gve_free_qpls(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- int max_queues = cfg->tx_cfg->max_queues + cfg->rx_cfg->max_queues;
- struct gve_queue_page_list *qpls = cfg->qpls;
- int i;
-
- if (!qpls)
- return;
-
- kvfree(cfg->qpl_cfg->qpl_id_map);
- cfg->qpl_cfg->qpl_id_map = NULL;
-
- for (i = 0; i < max_queues; i++)
- gve_free_queue_page_list(priv, &qpls[i], i);
-
- kvfree(qpls);
- cfg->qpls = NULL;
+free_qpl:
+ kvfree(qpl);
}
/* Use this to schedule a reset when the device is capable of continuing
@@ -1282,18 +1217,6 @@ static void gve_drain_page_cache(struct gve_priv *priv)
page_frag_cache_drain(&priv->rx[i].page_cache);
}
-static void gve_qpls_get_curr_alloc_cfg(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *cfg)
-{
- cfg->raw_addressing = !gve_is_qpl(priv);
- cfg->is_gqi = gve_is_gqi(priv);
- cfg->num_xdp_queues = priv->num_xdp_queues;
- cfg->qpl_cfg = &priv->qpl_cfg;
- cfg->tx_cfg = &priv->tx_cfg;
- cfg->rx_cfg = &priv->rx_cfg;
- cfg->qpls = priv->qpls;
-}
-
static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
struct gve_rx_alloc_rings_cfg *cfg)
{
@@ -1301,8 +1224,6 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->qcfg_tx = &priv->tx_cfg;
cfg->raw_addressing = !gve_is_qpl(priv);
cfg->enable_header_split = priv->header_split_enabled;
- cfg->qpls = priv->qpls;
- cfg->qpl_cfg = &priv->qpl_cfg;
cfg->ring_size = priv->rx_desc_cnt;
cfg->packet_buffer_size = gve_is_gqi(priv) ?
GVE_DEFAULT_RX_BUFFER_SIZE :
@@ -1310,90 +1231,56 @@ static void gve_rx_get_curr_alloc_cfg(struct gve_priv *priv,
cfg->rx = priv->rx;
}
-static void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+void gve_get_curr_alloc_cfgs(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
- gve_qpls_get_curr_alloc_cfg(priv, qpls_alloc_cfg);
gve_tx_get_curr_alloc_cfg(priv, tx_alloc_cfg);
gve_rx_get_curr_alloc_cfg(priv, rx_alloc_cfg);
}
-static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
+static void gve_rx_start_ring(struct gve_priv *priv, int i)
{
- int i;
-
- for (i = 0; i < num_rings; i++) {
- if (gve_is_gqi(priv))
- gve_rx_start_ring_gqi(priv, i);
- else
- gve_rx_start_ring_dqo(priv, i);
- }
+ if (gve_is_gqi(priv))
+ gve_rx_start_ring_gqi(priv, i);
+ else
+ gve_rx_start_ring_dqo(priv, i);
}
-static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
+static void gve_rx_start_rings(struct gve_priv *priv, int num_rings)
{
int i;
- if (!priv->rx)
- return;
-
- for (i = 0; i < num_rings; i++) {
- if (gve_is_gqi(priv))
- gve_rx_stop_ring_gqi(priv, i);
- else
- gve_rx_stop_ring_dqo(priv, i);
- }
+ for (i = 0; i < num_rings; i++)
+ gve_rx_start_ring(priv, i);
}
-static void gve_queues_mem_free(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static void gve_rx_stop_ring(struct gve_priv *priv, int i)
{
- gve_free_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
- gve_free_qpls(priv, qpls_alloc_cfg);
+ if (gve_is_gqi(priv))
+ gve_rx_stop_ring_gqi(priv, i);
+ else
+ gve_rx_stop_ring_dqo(priv, i);
}
-static int gve_queues_mem_alloc(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+static void gve_rx_stop_rings(struct gve_priv *priv, int num_rings)
{
- int err;
-
- err = gve_alloc_qpls(priv, qpls_alloc_cfg);
- if (err) {
- netif_err(priv, drv, priv->dev, "Failed to alloc QPLs\n");
- return err;
- }
- tx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
- rx_alloc_cfg->qpls = qpls_alloc_cfg->qpls;
- err = gve_alloc_rings(priv, tx_alloc_cfg, rx_alloc_cfg);
- if (err) {
- netif_err(priv, drv, priv->dev, "Failed to alloc rings\n");
- goto free_qpls;
- }
+ int i;
- return 0;
+ if (!priv->rx)
+ return;
-free_qpls:
- gve_free_qpls(priv, qpls_alloc_cfg);
- return err;
+ for (i = 0; i < num_rings; i++)
+ gve_rx_stop_ring(priv, i);
}
static void gve_queues_mem_remove(struct gve_priv *priv)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- gve_queues_mem_free(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- priv->qpls = NULL;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_queues_mem_free(priv, &tx_alloc_cfg, &rx_alloc_cfg);
priv->tx = NULL;
priv->rx = NULL;
}
@@ -1402,7 +1289,6 @@ static void gve_queues_mem_remove(struct gve_priv *priv)
* No memory is allocated. Passed-in memory is freed on errors.
*/
static int gve_queues_start(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
@@ -1410,12 +1296,10 @@ static int gve_queues_start(struct gve_priv *priv,
int err;
/* Record new resources into priv */
- priv->qpls = qpls_alloc_cfg->qpls;
priv->tx = tx_alloc_cfg->tx;
priv->rx = rx_alloc_cfg->rx;
/* Record new configs into priv */
- priv->qpl_cfg = *qpls_alloc_cfg->qpl_cfg;
priv->tx_cfg = *tx_alloc_cfg->qcfg;
priv->rx_cfg = *rx_alloc_cfg->qcfg;
priv->tx_desc_cnt = tx_alloc_cfg->ring_size;
@@ -1483,23 +1367,19 @@ static int gve_open(struct net_device *dev)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(dev);
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
- err = gve_queues_mem_alloc(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_queues_mem_alloc(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
/* No need to free on error: ownership of resources is lost after
* calling gve_queues_start.
*/
- err = gve_queues_start(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_queues_start(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err)
return err;
@@ -1558,11 +1438,8 @@ static int gve_close(struct net_device *dev)
static int gve_remove_xdp_queues(struct gve_priv *priv)
{
- int qpl_start_id;
int err;
- qpl_start_id = gve_xdp_tx_start_queue_id(priv);
-
err = gve_destroy_xdp_rings(priv);
if (err)
return err;
@@ -1574,27 +1451,19 @@ static int gve_remove_xdp_queues(struct gve_priv *priv)
gve_unreg_xdp_info(priv);
gve_free_xdp_rings(priv);
- gve_free_n_qpls(priv, priv->qpls, qpl_start_id, gve_num_xdp_qpls(priv));
priv->num_xdp_queues = 0;
return 0;
}
static int gve_add_xdp_queues(struct gve_priv *priv)
{
- int start_id;
int err;
priv->num_xdp_queues = priv->rx_cfg.num_queues;
- start_id = gve_xdp_tx_start_queue_id(priv);
- err = gve_alloc_n_qpls(priv, priv->qpls, priv->tx_pages_per_qpl,
- start_id, gve_num_xdp_qpls(priv));
- if (err)
- goto err;
-
err = gve_alloc_xdp_rings(priv);
if (err)
- goto free_xdp_qpls;
+ goto err;
err = gve_reg_xdp_info(priv, priv->dev);
if (err)
@@ -1612,8 +1481,6 @@ static int gve_add_xdp_queues(struct gve_priv *priv)
free_xdp_rings:
gve_free_xdp_rings(priv);
-free_xdp_qpls:
- gve_free_n_qpls(priv, priv->qpls, start_id, gve_num_xdp_qpls(priv));
err:
priv->num_xdp_queues = 0;
return err;
@@ -1644,7 +1511,7 @@ static int gve_set_xdp(struct gve_priv *priv, struct bpf_prog *prog,
u32 status;
old_prog = READ_ONCE(priv->xdp_prog);
- if (!netif_carrier_ok(priv->dev)) {
+ if (!netif_running(priv->dev)) {
WRITE_ONCE(priv->xdp_prog, prog);
if (old_prog)
bpf_prog_put(old_prog);
@@ -1863,16 +1730,14 @@ static int gve_xdp(struct net_device *dev, struct netdev_bpf *xdp)
}
}
-static int gve_adjust_config(struct gve_priv *priv,
- struct gve_qpls_alloc_cfg *qpls_alloc_cfg,
- struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
- struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
+int gve_adjust_config(struct gve_priv *priv,
+ struct gve_tx_alloc_rings_cfg *tx_alloc_cfg,
+ struct gve_rx_alloc_rings_cfg *rx_alloc_cfg)
{
int err;
	/* Allocate resources for the new configuration */
- err = gve_queues_mem_alloc(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ err = gve_queues_mem_alloc(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to alloc new queues");
@@ -1884,14 +1749,12 @@ static int gve_adjust_config(struct gve_priv *priv,
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to close old queues");
- gve_queues_mem_free(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ gve_queues_mem_free(priv, tx_alloc_cfg, rx_alloc_cfg);
return err;
}
/* Bring the device back up again with the new resources. */
- err = gve_queues_start(priv, qpls_alloc_cfg,
- tx_alloc_cfg, rx_alloc_cfg);
+ err = gve_queues_start(priv, tx_alloc_cfg, rx_alloc_cfg);
if (err) {
netif_err(priv, drv, priv->dev,
"Adjust config failed to start new queues, !!! DISABLING ALL QUEUES !!!\n");
@@ -1911,32 +1774,18 @@ int gve_adjust_queues(struct gve_priv *priv,
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
- struct gve_qpl_config new_qpl_cfg;
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
-
- /* qpl_cfg is not read-only, it contains a map that gets updated as
- * rings are allocated, which is why we cannot use the yet unreleased
- * one in priv.
- */
- qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
/* Relay the new config from ethtool */
- qpls_alloc_cfg.tx_cfg = &new_tx_config;
tx_alloc_cfg.qcfg = &new_tx_config;
rx_alloc_cfg.qcfg_tx = &new_tx_config;
- qpls_alloc_cfg.rx_cfg = &new_rx_config;
rx_alloc_cfg.qcfg = &new_rx_config;
tx_alloc_cfg.num_rings = new_tx_config.num_queues;
- if (netif_carrier_ok(priv->dev)) {
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ if (netif_running(priv->dev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
/* Set the config for the next up. */
@@ -1961,12 +1810,16 @@ static void gve_turndown(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_tx_was_added_to_block(priv, idx))
+ continue;
napi_disable(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_rx_was_added_to_block(priv, idx))
+ continue;
napi_disable(&block->napi);
}
@@ -1989,6 +1842,9 @@ static void gve_turnup(struct gve_priv *priv)
int ntfy_idx = gve_tx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_tx_was_added_to_block(priv, idx))
+ continue;
+
napi_enable(&block->napi);
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
@@ -1996,11 +1852,21 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->tx_coalesce_usecs);
}
+
+ /* Any descs written by the NIC before this barrier will be
+ * handled by the one-off napi schedule below. Whereas any
+ * descs after the barrier will generate interrupts.
+ */
+ mb();
+ napi_schedule(&block->napi);
}
for (idx = 0; idx < priv->rx_cfg.num_queues; idx++) {
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
struct gve_notify_block *block = &priv->ntfy_blocks[ntfy_idx];
+ if (!gve_rx_was_added_to_block(priv, idx))
+ continue;
+
napi_enable(&block->napi);
if (gve_is_gqi(priv)) {
iowrite32be(0, gve_irq_doorbell(priv, block));
@@ -2008,11 +1874,27 @@ static void gve_turnup(struct gve_priv *priv)
gve_set_itr_coalesce_usecs_dqo(priv, block,
priv->rx_coalesce_usecs);
}
+
+ /* Any descs written by the NIC before this barrier will be
+ * handled by the one-off napi schedule below. Whereas any
+ * descs after the barrier will generate interrupts.
+ */
+ mb();
+ napi_schedule(&block->napi);
}
gve_set_napi_enabled(priv);
}
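
The mb()/napi_schedule() pairing added in both loops closes a race on interrupt re-enable: descriptors the NIC posted while the queue was masked would otherwise never raise an interrupt and never be polled. A kernel-style sketch of the pattern (not the exact gve code):

static void ring_irq_reenable(struct napi_struct *napi,
			      void __iomem *irq_doorbell)
{
	napi_enable(napi);

	/* Unmask the interrupt (GQI writes 0 to the IRQ doorbell). */
	iowrite32be(0, irq_doorbell);

	/* Order the unmask against the poll below: completions written
	 * before the barrier are swept up by the one-off schedule,
	 * completions after it generate interrupts normally. Either
	 * way, nothing is stranded. */
	mb();
	napi_schedule(napi);
}
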
+static void gve_turnup_and_check_status(struct gve_priv *priv)
+{
+ u32 status;
+
+ gve_turnup(priv);
+ status = ioread32be(&priv->reg_bar0->device_status);
+ gve_handle_link_status(priv, GVE_DEVICE_STATUS_LINK_STATUS_MASK & status);
+}
+
static void gve_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct gve_notify_block *block;
@@ -2077,7 +1959,6 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
{
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
bool enable_hdr_split;
int err = 0;
@@ -2097,15 +1978,13 @@ int gve_set_hsplit_config(struct gve_priv *priv, u8 tcp_data_split)
if (enable_hdr_split == priv->header_split_enabled)
return 0;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
rx_alloc_cfg.enable_header_split = enable_hdr_split;
rx_alloc_cfg.packet_buffer_size = gve_get_pkt_buf_size(priv, enable_hdr_split);
if (netif_running(priv->dev))
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
return err;
}
@@ -2115,26 +1994,15 @@ static int gve_set_features(struct net_device *netdev,
const netdev_features_t orig_features = netdev->features;
struct gve_tx_alloc_rings_cfg tx_alloc_cfg = {0};
struct gve_rx_alloc_rings_cfg rx_alloc_cfg = {0};
- struct gve_qpls_alloc_cfg qpls_alloc_cfg = {0};
struct gve_priv *priv = netdev_priv(netdev);
- struct gve_qpl_config new_qpl_cfg;
int err;
- gve_get_curr_alloc_cfgs(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
- /* qpl_cfg is not read-only, it contains a map that gets updated as
- * rings are allocated, which is why we cannot use the yet unreleased
- * one in priv.
- */
- qpls_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- tx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
- rx_alloc_cfg.qpl_cfg = &new_qpl_cfg;
+ gve_get_curr_alloc_cfgs(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if ((netdev->features & NETIF_F_LRO) != (features & NETIF_F_LRO)) {
netdev->features ^= NETIF_F_LRO;
- if (netif_carrier_ok(netdev)) {
- err = gve_adjust_config(priv, &qpls_alloc_cfg,
- &tx_alloc_cfg, &rx_alloc_cfg);
+ if (netif_running(netdev)) {
+ err = gve_adjust_config(priv, &tx_alloc_cfg, &rx_alloc_cfg);
if (err) {
/* Revert the change on error. */
netdev->features = orig_features;
@@ -2422,7 +2290,7 @@ err:
int gve_reset(struct gve_priv *priv, bool attempt_teardown)
{
- bool was_up = netif_carrier_ok(priv->dev);
+ bool was_up = netif_running(priv->dev);
int err;
dev_info(&priv->pdev->dev, "Performing reset\n");
@@ -2473,6 +2341,140 @@ static void gve_write_version(u8 __iomem *driver_version_register)
writeb('\n', driver_version_register);
}
+static int gve_rx_queue_stop(struct net_device *dev, void *per_q_mem, int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ /* Destroying queue 0 while other queues exist is not supported in DQO */
+ if (!gve_is_gqi(priv) && idx == 0)
+ return -ERANGE;
+
+ /* Single-queue destruction requires quiescence on all queues */
+ gve_turndown(priv);
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_destroy_single_rx_queue(priv, idx);
+ if (err)
+ return err;
+
+ if (gve_is_qpl(priv)) {
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_unregister_qpl(priv, gve_rx_get_qpl(priv, idx));
+ if (err)
+ return err;
+ }
+
+ gve_rx_stop_ring(priv, idx);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ *gve_per_q_mem = priv->rx[idx];
+ memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
+ return 0;
+}
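
gve_rx_queue_stop() hands the live ring state to the core by value: the ring struct is copied into the caller-provided per_q_mem and the slot in priv->rx is zeroed, so any later teardown or reset sees an empty ring. gve_rx_queue_start() below performs the inverse copy. A tiny sketch of the convention (types abbreviated, names hypothetical):

#include <string.h>

struct ring {
	void *desc;
	int cnt;
};

/* Park the live ring in caller-owned memory; the live slot now reads
 * as "no ring", which makes later frees and resets no-ops for it. */
static void queue_park(struct ring *live, struct ring *saved)
{
	*saved = *live;
	memset(live, 0, sizeof(*live));
}

/* Adopt a previously parked (or freshly allocated) ring. */
static void queue_unpark(struct ring *live, const struct ring *saved)
{
	*live = *saved;
}
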
+
+static void gve_rx_queue_mem_free(struct net_device *dev, void *per_q_mem)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *gve_per_q_mem;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+
+ if (gve_is_gqi(priv))
+ gve_rx_free_ring_gqi(priv, gve_per_q_mem, &cfg);
+ else
+ gve_rx_free_ring_dqo(priv, gve_per_q_mem, &cfg);
+}
+
+static int gve_rx_queue_mem_alloc(struct net_device *dev, void *per_q_mem,
+ int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_alloc_rings_cfg cfg = {0};
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ gve_rx_get_curr_alloc_cfg(priv, &cfg);
+
+ if (gve_is_gqi(priv))
+ err = gve_rx_alloc_ring_gqi(priv, &cfg, gve_per_q_mem, idx);
+ else
+ err = gve_rx_alloc_ring_dqo(priv, &cfg, gve_per_q_mem, idx);
+
+ return err;
+}
+
+static int gve_rx_queue_start(struct net_device *dev, void *per_q_mem, int idx)
+{
+ struct gve_priv *priv = netdev_priv(dev);
+ struct gve_rx_ring *gve_per_q_mem;
+ int err;
+
+ if (!priv->rx)
+ return -EAGAIN;
+
+ gve_per_q_mem = (struct gve_rx_ring *)per_q_mem;
+ priv->rx[idx] = *gve_per_q_mem;
+
+ /* Single-queue creation requires quiescence on all queues */
+ gve_turndown(priv);
+
+ gve_rx_start_ring(priv, idx);
+
+ if (gve_is_qpl(priv)) {
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_register_qpl(priv, gve_rx_get_qpl(priv, idx));
+ if (err)
+ goto abort;
+ }
+
+ /* This failure will trigger a reset - no need to clean up */
+ err = gve_adminq_create_single_rx_queue(priv, idx);
+ if (err)
+ goto abort;
+
+ if (gve_is_gqi(priv))
+ gve_rx_write_doorbell(priv, &priv->rx[idx]);
+ else
+ gve_rx_post_buffers_dqo(&priv->rx[idx]);
+
+ /* Turn the unstopped queues back up */
+ gve_turnup_and_check_status(priv);
+ return 0;
+
+abort:
+ gve_rx_stop_ring(priv, idx);
+
+	/* All failures in this func result in a reset; by clearing the struct
+ * at idx, we prevent a double free when that reset runs. The reset,
+ * which needs the rtnl lock, will not run till this func returns and
+ * its caller gives up the lock.
+ */
+ memset(&priv->rx[idx], 0, sizeof(priv->rx[idx]));
+ return err;
+}
+
+static const struct netdev_queue_mgmt_ops gve_queue_mgmt_ops = {
+ .ndo_queue_mem_size = sizeof(struct gve_rx_ring),
+ .ndo_queue_mem_alloc = gve_rx_queue_mem_alloc,
+ .ndo_queue_mem_free = gve_rx_queue_mem_free,
+ .ndo_queue_start = gve_rx_queue_start,
+ .ndo_queue_stop = gve_rx_queue_stop,
+};
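
With this table registered, the core can restart an RX queue without a full device reset. The core side is expected to drive the callbacks roughly as follows; this is a sketch of the contract, not the actual core implementation, and the error unwinding is trimmed:

static int queue_restart_sketch(struct net_device *dev,
				const struct netdev_queue_mgmt_ops *ops,
				int idx)
{
	void *new_mem, *old_mem;
	int err;

	new_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	old_mem = kzalloc(ops->ndo_queue_mem_size, GFP_KERNEL);
	if (!new_mem || !old_mem)
		return -ENOMEM;

	/* Allocate the replacement ring before touching the live one. */
	err = ops->ndo_queue_mem_alloc(dev, new_mem, idx);
	if (err)
		return err;

	err = ops->ndo_queue_stop(dev, old_mem, idx);  /* park old ring */
	if (err)
		return err;

	err = ops->ndo_queue_start(dev, new_mem, idx); /* adopt new ring */
	if (err)
		return err;

	ops->ndo_queue_mem_free(dev, old_mem); /* release old resources */
	return 0;
}
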
+
static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int max_tx_queues, max_rx_queues;
@@ -2527,6 +2529,7 @@ static int gve_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
dev->ethtool_ops = &gve_ethtool_ops;
dev->netdev_ops = &gve_netdev_ops;
+ dev->queue_mgmt_ops = &gve_queue_mgmt_ops;
/* Set default and supported features.
*
@@ -2628,7 +2631,7 @@ static void gve_shutdown(struct pci_dev *pdev)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct gve_priv *priv = netdev_priv(netdev);
- bool was_up = netif_carrier_ok(priv->dev);
+ bool was_up = netif_running(priv->dev);
rtnl_lock();
if (was_up && gve_close(priv->dev)) {
@@ -2646,7 +2649,7 @@ static int gve_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct net_device *netdev = pci_get_drvdata(pdev);
struct gve_priv *priv = netdev_priv(netdev);
- bool was_up = netif_carrier_ok(priv->dev);
+ bool was_up = netif_running(priv->dev);
priv->suspend_cnt++;
rtnl_lock();
diff --git a/drivers/net/ethernet/google/gve/gve_rx.c b/drivers/net/ethernet/google/gve/gve_rx.c
index 20f5a9e7fa..acb73d4d0d 100644
--- a/drivers/net/ethernet/google/gve/gve_rx.c
+++ b/drivers/net/ethernet/google/gve/gve_rx.c
@@ -30,6 +30,9 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
u32 slots = rx->mask + 1;
int i;
+ if (!rx->data.page_info)
+ return;
+
if (rx->data.raw_addressing) {
for (i = 0; i < slots; i++)
gve_rx_free_buffer(&priv->pdev->dev, &rx->data.page_info[i],
@@ -38,8 +41,6 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
for (i = 0; i < slots; i++)
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
- rx->data.qpl = NULL;
for (i = 0; i < rx->qpl_copy_pool_mask + 1; i++) {
page_ref_sub(rx->qpl_copy_pool[i].page,
@@ -51,6 +52,41 @@ static void gve_rx_unfill_pages(struct gve_priv *priv,
rx->data.page_info = NULL;
}
+static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
+{
+ ctx->skb_head = NULL;
+ ctx->skb_tail = NULL;
+ ctx->total_size = 0;
+ ctx->frag_cnt = 0;
+ ctx->drop_pkt = false;
+}
+
+static void gve_rx_init_ring_state_gqi(struct gve_rx_ring *rx)
+{
+ rx->desc.seqno = 1;
+ rx->cnt = 0;
+ gve_rx_ctx_clear(&rx->ctx);
+}
+
+static void gve_rx_reset_ring_gqi(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ const u32 slots = priv->rx_desc_cnt;
+ size_t size;
+
+ /* Reset desc ring */
+ if (rx->desc.desc_ring) {
+ size = slots * sizeof(rx->desc.desc_ring[0]);
+ memset(rx->desc.desc_ring, 0, size);
+ }
+
+ /* Reset q_resources */
+ if (rx->q_resources)
+ memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+ gve_rx_init_ring_state_gqi(rx);
+}
+
void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -60,34 +96,48 @@ void gve_rx_stop_ring_gqi(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
+ gve_rx_reset_ring_gqi(priv, idx);
}
-static void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_gqi(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *dev = &priv->pdev->dev;
u32 slots = rx->mask + 1;
int idx = rx->q_num;
size_t bytes;
+ u32 qpl_id;
- bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
- dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
- rx->desc.desc_ring = NULL;
+ if (rx->desc.desc_ring) {
+ bytes = sizeof(struct gve_rx_desc) * cfg->ring_size;
+ dma_free_coherent(dev, bytes, rx->desc.desc_ring, rx->desc.bus);
+ rx->desc.desc_ring = NULL;
+ }
- dma_free_coherent(dev, sizeof(*rx->q_resources),
- rx->q_resources, rx->q_resources_bus);
- rx->q_resources = NULL;
+ if (rx->q_resources) {
+ dma_free_coherent(dev, sizeof(*rx->q_resources),
+ rx->q_resources, rx->q_resources_bus);
+ rx->q_resources = NULL;
+ }
gve_rx_unfill_pages(priv, rx, cfg);
- bytes = sizeof(*rx->data.data_ring) * slots;
- dma_free_coherent(dev, bytes, rx->data.data_ring,
- rx->data.data_bus);
- rx->data.data_ring = NULL;
+ if (rx->data.data_ring) {
+ bytes = sizeof(*rx->data.data_ring) * slots;
+ dma_free_coherent(dev, bytes, rx->data.data_ring,
+ rx->data.data_bus);
+ rx->data.data_ring = NULL;
+ }
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
+ if (rx->data.qpl) {
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, idx);
+ gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+ rx->data.qpl = NULL;
+ }
+
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
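
Every free in this function is now guarded and the pointer cleared afterwards, which makes the free path safe on partially allocated rings — a requirement now that the queue management path can free memory whose allocation failed midway. The pattern in isolation:

/* Guard-and-clear: freeing is a no-op unless the resource exists,
 * and clearing the pointer makes a second call harmless. */
static void free_dma_buf(struct device *dev, size_t bytes,
			 void **cpu_addr, dma_addr_t bus)
{
	if (*cpu_addr) {
		dma_free_coherent(dev, bytes, *cpu_addr, bus);
		*cpu_addr = NULL;
	}
}
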
@@ -144,14 +194,6 @@ static int gve_rx_prefill_pages(struct gve_rx_ring *rx,
if (!rx->data.page_info)
return -ENOMEM;
- if (!rx->data.raw_addressing) {
- rx->data.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
- if (!rx->data.qpl) {
- kvfree(rx->data.page_info);
- rx->data.page_info = NULL;
- return -ENOMEM;
- }
- }
for (i = 0; i < slots; i++) {
if (!rx->data.raw_addressing) {
struct page *page = rx->data.qpl->pages[i];
@@ -204,9 +246,6 @@ alloc_err_qpl:
page_ref_sub(rx->data.page_info[i].page,
rx->data.page_info[i].pagecnt_bias - 1);
- gve_unassign_qpl(cfg->qpl_cfg, rx->data.qpl->id);
- rx->data.qpl = NULL;
-
return err;
alloc_err_rda:
@@ -217,15 +256,6 @@ alloc_err_rda:
return err;
}
-static void gve_rx_ctx_clear(struct gve_rx_ctx *ctx)
-{
- ctx->skb_head = NULL;
- ctx->skb_tail = NULL;
- ctx->total_size = 0;
- ctx->frag_cnt = 0;
- ctx->drop_pkt = false;
-}
-
void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -234,14 +264,16 @@ void gve_rx_start_ring_gqi(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll);
}
-static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
- u32 slots = priv->rx_data_slot_cnt;
+ u32 slots = cfg->ring_size;
int filled_pages;
+ int qpl_page_cnt;
+ u32 qpl_id = 0;
size_t bytes;
int err;
@@ -274,10 +306,22 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
goto abort_with_slots;
}
+ if (!rx->data.raw_addressing) {
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ qpl_page_cnt = cfg->ring_size;
+
+ rx->data.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
+ if (!rx->data.qpl) {
+ err = -ENOMEM;
+ goto abort_with_copy_pool;
+ }
+ }
+
filled_pages = gve_rx_prefill_pages(rx, cfg);
if (filled_pages < 0) {
err = -ENOMEM;
- goto abort_with_copy_pool;
+ goto abort_with_qpl;
}
rx->fill_cnt = filled_pages;
/* Ensure data ring slots (packet buffers) are visible. */
@@ -304,9 +348,8 @@ static int gve_rx_alloc_ring_gqi(struct gve_priv *priv,
err = -ENOMEM;
goto abort_with_q_resources;
}
- rx->cnt = 0;
rx->db_threshold = slots / 2;
- rx->desc.seqno = 1;
+ gve_rx_init_ring_state_gqi(rx);
rx->packet_buffer_size = GVE_DEFAULT_RX_BUFFER_SIZE;
gve_rx_ctx_clear(&rx->ctx);
@@ -319,6 +362,11 @@ abort_with_q_resources:
rx->q_resources = NULL;
abort_filled:
gve_rx_unfill_pages(priv, rx, cfg);
+abort_with_qpl:
+ if (!rx->data.raw_addressing) {
+ gve_free_queue_page_list(priv, rx->data.qpl, qpl_id);
+ rx->data.qpl = NULL;
+ }
abort_with_copy_pool:
kvfree(rx->qpl_copy_pool);
rx->qpl_copy_pool = NULL;
@@ -337,12 +385,6 @@ int gve_rx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
index 8e8071308a..1154c1d8f6 100644
--- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c
@@ -178,7 +178,7 @@ static int gve_alloc_page_dqo(struct gve_rx_ring *rx,
return err;
} else {
idx = rx->dqo.next_qpl_page_idx;
- if (idx >= priv->rx_pages_per_qpl) {
+ if (idx >= gve_get_rx_pages_per_qpl_dqo(priv->rx_desc_cnt)) {
net_err_ratelimited("%s: Out of QPL pages\n",
priv->dev->name);
return -ENOMEM;
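
gve_get_rx_pages_per_qpl_dqo() replaces the fixed, device-supplied rx_pages_per_qpl with a value derived from the ring size. Its definition is not shown in this diff; a plausible sketch consistent with its uses here (DQO completes buffers out of order, so the QPL needs more pages than descriptors) would be:

/* Hypothetical sketch; the factor of two is an assumption, chosen so
 * out-of-order DQO completions always have spare QPL pages. */
static inline u32 gve_get_rx_pages_per_qpl_dqo(u32 rx_desc_cnt)
{
	return 2 * rx_desc_cnt;
}
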
@@ -211,6 +211,82 @@ static void gve_rx_free_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
}
}
+static void gve_rx_init_ring_state_dqo(struct gve_rx_ring *rx,
+ const u32 buffer_queue_slots,
+ const u32 completion_queue_slots)
+{
+ int i;
+
+ /* Set buffer queue state */
+ rx->dqo.bufq.mask = buffer_queue_slots - 1;
+ rx->dqo.bufq.head = 0;
+ rx->dqo.bufq.tail = 0;
+
+ /* Set completion queue state */
+ rx->dqo.complq.num_free_slots = completion_queue_slots;
+ rx->dqo.complq.mask = completion_queue_slots - 1;
+ rx->dqo.complq.cur_gen_bit = 0;
+ rx->dqo.complq.head = 0;
+
+ /* Set RX SKB context */
+ rx->ctx.skb_head = NULL;
+ rx->ctx.skb_tail = NULL;
+
+ /* Set up linked list of buffer IDs */
+ if (rx->dqo.buf_states) {
+ for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
+ rx->dqo.buf_states[i].next = i + 1;
+ rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
+ }
+
+ rx->dqo.free_buf_states = 0;
+ rx->dqo.recycled_buf_states.head = -1;
+ rx->dqo.recycled_buf_states.tail = -1;
+ rx->dqo.used_buf_states.head = -1;
+ rx->dqo.used_buf_states.tail = -1;
+}
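
The buffer states form an index-chained free list: each entry's next field holds the index of the following free buffer, and -1 terminates the list. A standalone demo of popping IDs off such a list:

#include <stdio.h>

struct buf_state {
	int next; /* index of next free buffer, -1 ends the list */
};

static int pop_free(struct buf_state *states, int *head)
{
	int i = *head;

	if (i == -1)
		return -1; /* list empty */
	*head = states[i].next;
	return i;
}

int main(void)
{
	struct buf_state states[4];
	int head = 0;
	int i;

	for (i = 0; i < 3; i++)
		states[i].next = i + 1;
	states[3].next = -1;

	while ((i = pop_free(states, &head)) != -1)
		printf("got buffer id %d\n", i); /* prints 0 1 2 3 */
	return 0;
}
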
+
+static void gve_rx_reset_ring_dqo(struct gve_priv *priv, int idx)
+{
+ struct gve_rx_ring *rx = &priv->rx[idx];
+ size_t size;
+ int i;
+
+ const u32 buffer_queue_slots = priv->rx_desc_cnt;
+ const u32 completion_queue_slots = priv->rx_desc_cnt;
+
+ /* Reset buffer queue */
+ if (rx->dqo.bufq.desc_ring) {
+ size = sizeof(rx->dqo.bufq.desc_ring[0]) *
+ buffer_queue_slots;
+ memset(rx->dqo.bufq.desc_ring, 0, size);
+ }
+
+ /* Reset completion queue */
+ if (rx->dqo.complq.desc_ring) {
+ size = sizeof(rx->dqo.complq.desc_ring[0]) *
+ completion_queue_slots;
+ memset(rx->dqo.complq.desc_ring, 0, size);
+ }
+
+ /* Reset q_resources */
+ if (rx->q_resources)
+ memset(rx->q_resources, 0, sizeof(*rx->q_resources));
+
+ /* Reset buf states */
+ if (rx->dqo.buf_states) {
+ for (i = 0; i < rx->dqo.num_buf_states; i++) {
+ struct gve_rx_buf_state_dqo *bs = &rx->dqo.buf_states[i];
+
+ if (bs->page_info.page)
+ gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
+ }
+ }
+
+ gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+ completion_queue_slots);
+}
+
void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
{
int ntfy_idx = gve_rx_idx_to_ntfy(priv, idx);
@@ -220,16 +296,18 @@ void gve_rx_stop_ring_dqo(struct gve_priv *priv, int idx)
gve_remove_napi(priv, ntfy_idx);
gve_rx_remove_from_block(priv, idx);
+ gve_rx_reset_ring_dqo(priv, idx);
}
-static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
- struct gve_rx_alloc_rings_cfg *cfg)
+void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
+ struct gve_rx_alloc_rings_cfg *cfg)
{
struct device *hdev = &priv->pdev->dev;
size_t completion_queue_slots;
size_t buffer_queue_slots;
int idx = rx->q_num;
size_t size;
+ u32 qpl_id;
int i;
completion_queue_slots = rx->dqo.complq.mask + 1;
@@ -247,8 +325,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
if (bs->page_info.page)
gve_free_page_dqo(priv, bs, !rx->dqo.qpl);
}
+
if (rx->dqo.qpl) {
- gve_unassign_qpl(cfg->qpl_cfg, rx->dqo.qpl->id);
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ gve_free_queue_page_list(priv, rx->dqo.qpl, qpl_id);
rx->dqo.qpl = NULL;
}
@@ -275,10 +355,10 @@ static void gve_rx_free_ring_dqo(struct gve_priv *priv, struct gve_rx_ring *rx,
netif_dbg(priv, drv, priv->dev, "freed rx ring %d\n", idx);
}
-static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx)
+static int gve_rx_alloc_hdr_bufs(struct gve_priv *priv, struct gve_rx_ring *rx,
+ const u32 buf_count)
{
struct device *hdev = &priv->pdev->dev;
- int buf_count = rx->dqo.bufq.mask + 1;
rx->dqo.hdr_bufs.data = dma_alloc_coherent(hdev, priv->header_buf_size * buf_count,
&rx->dqo.hdr_bufs.addr, GFP_KERNEL);
@@ -296,17 +376,17 @@ void gve_rx_start_ring_dqo(struct gve_priv *priv, int idx)
gve_add_napi(priv, ntfy_idx, gve_napi_poll_dqo);
}
-static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
- struct gve_rx_alloc_rings_cfg *cfg,
- struct gve_rx_ring *rx,
- int idx)
+int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
+ struct gve_rx_alloc_rings_cfg *cfg,
+ struct gve_rx_ring *rx,
+ int idx)
{
struct device *hdev = &priv->pdev->dev;
+ int qpl_page_cnt;
size_t size;
- int i;
+ u32 qpl_id;
- const u32 buffer_queue_slots = cfg->raw_addressing ?
- priv->options_dqo_rda.rx_buff_ring_entries : cfg->ring_size;
+ const u32 buffer_queue_slots = cfg->ring_size;
const u32 completion_queue_slots = cfg->ring_size;
netif_dbg(priv, drv, priv->dev, "allocating rx ring DQO\n");
@@ -314,15 +394,10 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
memset(rx, 0, sizeof(*rx));
rx->gve = priv;
rx->q_num = idx;
- rx->dqo.bufq.mask = buffer_queue_slots - 1;
- rx->dqo.complq.num_free_slots = completion_queue_slots;
- rx->dqo.complq.mask = completion_queue_slots - 1;
- rx->ctx.skb_head = NULL;
- rx->ctx.skb_tail = NULL;
rx->dqo.num_buf_states = cfg->raw_addressing ?
min_t(s16, S16_MAX, buffer_queue_slots * 4) :
- priv->rx_pages_per_qpl;
+ gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
rx->dqo.buf_states = kvcalloc(rx->dqo.num_buf_states,
sizeof(rx->dqo.buf_states[0]),
GFP_KERNEL);
@@ -331,19 +406,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
/* Allocate header buffers for header-split */
if (cfg->enable_header_split)
- if (gve_rx_alloc_hdr_bufs(priv, rx))
+ if (gve_rx_alloc_hdr_bufs(priv, rx, buffer_queue_slots))
goto err;
- /* Set up linked list of buffer IDs */
- for (i = 0; i < rx->dqo.num_buf_states - 1; i++)
- rx->dqo.buf_states[i].next = i + 1;
-
- rx->dqo.buf_states[rx->dqo.num_buf_states - 1].next = -1;
- rx->dqo.recycled_buf_states.head = -1;
- rx->dqo.recycled_buf_states.tail = -1;
- rx->dqo.used_buf_states.head = -1;
- rx->dqo.used_buf_states.tail = -1;
-
/* Allocate RX completion queue */
size = sizeof(rx->dqo.complq.desc_ring[0]) *
completion_queue_slots;
@@ -360,7 +425,11 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
- rx->dqo.qpl = gve_assign_rx_qpl(cfg, rx->q_num);
+ qpl_id = gve_get_rx_qpl_id(cfg->qcfg_tx, rx->q_num);
+ qpl_page_cnt = gve_get_rx_pages_per_qpl_dqo(cfg->ring_size);
+
+ rx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!rx->dqo.qpl)
goto err;
rx->dqo.next_qpl_page_idx = 0;
@@ -371,6 +440,9 @@ static int gve_rx_alloc_ring_dqo(struct gve_priv *priv,
if (!rx->q_resources)
goto err;
+ gve_rx_init_ring_state_dqo(rx, buffer_queue_slots,
+ completion_queue_slots);
+
return 0;
err:
@@ -393,12 +465,6 @@ int gve_rx_alloc_rings_dqo(struct gve_priv *priv,
int err;
int i;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
rx = kvcalloc(cfg->qcfg->max_queues, sizeof(struct gve_rx_ring),
GFP_KERNEL);
if (!rx)
@@ -581,11 +647,13 @@ static void gve_rx_skb_hash(struct sk_buff *skb,
skb_set_hash(skb, le32_to_cpu(compl_desc->hash), hash_type);
}
-static void gve_rx_free_skb(struct gve_rx_ring *rx)
+static void gve_rx_free_skb(struct napi_struct *napi, struct gve_rx_ring *rx)
{
if (!rx->ctx.skb_head)
return;
+ if (rx->ctx.skb_head == napi->skb)
+ napi->skb = NULL;
dev_kfree_skb_any(rx->ctx.skb_head);
rx->ctx.skb_head = NULL;
rx->ctx.skb_tail = NULL;
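
The extra napi->skb check matters because skbs built through napi_get_frags() remain cached in napi->skb until napi_gro_frags() consumes them; freeing such an skb on the error path without clearing the cache would let the next napi_get_frags() hand out freed memory. The guard in isolation:

/* Condensed form of the fix above: never free an skb that napi still
 * caches without detaching it first. */
static void drop_partial_skb(struct napi_struct *napi,
			     struct sk_buff **skb_head)
{
	if (!*skb_head)
		return;

	if (*skb_head == napi->skb)
		napi->skb = NULL; /* detach from the napi cache */

	dev_kfree_skb_any(*skb_head);
	*skb_head = NULL;
}
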
@@ -884,7 +952,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
err = gve_rx_dqo(napi, rx, compl_desc, complq->head, rx->q_num);
if (err < 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
if (err == -ENOMEM)
rx->rx_skb_alloc_fail++;
@@ -927,7 +995,7 @@ int gve_rx_poll_dqo(struct gve_notify_block *block, int budget)
/* gve_rx_complete_skb() will consume skb if successful */
if (gve_rx_complete_skb(rx, napi, compl_desc, feat) != 0) {
- gve_rx_free_skb(rx);
+ gve_rx_free_skb(napi, rx);
u64_stats_update_begin(&rx->statss);
rx->rx_desc_err_dropped_pkt++;
u64_stats_update_end(&rx->statss);
diff --git a/drivers/net/ethernet/google/gve/gve_tx.c b/drivers/net/ethernet/google/gve/gve_tx.c
index 4b9853adc1..e7fb7d6d28 100644
--- a/drivers/net/ethernet/google/gve/gve_tx.c
+++ b/drivers/net/ethernet/google/gve/gve_tx.c
@@ -158,15 +158,16 @@ static int gve_clean_xdp_done(struct gve_priv *priv, struct gve_tx_ring *tx,
u32 to_do)
{
struct gve_tx_buffer_state *info;
- u32 clean_end = tx->done + to_do;
u64 pkts = 0, bytes = 0;
size_t space_freed = 0;
u32 xsk_complete = 0;
u32 idx;
+ int i;
- for (; tx->done < clean_end; tx->done++) {
+ for (i = 0; i < to_do; i++) {
idx = tx->done & tx->mask;
info = &tx->info[idx];
+ tx->done++;
if (unlikely(!info->xdp.size))
continue;
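
The rewritten loop fixes a wraparound hazard: tx->done is a free-running u32 counter, so the old clean_end = tx->done + to_do bound is wrong once the addition wraps past 2^32 and the loop exits before cleaning anything. Counting iterations is immune to the wrap, as this standalone demo shows:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t done = UINT32_MAX - 2; /* free-running counter near wrap */
	uint32_t to_do = 8;
	uint32_t clean_end = done + to_do; /* wraps around to 5 */
	uint32_t cleaned = 0;
	uint32_t i;

	/* Old form: done > clean_end from the start, nothing cleaned. */
	for (i = done; i < clean_end; i++)
		cleaned++;
	printf("old loop cleaned %u of %u\n", cleaned, to_do);

	/* New form: iteration count is independent of the wrap. */
	cleaned = 0;
	for (i = 0; i < to_do; i++) {
		done++;
		cleaned++;
	}
	printf("new loop cleaned %u of %u\n", cleaned, to_do);
	return 0;
}
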
@@ -216,6 +217,7 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
+ u32 qpl_id;
u32 slots;
slots = tx->mask + 1;
@@ -223,9 +225,12 @@ static void gve_tx_free_ring_gqi(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->q_resources, tx->q_resources_bus);
tx->q_resources = NULL;
- if (!tx->raw_addressing) {
- gve_tx_fifo_release(priv, &tx->tx_fifo);
- gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
+ if (tx->tx_fifo.qpl) {
+ if (tx->tx_fifo.base)
+ gve_tx_fifo_release(priv, &tx->tx_fifo);
+
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
tx->tx_fifo.qpl = NULL;
}
@@ -256,6 +261,8 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
int idx)
{
struct device *hdev = &priv->pdev->dev;
+ int qpl_page_cnt;
+ u32 qpl_id = 0;
size_t bytes;
/* Make sure everything is zeroed to start */
@@ -280,9 +287,14 @@ static int gve_tx_alloc_ring_gqi(struct gve_priv *priv,
tx->raw_addressing = cfg->raw_addressing;
tx->dev = hdev;
if (!tx->raw_addressing) {
- tx->tx_fifo.qpl = gve_assign_tx_qpl(cfg, idx);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ qpl_page_cnt = priv->tx_pages_per_qpl;
+
+ tx->tx_fifo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!tx->tx_fifo.qpl)
goto abort_with_desc;
+
/* map Tx FIFO */
if (gve_tx_fifo_init(priv, &tx->tx_fifo))
goto abort_with_qpl;
@@ -302,8 +314,10 @@ abort_with_fifo:
if (!tx->raw_addressing)
gve_tx_fifo_release(priv, &tx->tx_fifo);
abort_with_qpl:
- if (!tx->raw_addressing)
- gve_unassign_qpl(cfg->qpl_cfg, tx->tx_fifo.qpl->id);
+ if (!tx->raw_addressing) {
+ gve_free_queue_page_list(priv, tx->tx_fifo.qpl, qpl_id);
+ tx->tx_fifo.qpl = NULL;
+ }
abort_with_desc:
dma_free_coherent(hdev, bytes, tx->desc, tx->bus);
tx->desc = NULL;
@@ -320,12 +334,6 @@ int gve_tx_alloc_rings_gqi(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
diff --git a/drivers/net/ethernet/google/gve/gve_tx_dqo.c b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
index bc34b6cd3a..f879426cb5 100644
--- a/drivers/net/ethernet/google/gve/gve_tx_dqo.c
+++ b/drivers/net/ethernet/google/gve/gve_tx_dqo.c
@@ -209,6 +209,7 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
struct device *hdev = &priv->pdev->dev;
int idx = tx->q_num;
size_t bytes;
+ u32 qpl_id;
if (tx->q_resources) {
dma_free_coherent(hdev, sizeof(*tx->q_resources),
@@ -237,7 +238,8 @@ static void gve_tx_free_ring_dqo(struct gve_priv *priv, struct gve_tx_ring *tx,
tx->dqo.tx_qpl_buf_next = NULL;
if (tx->dqo.qpl) {
- gve_unassign_qpl(cfg->qpl_cfg, tx->dqo.qpl->id);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ gve_free_queue_page_list(priv, tx->dqo.qpl, qpl_id);
tx->dqo.qpl = NULL;
}
@@ -285,7 +287,9 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
{
struct device *hdev = &priv->pdev->dev;
int num_pending_packets;
+ int qpl_page_cnt;
size_t bytes;
+ u32 qpl_id;
int i;
memset(tx, 0, sizeof(*tx));
@@ -295,9 +299,7 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
/* Queue sizes must be a power of 2 */
tx->mask = cfg->ring_size - 1;
- tx->dqo.complq_mask = priv->queue_format == GVE_DQO_RDA_FORMAT ?
- priv->options_dqo_rda.tx_comp_ring_entries - 1 :
- tx->mask;
+ tx->dqo.complq_mask = tx->mask;
/* The max number of pending packets determines the maximum number of
	 * descriptors which may be written to the completion queue.
@@ -354,7 +356,11 @@ static int gve_tx_alloc_ring_dqo(struct gve_priv *priv,
goto err;
if (!cfg->raw_addressing) {
- tx->dqo.qpl = gve_assign_tx_qpl(cfg, idx);
+ qpl_id = gve_tx_qpl_id(priv, tx->q_num);
+ qpl_page_cnt = priv->tx_pages_per_qpl;
+
+ tx->dqo.qpl = gve_alloc_queue_page_list(priv, qpl_id,
+ qpl_page_cnt);
if (!tx->dqo.qpl)
goto err;
@@ -376,12 +382,6 @@ int gve_tx_alloc_rings_dqo(struct gve_priv *priv,
int err = 0;
int i, j;
- if (!cfg->raw_addressing && !cfg->qpls) {
- netif_err(priv, drv, priv->dev,
- "Cannot alloc QPL ring before allocing QPLs\n");
- return -EINVAL;
- }
-
if (cfg->start_idx + cfg->num_rings > cfg->qcfg->max_queues) {
netif_err(priv, drv, priv->dev,
"Cannot alloc more than the max num of Tx rings\n");
@@ -555,28 +555,18 @@ static int gve_prep_tso(struct sk_buff *skb)
if (unlikely(skb_shinfo(skb)->gso_size < GVE_TX_MIN_TSO_MSS_DQO))
return -1;
+ if (!(skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
+ return -EINVAL;
+
/* Needed because we will modify header. */
err = skb_cow_head(skb, 0);
if (err < 0)
return err;
tcp = tcp_hdr(skb);
-
- /* Remove payload length from checksum. */
paylen = skb->len - skb_transport_offset(skb);
-
- switch (skb_shinfo(skb)->gso_type) {
- case SKB_GSO_TCPV4:
- case SKB_GSO_TCPV6:
- csum_replace_by_diff(&tcp->check,
- (__force __wsum)htonl(paylen));
-
- /* Compute length of segmentation header. */
- header_len = skb_tcp_all_headers(skb);
- break;
- default:
- return -EINVAL;
- }
+ csum_replace_by_diff(&tcp->check, (__force __wsum)htonl(paylen));
+ header_len = skb_tcp_all_headers(skb);
if (unlikely(header_len > GVE_TX_MAX_HDR_SIZE_DQO))
return -EINVAL;
@@ -876,22 +866,42 @@ static bool gve_can_send_tso(const struct sk_buff *skb)
const int header_len = skb_tcp_all_headers(skb);
const int gso_size = shinfo->gso_size;
int cur_seg_num_bufs;
+ int prev_frag_size;
int cur_seg_size;
int i;
cur_seg_size = skb_headlen(skb) - header_len;
+ prev_frag_size = skb_headlen(skb);
cur_seg_num_bufs = cur_seg_size > 0;
for (i = 0; i < shinfo->nr_frags; i++) {
if (cur_seg_size >= gso_size) {
cur_seg_size %= gso_size;
cur_seg_num_bufs = cur_seg_size > 0;
+
+ if (prev_frag_size > GVE_TX_MAX_BUF_SIZE_DQO) {
+ int prev_frag_remain = prev_frag_size %
+ GVE_TX_MAX_BUF_SIZE_DQO;
+
+			 * If the last descriptor of the previous frag
+			 * is smaller than cur_seg_size, the segment will
+			 * span two descriptors in the previous frag.
+ * Since max gso size (9728) is less than
+ * GVE_TX_MAX_BUF_SIZE_DQO, it is impossible
+ * for the segment to span more than two
+ * descriptors.
+ */
+ if (prev_frag_remain &&
+ cur_seg_size > prev_frag_remain)
+ cur_seg_num_bufs++;
+ }
}
if (unlikely(++cur_seg_num_bufs > max_bufs_per_seg))
return false;
- cur_seg_size += skb_frag_size(&shinfo->frags[i]);
+ prev_frag_size = skb_frag_size(&shinfo->frags[i]);
+ cur_seg_size += prev_frag_size;
}
return true;
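
A worked example of the new accounting, using an illustrative 16384-byte GVE_TX_MAX_BUF_SIZE_DQO (the real value may differ): a 20000-byte frag is mapped as a 16384-byte descriptor followed by a 3616-byte one. If the segment straddling the frag boundary still owes 9000 bytes, those bytes end in the 3616-byte descriptor but begin in the 16384-byte one, so the segment costs one extra buffer:

#include <stdio.h>

#define MAX_BUF_SIZE 16384 /* illustrative stand-in for
			    * GVE_TX_MAX_BUF_SIZE_DQO */

int main(void)
{
	int prev_frag_size = 20000; /* mapped as 16384 + 3616 bytes */
	int cur_seg_size = 9000;    /* segment bytes at the frag's tail */
	int prev_frag_remain = prev_frag_size % MAX_BUF_SIZE; /* 3616 */
	int extra = prev_frag_remain && cur_seg_size > prev_frag_remain;

	printf("tail descriptor holds %d bytes; segment spans %d buffer(s)\n",
	       prev_frag_remain, 1 + extra);
	return 0;
}
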