author		Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:11:22 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-08-07 13:11:22 +0000
commit		b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree		42f079ff82e701ebcb76829974b4caca3e5b6798 /drivers/net/ethernet/intel/idpf
parent		Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/intel/idpf')
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf.h               |  146
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq.c      |    7
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_controlq_api.h  |    5
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_dev.c           |    1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_ethtool.c       |   21
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_lib.c           |   40
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_main.c          |    6
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.c          |   13
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_txrx.h          |    1
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_vf_dev.c        |    3
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.c      | 2278
-rw-r--r--  drivers/net/ethernet/intel/idpf/idpf_virtchnl.h      |   70
12 files changed, 1197 insertions(+), 1394 deletions(-)
diff --git a/drivers/net/ethernet/intel/idpf/idpf.h b/drivers/net/ethernet/intel/idpf/idpf.h
index 0acc125dec..e7a0365382 100644
--- a/drivers/net/ethernet/intel/idpf/idpf.h
+++ b/drivers/net/ethernet/intel/idpf/idpf.h
@@ -37,8 +37,6 @@ struct idpf_vport_max_q;
#define IDPF_MB_MAX_ERR 20
#define IDPF_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
((IDPF_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))
-#define IDPF_WAIT_FOR_EVENT_TIMEO_MIN 2000
-#define IDPF_WAIT_FOR_EVENT_TIMEO 60000
#define IDPF_MAX_WAIT 500
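
The surviving IDPF_NUM_CHUNKS_PER_MSG macro (the two removed timeouts reappear in idpf_virtchnl.c as IDPF_VC_XN_MIN_TIMEOUT_MSEC and IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC) computes how many fixed-size chunks fit in one mailbox message after the header. A minimal sketch of the arithmetic, with illustrative sizes and assuming the usual 4096-byte IDPF_CTLQ_MAX_BUF_LEN:

/* Illustrative stand-ins; only the buffer length mirrors the driver. */
#define DEMO_CTLQ_MAX_BUF_LEN	4096
#define DEMO_NUM_CHUNKS_PER_MSG(struct_sz, chunk_sz) \
	((DEMO_CTLQ_MAX_BUF_LEN - (struct_sz)) / (chunk_sz))

/* A 32-byte message header carrying 16-byte queue chunks:
 * (4096 - 32) / 16 = 254 chunks per mailbox message.
 */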
@@ -66,14 +64,12 @@ struct idpf_mac_filter {
/**
* enum idpf_state - State machine to handle bring up
- * @__IDPF_STARTUP: Start the state machine
* @__IDPF_VER_CHECK: Negotiate virtchnl version
* @__IDPF_GET_CAPS: Negotiate capabilities
* @__IDPF_INIT_SW: Init based on given capabilities
* @__IDPF_STATE_LAST: Must be last, used to determine size
*/
enum idpf_state {
- __IDPF_STARTUP,
__IDPF_VER_CHECK,
__IDPF_GET_CAPS,
__IDPF_INIT_SW,
@@ -87,6 +83,7 @@ enum idpf_state {
* @IDPF_HR_RESET_IN_PROG: Reset in progress
* @IDPF_REMOVE_IN_PROG: Driver remove in progress
* @IDPF_MB_INTR_MODE: Mailbox in interrupt mode
+ * @IDPF_VC_CORE_INIT: virtchnl core has been init
* @IDPF_FLAGS_NBITS: Must be last
*/
enum idpf_flags {
@@ -95,6 +92,7 @@ enum idpf_flags {
IDPF_HR_RESET_IN_PROG,
IDPF_REMOVE_IN_PROG,
IDPF_MB_INTR_MODE,
+ IDPF_VC_CORE_INIT,
IDPF_FLAGS_NBITS,
};
@@ -209,71 +207,6 @@ struct idpf_dev_ops {
struct idpf_reg_ops reg_ops;
};
-/* These macros allow us to generate an enum and a matching char * array of
- * stringified enums that are always in sync. Checkpatch issues a bogus warning
- * about this being a complex macro; but it's wrong, these are never used as a
- * statement and instead only used to define the enum and array.
- */
-#define IDPF_FOREACH_VPORT_VC_STATE(STATE) \
- STATE(IDPF_VC_CREATE_VPORT) \
- STATE(IDPF_VC_CREATE_VPORT_ERR) \
- STATE(IDPF_VC_ENA_VPORT) \
- STATE(IDPF_VC_ENA_VPORT_ERR) \
- STATE(IDPF_VC_DIS_VPORT) \
- STATE(IDPF_VC_DIS_VPORT_ERR) \
- STATE(IDPF_VC_DESTROY_VPORT) \
- STATE(IDPF_VC_DESTROY_VPORT_ERR) \
- STATE(IDPF_VC_CONFIG_TXQ) \
- STATE(IDPF_VC_CONFIG_TXQ_ERR) \
- STATE(IDPF_VC_CONFIG_RXQ) \
- STATE(IDPF_VC_CONFIG_RXQ_ERR) \
- STATE(IDPF_VC_ENA_QUEUES) \
- STATE(IDPF_VC_ENA_QUEUES_ERR) \
- STATE(IDPF_VC_DIS_QUEUES) \
- STATE(IDPF_VC_DIS_QUEUES_ERR) \
- STATE(IDPF_VC_MAP_IRQ) \
- STATE(IDPF_VC_MAP_IRQ_ERR) \
- STATE(IDPF_VC_UNMAP_IRQ) \
- STATE(IDPF_VC_UNMAP_IRQ_ERR) \
- STATE(IDPF_VC_ADD_QUEUES) \
- STATE(IDPF_VC_ADD_QUEUES_ERR) \
- STATE(IDPF_VC_DEL_QUEUES) \
- STATE(IDPF_VC_DEL_QUEUES_ERR) \
- STATE(IDPF_VC_ALLOC_VECTORS) \
- STATE(IDPF_VC_ALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_DEALLOC_VECTORS) \
- STATE(IDPF_VC_DEALLOC_VECTORS_ERR) \
- STATE(IDPF_VC_SET_SRIOV_VFS) \
- STATE(IDPF_VC_SET_SRIOV_VFS_ERR) \
- STATE(IDPF_VC_GET_RSS_LUT) \
- STATE(IDPF_VC_GET_RSS_LUT_ERR) \
- STATE(IDPF_VC_SET_RSS_LUT) \
- STATE(IDPF_VC_SET_RSS_LUT_ERR) \
- STATE(IDPF_VC_GET_RSS_KEY) \
- STATE(IDPF_VC_GET_RSS_KEY_ERR) \
- STATE(IDPF_VC_SET_RSS_KEY) \
- STATE(IDPF_VC_SET_RSS_KEY_ERR) \
- STATE(IDPF_VC_GET_STATS) \
- STATE(IDPF_VC_GET_STATS_ERR) \
- STATE(IDPF_VC_ADD_MAC_ADDR) \
- STATE(IDPF_VC_ADD_MAC_ADDR_ERR) \
- STATE(IDPF_VC_DEL_MAC_ADDR) \
- STATE(IDPF_VC_DEL_MAC_ADDR_ERR) \
- STATE(IDPF_VC_GET_PTYPE_INFO) \
- STATE(IDPF_VC_GET_PTYPE_INFO_ERR) \
- STATE(IDPF_VC_LOOPBACK_STATE) \
- STATE(IDPF_VC_LOOPBACK_STATE_ERR) \
- STATE(IDPF_VC_NBITS)
-
-#define IDPF_GEN_ENUM(ENUM) ENUM,
-#define IDPF_GEN_STRING(STRING) #STRING,
-
-enum idpf_vport_vc_state {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_ENUM)
-};
-
-extern const char * const idpf_vport_vc_state_str[];
-
/**
* enum idpf_vport_reset_cause - Vport soft reset causes
* @IDPF_SR_Q_CHANGE: Soft reset queue change
@@ -358,11 +291,7 @@ struct idpf_port_stats {
* @port_stats: per port csum, header split, and other offload stats
* @link_up: True if link is up
* @link_speed_mbps: Link speed in mbps
- * @vc_msg: Virtchnl message buffer
- * @vc_state: Virtchnl message state
- * @vchnl_wq: Wait queue for virtchnl messages
* @sw_marker_wq: workqueue for marker packets
- * @vc_buf_lock: Lock to protect virtchnl buffer
*/
struct idpf_vport {
u16 num_txq;
@@ -408,12 +337,7 @@ struct idpf_vport {
bool link_up;
u32 link_speed_mbps;
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
-
- wait_queue_head_t vchnl_wq;
wait_queue_head_t sw_marker_wq;
- struct mutex vc_buf_lock;
};
/**
@@ -476,15 +400,11 @@ struct idpf_vport_user_config_data {
* enum idpf_vport_config_flags - Vport config flags
* @IDPF_VPORT_REG_NETDEV: Register netdev
* @IDPF_VPORT_UP_REQUESTED: Set if interface up is requested on core reset
- * @IDPF_VPORT_ADD_MAC_REQ: Asynchronous add ether address in flight
- * @IDPF_VPORT_DEL_MAC_REQ: Asynchronous delete ether address in flight
* @IDPF_VPORT_CONFIG_FLAGS_NBITS: Must be last
*/
enum idpf_vport_config_flags {
IDPF_VPORT_REG_NETDEV,
IDPF_VPORT_UP_REQUESTED,
- IDPF_VPORT_ADD_MAC_REQ,
- IDPF_VPORT_DEL_MAC_REQ,
IDPF_VPORT_CONFIG_FLAGS_NBITS,
};
@@ -555,11 +475,13 @@ struct idpf_vector_lifo {
struct idpf_vport_config {
struct idpf_vport_user_config_data user_config;
struct idpf_vport_max_q max_q;
- void *req_qs_chunks;
+ struct virtchnl2_add_queues *req_qs_chunks;
spinlock_t mac_filter_list_lock;
DECLARE_BITMAP(flags, IDPF_VPORT_CONFIG_FLAGS_NBITS);
};
+struct idpf_vc_xn_manager;
+
/**
* struct idpf_adapter - Device data struct generated on probe
* @pdev: PCI device struct given on probe
@@ -601,9 +523,7 @@ struct idpf_vport_config {
* @stats_task: Periodic statistics retrieval task
* @stats_wq: Workqueue for statistics task
* @caps: Negotiated capabilities with device
- * @vchnl_wq: Wait queue for virtchnl messages
- * @vc_state: Virtchnl message state
- * @vc_msg: Virtchnl message buffer
+ * @vcxn_mngr: Virtchnl transaction manager
* @dev_ops: See idpf_dev_ops
* @num_vfs: Number of allocated VFs through sysfs. PF does not directly talk
* to VFs but is used to initialize them
@@ -659,10 +579,8 @@ struct idpf_adapter {
struct delayed_work stats_task;
struct workqueue_struct *stats_wq;
struct virtchnl2_get_capabilities caps;
+ struct idpf_vc_xn_manager *vcxn_mngr;
- wait_queue_head_t vchnl_wq;
- DECLARE_BITMAP(vc_state, IDPF_VC_NBITS);
- char vc_msg[IDPF_CTLQ_MAX_BUF_LEN];
struct idpf_dev_ops dev_ops;
int num_vfs;
bool crc_enable;
@@ -903,68 +821,18 @@ void idpf_mbx_task(struct work_struct *work);
void idpf_vc_event_task(struct work_struct *work);
void idpf_dev_ops_init(struct idpf_adapter *adapter);
void idpf_vf_dev_ops_init(struct idpf_adapter *adapter);
-int idpf_vport_adjust_qs(struct idpf_vport *vport);
-int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
-void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
-int idpf_vc_core_init(struct idpf_adapter *adapter);
-void idpf_vc_core_deinit(struct idpf_adapter *adapter);
int idpf_intr_req(struct idpf_adapter *adapter);
void idpf_intr_rel(struct idpf_adapter *adapter);
-int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
- struct idpf_vec_regs *reg_vals);
u16 idpf_get_max_tx_hdr_size(struct idpf_adapter *adapter);
-int idpf_send_delete_queues_msg(struct idpf_vport *vport);
-int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
- u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
int idpf_initiate_soft_reset(struct idpf_vport *vport,
enum idpf_vport_reset_cause reset_cause);
-int idpf_send_enable_vport_msg(struct idpf_vport *vport);
-int idpf_send_disable_vport_msg(struct idpf_vport *vport);
-int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
-int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
-int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
-int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
-int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
-int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
-int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
void idpf_deinit_task(struct idpf_adapter *adapter);
int idpf_req_rel_vector_indexes(struct idpf_adapter *adapter,
u16 *q_vector_idxs,
struct idpf_vector_info *vec_info);
-int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
-int idpf_send_get_stats_msg(struct idpf_vport *vport);
-int idpf_get_vec_ids(struct idpf_adapter *adapter,
- u16 *vecids, int num_vecids,
- struct virtchnl2_vector_chunks *chunks);
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size);
-int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg);
void idpf_set_ethtool_ops(struct net_device *netdev);
-int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_add_del_mac_filters(struct idpf_vport *vport,
- struct idpf_netdev_priv *np,
- bool add, bool async);
-int idpf_set_promiscuous(struct idpf_adapter *adapter,
- struct idpf_vport_user_config_data *config_data,
- u32 vport_id);
-int idpf_send_disable_queues_msg(struct idpf_vport *vport);
-void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
-u32 idpf_get_vport_id(struct idpf_vport *vport);
-int idpf_vport_queue_ids_init(struct idpf_vport *vport);
-int idpf_queue_reg_init(struct idpf_vport *vport);
-int idpf_send_config_queues_msg(struct idpf_vport *vport);
-int idpf_send_enable_queues_msg(struct idpf_vport *vport);
-int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
- struct idpf_vport_max_q *max_q);
-int idpf_check_supported_desc_ids(struct idpf_vport *vport);
void idpf_vport_intr_write_itr(struct idpf_q_vector *q_vector,
u16 itr, bool tx);
-int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
-int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
int idpf_sriov_configure(struct pci_dev *pdev, int num_vfs);
u8 idpf_vport_get_hsplit(const struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq.c b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
index c7f43d2fcd..4849590a55 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq.c
@@ -516,6 +516,8 @@ post_buffs_out:
/* Wrap to end of ring since current ntp is 0 */
cq->next_to_post = cq->ring_size - 1;
+ dma_wmb();
+
wr32(hw, cq->reg.tail, cq->next_to_post);
}
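
The added dma_wmb() closes an ordering hole: the freshly posted receive-buffer descriptors live in DMA-coherent memory, and the barrier ensures those writes are visible to the device before the tail doorbell exposes them. A minimal sketch of the general post-then-doorbell pattern, with hypothetical names:

#include <linux/io.h>
#include <linux/types.h>

struct demo_desc { __le64 addr; __le32 len; __le32 flags; };

struct demo_ring {
	struct demo_desc *descs;	/* DMA-coherent descriptor memory */
	void __iomem *tail_reg;		/* device doorbell register */
	u16 next_to_post;
};

static void demo_post(struct demo_ring *ring, const struct demo_desc *d)
{
	ring->descs[ring->next_to_post] = *d;	/* plain memory write */

	/* Order the descriptor write ahead of the doorbell: without the
	 * barrier the device could observe the new tail before the
	 * descriptor contents land in coherent memory.
	 */
	dma_wmb();

	writel(ring->next_to_post, ring->tail_reg);
}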
@@ -546,11 +548,6 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
int err = 0;
u16 i;
- if (*num_q_msg == 0)
- return 0;
- else if (*num_q_msg > cq->ring_size)
- return -EBADR;
-
/* take the lock before we start messing with the ring */
mutex_lock(&cq->cq_lock);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
index 8dee098bbf..e8e046ef2f 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_controlq_api.h
@@ -69,6 +69,11 @@ struct idpf_ctlq_msg {
u8 context[IDPF_INDIRECT_CTX_SIZE];
struct idpf_dma_mem *payload;
} indirect;
+ struct {
+ u32 rsvd;
+ u16 data;
+ u16 flags;
+ } sw_cookie;
} ctx;
};
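
This sw_cookie field is the hook for the new transaction layer: the sender packs a ring index and a per-message salt into the 16-bit cookie, the reply echoes it back, and the receive path unpacks both to locate and validate the matching transaction. The masks below are the ones this patch adds in idpf_virtchnl.c; a minimal sketch of the pack/unpack:

#include <linux/bitfield.h>
#include <linux/types.h>

#define IDPF_VC_XN_IDX_M	GENMASK(7, 0)	/* ring slot of the transaction */
#define IDPF_VC_XN_SALT_M	GENMASK(15, 8)	/* per-message uniquifier */

static u16 demo_pack_cookie(u8 idx, u8 salt)
{
	return FIELD_PREP(IDPF_VC_XN_SALT_M, salt) |
	       FIELD_PREP(IDPF_VC_XN_IDX_M, idx);
}

/* On receive: reject replies whose salt no longer matches the slot,
 * e.g. a belated reply to a transaction that was already recycled.
 */
static bool demo_cookie_matches(u16 cookie, u8 idx, u8 salt)
{
	return FIELD_GET(IDPF_VC_XN_IDX_M, cookie) == idx &&
	       FIELD_GET(IDPF_VC_XN_SALT_M, cookie) == salt;
}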
diff --git a/drivers/net/ethernet/intel/idpf/idpf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_dev.c
index 34ad1ac46b..3df9935685 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_pf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_PF_ITR_IDX_SPACING 0x4
diff --git a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
index 6972d72843..1885ba6189 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_ethtool.c
@@ -222,14 +222,19 @@ static int idpf_set_channels(struct net_device *netdev,
struct ethtool_channels *ch)
{
struct idpf_vport_config *vport_config;
- u16 combined, num_txq, num_rxq;
unsigned int num_req_tx_q;
unsigned int num_req_rx_q;
struct idpf_vport *vport;
+ u16 num_txq, num_rxq;
struct device *dev;
int err = 0;
u16 idx;
+ if (ch->rx_count && ch->tx_count) {
+ netdev_err(netdev, "Dedicated RX or TX channels cannot be used simultaneously\n");
+ return -EINVAL;
+ }
+
idpf_vport_ctrl_lock(netdev);
vport = idpf_netdev_to_vport(netdev);
@@ -239,20 +244,6 @@ static int idpf_set_channels(struct net_device *netdev,
num_txq = vport_config->user_config.num_req_tx_qs;
num_rxq = vport_config->user_config.num_req_rx_qs;
- combined = min(num_txq, num_rxq);
-
- /* these checks are for cases where user didn't specify a particular
- * value on cmd line but we get non-zero value anyway via
- * get_channels(); look at ethtool.c in ethtool repository (the user
- * space part), particularly, do_schannels() routine
- */
- if (ch->combined_count == combined)
- ch->combined_count = 0;
- if (ch->combined_count && ch->rx_count == num_rxq - combined)
- ch->rx_count = 0;
- if (ch->combined_count && ch->tx_count == num_txq - combined)
- ch->tx_count = 0;
-
num_req_tx_q = ch->combined_count + ch->tx_count;
num_req_rx_q = ch->combined_count + ch->rx_count;
diff --git a/drivers/net/ethernet/intel/idpf/idpf_lib.c b/drivers/net/ethernet/intel/idpf/idpf_lib.c
index 58179bd733..ae8a48c480 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_lib.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_lib.c
@@ -2,14 +2,11 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
static const struct net_device_ops idpf_netdev_ops_splitq;
static const struct net_device_ops idpf_netdev_ops_singleq;
-const char * const idpf_vport_vc_state_str[] = {
- IDPF_FOREACH_VPORT_VC_STATE(IDPF_GEN_STRING)
-};
-
/**
* idpf_init_vector_stack - Fill the MSIX vector stack with vector index
* @adapter: private data struct
@@ -82,19 +79,12 @@ static void idpf_mb_intr_rel_irq(struct idpf_adapter *adapter)
*/
void idpf_intr_rel(struct idpf_adapter *adapter)
{
- int err;
-
if (!adapter->msix_entries)
return;
idpf_mb_intr_rel_irq(adapter);
pci_free_irq_vectors(adapter->pdev);
-
- err = idpf_send_dealloc_vectors_msg(adapter);
- if (err)
- dev_err(&adapter->pdev->dev,
- "Failed to deallocate vectors: %d\n", err);
-
+ idpf_send_dealloc_vectors_msg(adapter);
idpf_deinit_vector_stack(adapter);
kfree(adapter->msix_entries);
adapter->msix_entries = NULL;
@@ -975,7 +965,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
struct idpf_rss_data *rss_data;
struct idpf_vport_max_q max_q;
u16 idx = vport->idx;
- int i;
vport_config = adapter->vport_config[vport->idx];
idpf_deinit_rss(vport);
@@ -985,20 +974,6 @@ static void idpf_vport_rel(struct idpf_vport *vport)
idpf_send_destroy_vport_msg(vport);
- /* Set all bits as we dont know on which vc_state the vport vhnl_wq
- * is waiting on and wakeup the virtchnl workqueue even if it is
- * waiting for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, vport->vc_state);
- wake_up(&vport->vchnl_wq);
-
- mutex_destroy(&vport->vc_buf_lock);
-
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, vport->vc_state);
-
/* Release all max queues allocated to the adapter's pool */
max_q.max_rxq = vport_config->max_q.max_rxq;
max_q.max_txq = vport_config->max_q.max_txq;
@@ -1253,7 +1228,7 @@ void idpf_mbx_task(struct work_struct *work)
queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task,
msecs_to_jiffies(300));
- idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_UNKNOWN, NULL, 0);
+ idpf_recv_mb_msg(adapter);
}
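
In polling mode idpf_mbx_task() re-arms itself every 300 ms and now simply drains the mailbox; reply routing happens inside the transaction layer. The self-re-arming delayed-work pattern in isolation, with hypothetical names:

#include <linux/workqueue.h>

struct demo_adapter {
	struct workqueue_struct *wq;
	struct delayed_work poll_task;
};

static void demo_poll_task(struct work_struct *work)
{
	struct demo_adapter *a = container_of(work, struct demo_adapter,
					      poll_task.work);

	/* Re-arm before draining so a slow drain can't stall polling. */
	queue_delayed_work(a->wq, &a->poll_task, msecs_to_jiffies(300));

	/* ...drain the mailbox here, e.g. idpf_recv_mb_msg()... */
}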
/**
@@ -1419,6 +1394,7 @@ static int idpf_vport_open(struct idpf_vport *vport, bool alloc_res)
}
idpf_rx_init_buf_tail(vport);
+ idpf_vport_intr_ena(vport);
err = idpf_send_config_queues_msg(vport);
if (err) {
@@ -1543,9 +1519,7 @@ void idpf_init_task(struct work_struct *work)
vport_config = adapter->vport_config[index];
init_waitqueue_head(&vport->sw_marker_wq);
- init_waitqueue_head(&vport->vchnl_wq);
- mutex_init(&vport->vc_buf_lock);
spin_lock_init(&vport_config->mac_filter_list_lock);
INIT_LIST_HEAD(&vport_config->user_config.mac_filter_list);
@@ -1823,6 +1797,8 @@ static int idpf_init_hard_reset(struct idpf_adapter *adapter)
goto unlock_mutex;
}
+ queue_delayed_work(adapter->mbx_wq, &adapter->mbx_task, 0);
+
/* Initialize the state machine, also allocate memory and request
* resources
*/
@@ -1902,7 +1878,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
* mess with. Nothing below should use those variables from new_vport
* and should instead always refer to them in vport if they need to.
*/
- memcpy(new_vport, vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(new_vport, vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Adjust resource parameters prior to reallocating resources */
switch (reset_cause) {
@@ -1951,7 +1927,7 @@ int idpf_initiate_soft_reset(struct idpf_vport *vport,
/* Same comment as above regarding avoiding copying the wait_queues and
* mutexes applies here. We do not want to mess with those if possible.
*/
- memcpy(vport, new_vport, offsetof(struct idpf_vport, vc_state));
+ memcpy(vport, new_vport, offsetof(struct idpf_vport, link_speed_mbps));
/* Since idpf_vport_queues_alloc was called with new_port, the queue
* back pointers are currently pointing to the local new_vport. Reset
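
Both soft-reset memcpy() calls now stop at link_speed_mbps instead of the removed vc_state, so the bound keeps tracking "everything before the members that must not be copied" (the wait queue, and link_speed_mbps itself, which the event handler writes asynchronously). A minimal sketch of the idiom with a hypothetical struct:

#include <stddef.h>
#include <string.h>

struct demo_vport {
	int num_txq;			/* ...plain config, safe to copy... */
	int num_rxq;
	/* Everything from here on keeps its identity across a reset. */
	unsigned int link_speed_mbps;
	void *sw_marker_wq;
};

static void demo_clone_config(struct demo_vport *dst,
			      const struct demo_vport *src)
{
	/* offsetof() bounds the copy: the named member and everything
	 * after it are deliberately left untouched in @dst.
	 */
	memcpy(dst, src, offsetof(struct demo_vport, link_speed_mbps));
}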
diff --git a/drivers/net/ethernet/intel/idpf/idpf_main.c b/drivers/net/ethernet/intel/idpf/idpf_main.c
index e1febc74ce..f784eea044 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_main.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_main.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_devids.h"
+#include "idpf_virtchnl.h"
#define DRV_SUMMARY "Intel(R) Infrastructure Data Path Function Linux Driver"
@@ -30,6 +31,7 @@ static void idpf_remove(struct pci_dev *pdev)
idpf_sriov_configure(pdev, 0);
idpf_vc_core_deinit(adapter);
+
/* Be a good citizen and leave the device clean on exit */
adapter->dev_ops.reg_ops.trigger_reset(adapter, IDPF_HR_FUNC_RESET);
idpf_deinit_dflt_mbx(adapter);
@@ -66,6 +68,8 @@ destroy_wqs:
adapter->vport_config = NULL;
kfree(adapter->netdevs);
adapter->netdevs = NULL;
+ kfree(adapter->vcxn_mngr);
+ adapter->vcxn_mngr = NULL;
mutex_destroy(&adapter->vport_ctrl_lock);
mutex_destroy(&adapter->vector_lock);
@@ -229,8 +233,6 @@ static int idpf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mutex_init(&adapter->queue_lock);
mutex_init(&adapter->vc_buf_lock);
- init_waitqueue_head(&adapter->vchnl_wq);
-
INIT_DELAYED_WORK(&adapter->init_task, idpf_init_task);
INIT_DELAYED_WORK(&adapter->serv_task, idpf_service_task);
INIT_DELAYED_WORK(&adapter->mbx_task, idpf_mbx_task);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.c b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
index 017a081d85..7fc77ed9d1 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.c
@@ -2,6 +2,7 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
/**
* idpf_buf_lifo_push - push a buffer pointer onto stack
@@ -3746,9 +3747,9 @@ static void idpf_vport_intr_ena_irq_all(struct idpf_vport *vport)
*/
void idpf_vport_intr_deinit(struct idpf_vport *vport)
{
+ idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
- idpf_vport_intr_dis_irq_all(vport);
idpf_vport_intr_rel_irq(vport);
}
@@ -4179,7 +4180,6 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
idpf_vport_intr_map_vector_to_qs(vport);
idpf_vport_intr_napi_add_all(vport);
- idpf_vport_intr_napi_ena_all(vport);
err = vport->adapter->dev_ops.reg_ops.intr_reg_init(vport);
if (err)
@@ -4193,17 +4193,20 @@ int idpf_vport_intr_init(struct idpf_vport *vport)
if (err)
goto unroll_vectors_alloc;
- idpf_vport_intr_ena_irq_all(vport);
-
return 0;
unroll_vectors_alloc:
- idpf_vport_intr_napi_dis_all(vport);
idpf_vport_intr_napi_del_all(vport);
return err;
}
+void idpf_vport_intr_ena(struct idpf_vport *vport)
+{
+ idpf_vport_intr_napi_ena_all(vport);
+ idpf_vport_intr_ena_irq_all(vport);
+}
+
/**
* idpf_config_rss - Send virtchnl messages to configure RSS
* @vport: virtual port
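
The net effect in idpf_txrx.c is a split between setup and enablement: idpf_vport_intr_init() now only allocates, maps, and registers vectors, the new idpf_vport_intr_ena() turns on NAPI and then IRQs once idpf_vport_open() has the buffer tails initialized, and idpf_vport_intr_deinit() mirrors this by disabling IRQs before NAPI. A toy model of the ordering, with hypothetical stubs:

struct demo_vport;

/* Allocate and map vectors; nothing can fire yet. */
static int demo_intr_init(struct demo_vport *v) { return 0; }

/* Enable NAPI first, then unmask device IRQs. */
static void demo_intr_ena(struct demo_vport *v) { }

/* Teardown mirrors bring-up: mask IRQs before NAPI so no vector can
 * schedule a poll on a context that is being torn down.
 */
static void demo_intr_deinit(struct demo_vport *v) { }

static int demo_open(struct demo_vport *v)
{
	int err = demo_intr_init(v);

	if (err)
		return err;

	/* ...ring and buffer initialization goes here... */

	demo_intr_ena(v);	/* last step before traffic */
	return 0;
}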
diff --git a/drivers/net/ethernet/intel/idpf/idpf_txrx.h b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
index df76493faa..85a1466890 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_txrx.h
+++ b/drivers/net/ethernet/intel/idpf/idpf_txrx.h
@@ -988,6 +988,7 @@ int idpf_vport_intr_alloc(struct idpf_vport *vport);
void idpf_vport_intr_update_itr_ena_irq(struct idpf_q_vector *q_vector);
void idpf_vport_intr_deinit(struct idpf_vport *vport);
int idpf_vport_intr_init(struct idpf_vport *vport);
+void idpf_vport_intr_ena(struct idpf_vport *vport);
enum pkt_hash_types idpf_ptype_to_htype(const struct idpf_rx_ptype_decoded *decoded);
int idpf_config_rss(struct idpf_vport *vport);
int idpf_init_rss(struct idpf_vport *vport);
diff --git a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
index 8ade4e3a9f..629cb5cb7c 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_vf_dev.c
@@ -3,6 +3,7 @@
#include "idpf.h"
#include "idpf_lan_vf_regs.h"
+#include "idpf_virtchnl.h"
#define IDPF_VF_ITR_IDX_SPACING 0x40
@@ -137,7 +138,7 @@ static void idpf_vf_trigger_reset(struct idpf_adapter *adapter,
/* Do not send VIRTCHNL2_OP_RESET_VF message on driver unload */
if (trig_cause == IDPF_HR_FUNC_RESET &&
!test_bit(IDPF_REMOVE_IN_PROG, adapter->flags))
- idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL);
+ idpf_send_mb_msg(adapter, VIRTCHNL2_OP_RESET_VF, 0, NULL, 0);
}
/**
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
index 390977a76d..a5f9b7a5ef 100644
--- a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.c
@@ -2,46 +2,192 @@
/* Copyright (C) 2023 Intel Corporation */
#include "idpf.h"
+#include "idpf_virtchnl.h"
+
+#define IDPF_VC_XN_MIN_TIMEOUT_MSEC 2000
+#define IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC (60 * 1000)
+#define IDPF_VC_XN_IDX_M GENMASK(7, 0)
+#define IDPF_VC_XN_SALT_M GENMASK(15, 8)
+#define IDPF_VC_XN_RING_LEN U8_MAX
+
+/**
+ * enum idpf_vc_xn_state - Virtchnl transaction status
+ * @IDPF_VC_XN_IDLE: not expecting a reply, ready to be used
+ * @IDPF_VC_XN_WAITING: expecting a reply, not yet received
+ * @IDPF_VC_XN_COMPLETED_SUCCESS: a reply was expected and received,
+ * buffer updated
+ * @IDPF_VC_XN_COMPLETED_FAILED: a reply was expected and received, but there
+ * was an error, buffer not updated
+ * @IDPF_VC_XN_SHUTDOWN: transaction object cannot be used, VC torn down
+ * @IDPF_VC_XN_ASYNC: transaction sent asynchronously and doesn't have the
+ * return context; a callback may be provided to handle
+ * return
+ */
+enum idpf_vc_xn_state {
+ IDPF_VC_XN_IDLE = 1,
+ IDPF_VC_XN_WAITING,
+ IDPF_VC_XN_COMPLETED_SUCCESS,
+ IDPF_VC_XN_COMPLETED_FAILED,
+ IDPF_VC_XN_SHUTDOWN,
+ IDPF_VC_XN_ASYNC,
+};
+
+struct idpf_vc_xn;
+/* Callback for asynchronous messages */
+typedef int (*async_vc_cb) (struct idpf_adapter *, struct idpf_vc_xn *,
+ const struct idpf_ctlq_msg *);
+
+/**
+ * struct idpf_vc_xn - Data structure representing virtchnl transactions
+ * @completed: virtchnl event loop uses that to signal when a reply is
+ * available, uses kernel completion API
+ * @state: virtchnl event loop stores the data below, protected by the
+ * completion's lock.
+ * @reply_sz: Original size of reply, may be > reply_buf.iov_len; it will be
+ * truncated on its way to the receiver thread according to
+ * reply_buf.iov_len.
+ * @reply: Reference to the buffer(s) where the reply data should be written
+ * to. May be 0-length (then NULL address permitted) if the reply data
+ * should be ignored.
+ * @async_handler: if sent asynchronously, a callback can be provided to handle
+ * the reply when it's received
+ * @vc_op: corresponding opcode sent with this transaction
+ * @idx: index used as retrieval on reply receive, used for cookie
+ * @salt: changed every message to make unique, used for cookie
+ */
+struct idpf_vc_xn {
+ struct completion completed;
+ enum idpf_vc_xn_state state;
+ size_t reply_sz;
+ struct kvec reply;
+ async_vc_cb async_handler;
+ u32 vc_op;
+ u8 idx;
+ u8 salt;
+};
+
+/**
+ * struct idpf_vc_xn_params - Parameters for executing transaction
+ * @send_buf: kvec for send buffer
+ * @recv_buf: kvec for recv buffer, may be NULL, must then have zero length
+ * @timeout_ms: timeout to wait for reply
+ * @async: send message asynchronously, will not wait on completion
+ * @async_handler: If sent asynchronously, optional callback handler. The user
+ * must be careful when using async handlers as the memory for
+ * the recv_buf _cannot_ be on stack if this is async.
+ * @vc_op: virtchnl op to send
+ */
+struct idpf_vc_xn_params {
+ struct kvec send_buf;
+ struct kvec recv_buf;
+ int timeout_ms;
+ bool async;
+ async_vc_cb async_handler;
+ u32 vc_op;
+};
+
+/**
+ * struct idpf_vc_xn_manager - Manager for tracking transactions
+ * @ring: backing and lookup for transactions
+ * @free_xn_bm: bitmap for free transactions
+ * @xn_bm_lock: make bitmap access synchronous where necessary
+ * @salt: used to make cookie unique every message
+ */
+struct idpf_vc_xn_manager {
+ struct idpf_vc_xn ring[IDPF_VC_XN_RING_LEN];
+ DECLARE_BITMAP(free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spinlock_t xn_bm_lock;
+ u8 salt;
+};
+
+/**
+ * idpf_vid_to_vport - Translate vport id to vport pointer
+ * @adapter: private data struct
+ * @v_id: vport id to translate
+ *
+ * Returns vport matching v_id, NULL if not found.
+ */
+static
+struct idpf_vport *idpf_vid_to_vport(struct idpf_adapter *adapter, u32 v_id)
+{
+ u16 num_max_vports = idpf_get_max_vports(adapter);
+ int i;
+
+ for (i = 0; i < num_max_vports; i++)
+ if (adapter->vport_ids[i] == v_id)
+ return adapter->vports[i];
+
+ return NULL;
+}
+
+/**
+ * idpf_handle_event_link - Handle link event message
+ * @adapter: private data struct
+ * @v2e: virtchnl event message
+ */
+static void idpf_handle_event_link(struct idpf_adapter *adapter,
+ const struct virtchnl2_event *v2e)
+{
+ struct idpf_netdev_priv *np;
+ struct idpf_vport *vport;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(v2e->vport_id));
+ if (!vport) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to find vport_id %d for link event\n",
+ v2e->vport_id);
+ return;
+ }
+ np = netdev_priv(vport->netdev);
+
+ vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
+
+ if (vport->link_up == v2e->link_status)
+ return;
+
+ vport->link_up = v2e->link_status;
+
+ if (np->state != __IDPF_VPORT_UP)
+ return;
+
+ if (vport->link_up) {
+ netif_tx_start_all_queues(vport->netdev);
+ netif_carrier_on(vport->netdev);
+ } else {
+ netif_tx_stop_all_queues(vport->netdev);
+ netif_carrier_off(vport->netdev);
+ }
+}
/**
* idpf_recv_event_msg - Receive virtchnl event message
- * @vport: virtual port structure
+ * @adapter: Driver specific private structure
* @ctlq_msg: message to copy from
*
* Receive virtchnl event message
*/
-static void idpf_recv_event_msg(struct idpf_vport *vport,
+static void idpf_recv_event_msg(struct idpf_adapter *adapter,
struct idpf_ctlq_msg *ctlq_msg)
{
- struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
+ int payload_size = ctlq_msg->ctx.indirect.payload->size;
struct virtchnl2_event *v2e;
- bool link_status;
u32 event;
+ if (payload_size < sizeof(*v2e)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Failed to receive valid payload for event msg (op %d len %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode,
+ payload_size);
+ return;
+ }
+
v2e = (struct virtchnl2_event *)ctlq_msg->ctx.indirect.payload->va;
event = le32_to_cpu(v2e->event);
switch (event) {
case VIRTCHNL2_EVENT_LINK_CHANGE:
- vport->link_speed_mbps = le32_to_cpu(v2e->link_speed);
- link_status = v2e->link_status;
-
- if (vport->link_up == link_status)
- break;
-
- vport->link_up = link_status;
- if (np->state == __IDPF_VPORT_UP) {
- if (vport->link_up) {
- netif_carrier_on(vport->netdev);
- netif_tx_start_all_queues(vport->netdev);
- } else {
- netif_tx_stop_all_queues(vport->netdev);
- netif_carrier_off(vport->netdev);
- }
- }
- break;
+ idpf_handle_event_link(adapter, v2e);
+ return;
default:
- dev_err(&vport->adapter->pdev->dev,
+ dev_err(&adapter->pdev->dev,
"Unknown event %d from PF\n", event);
break;
}
@@ -93,13 +239,14 @@ err_kfree:
* @op: virtchnl opcode
* @msg_size: size of the payload
* @msg: pointer to buffer holding the payload
+ * @cookie: unique SW generated cookie per message
*
* Will prepare the control queue message and initiates the send api
*
* Returns 0 on success, negative on failure
*/
int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
- u16 msg_size, u8 *msg)
+ u16 msg_size, u8 *msg, u16 cookie)
{
struct idpf_ctlq_msg *ctlq_msg;
struct idpf_dma_mem *dma_mem;
@@ -139,8 +286,12 @@ int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
err = -ENOMEM;
goto dma_alloc_error;
}
- memcpy(dma_mem->va, msg, msg_size);
+
+ /* It's possible we're just sending an opcode but no buffer */
+ if (msg && msg_size)
+ memcpy(dma_mem->va, msg, msg_size);
ctlq_msg->ctx.indirect.payload = dma_mem;
+ ctlq_msg->ctx.sw_cookie.data = cookie;
err = idpf_ctlq_send(&adapter->hw, adapter->hw.asq, 1, ctlq_msg);
if (err)
@@ -159,592 +310,432 @@ dma_mem_error:
return err;
}
-/**
- * idpf_find_vport - Find vport pointer from control queue message
- * @adapter: driver specific private structure
- * @vport: address of vport pointer to copy the vport from adapters vport list
- * @ctlq_msg: control queue message
+/* API for virtchnl "transaction" support ("xn" for short).
*
- * Return 0 on success, error value on failure. Also this function does check
- * for the opcodes which expect to receive payload and return error value if
- * it is not the case.
+ * We are reusing the completion lock to serialize the accesses to the
+ * transaction state for simplicity, but it could be its own separate synchro
+ * as well. For now, this API is only used from within a workqueue context;
+ * raw_spin_lock() is enough.
*/
-static int idpf_find_vport(struct idpf_adapter *adapter,
- struct idpf_vport **vport,
- struct idpf_ctlq_msg *ctlq_msg)
-{
- bool no_op = false, vid_found = false;
- int i, err = 0;
- char *vc_msg;
- u32 v_id;
+/**
+ * idpf_vc_xn_lock - Request exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_lock(xn) \
+ raw_spin_lock(&(xn)->completed.wait.lock)
- vc_msg = kcalloc(IDPF_CTLQ_MAX_BUF_LEN, sizeof(char), GFP_KERNEL);
- if (!vc_msg)
- return -ENOMEM;
+/**
+ * idpf_vc_xn_unlock - Release exclusive access to vc transaction
+ * @xn: struct idpf_vc_xn* to access
+ */
+#define idpf_vc_xn_unlock(xn) \
+ raw_spin_unlock(&(xn)->completed.wait.lock)
- if (ctlq_msg->data_len) {
- size_t payload_size = ctlq_msg->ctx.indirect.payload->size;
+/**
+ * idpf_vc_xn_release_bufs - Release reference to reply buffer(s) and
+ * reset the transaction state.
+ * @xn: struct idpf_vc_xn to update
+ */
+static void idpf_vc_xn_release_bufs(struct idpf_vc_xn *xn)
+{
+ xn->reply.iov_base = NULL;
+ xn->reply.iov_len = 0;
- if (!payload_size) {
- dev_err(&adapter->pdev->dev, "Failed to receive payload buffer\n");
- kfree(vc_msg);
+ if (xn->state != IDPF_VC_XN_SHUTDOWN)
+ xn->state = IDPF_VC_XN_IDLE;
+}
- return -EINVAL;
- }
+/**
+ * idpf_vc_xn_init - Initialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
+ */
+static void idpf_vc_xn_init(struct idpf_vc_xn_manager *vcxn_mngr)
+{
+ int i;
- memcpy(vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(size_t, payload_size, IDPF_CTLQ_MAX_BUF_LEN));
- }
-
- switch (ctlq_msg->cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- case VIRTCHNL2_OP_CREATE_VPORT:
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- goto free_vc_msg;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- case VIRTCHNL2_OP_DISABLE_VPORT:
- case VIRTCHNL2_OP_DESTROY_VPORT:
- v_id = le32_to_cpu(((struct virtchnl2_vport *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_tx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_config_rx_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- case VIRTCHNL2_OP_DEL_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_del_ena_dis_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- v_id = le32_to_cpu(((struct virtchnl2_add_queues *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- v_id = le32_to_cpu(((struct virtchnl2_queue_vector_maps *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- v_id = le32_to_cpu(((struct virtchnl2_vport_stats *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- case VIRTCHNL2_OP_SET_RSS_LUT:
- v_id = le32_to_cpu(((struct virtchnl2_rss_lut *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- case VIRTCHNL2_OP_SET_RSS_KEY:
- v_id = le32_to_cpu(((struct virtchnl2_rss_key *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_EVENT:
- v_id = le32_to_cpu(((struct virtchnl2_event *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- v_id = le32_to_cpu(((struct virtchnl2_loopback *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- v_id = le32_to_cpu(((struct virtchnl2_promisc_info *)vc_msg)->vport_id);
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- v_id = le32_to_cpu(((struct virtchnl2_mac_addr_list *)vc_msg)->vport_id);
- break;
- default:
- no_op = true;
- break;
- }
+ spin_lock_init(&vcxn_mngr->xn_bm_lock);
- if (no_op)
- goto free_vc_msg;
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- for (i = 0; i < idpf_get_max_vports(adapter); i++) {
- if (adapter->vport_ids[i] == v_id) {
- vid_found = true;
- break;
- }
+ xn->state = IDPF_VC_XN_IDLE;
+ xn->idx = i;
+ idpf_vc_xn_release_bufs(xn);
+ init_completion(&xn->completed);
}
- if (vid_found)
- *vport = adapter->vports[i];
- else
- err = -EINVAL;
-
-free_vc_msg:
- kfree(vc_msg);
-
- return err;
+ bitmap_fill(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
}
/**
- * idpf_copy_data_to_vc_buf - Copy the virtchnl response data into the buffer.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @err_enum: err bit to set on error
+ * idpf_vc_xn_shutdown - Uninitialize virtchnl transaction object
+ * @vcxn_mngr: pointer to vc transaction manager struct
*
- * Copies the payload from ctlq_msg into virtchnl buffer. Returns 0 on success,
- * negative on failure.
+ * All waiting threads will be woken-up and their transaction aborted. Further
+ * operations on that object will fail.
*/
-static int idpf_copy_data_to_vc_buf(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state err_enum)
+static void idpf_vc_xn_shutdown(struct idpf_vc_xn_manager *vcxn_mngr)
{
- if (ctlq_msg->cookie.mbx.chnl_retval) {
- if (vport)
- set_bit(err_enum, vport->vc_state);
- else
- set_bit(err_enum, adapter->vc_state);
+ int i;
- return -EINVAL;
- }
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ bitmap_zero(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
- if (vport)
- memcpy(vport->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
- else
- memcpy(adapter->vc_msg, ctlq_msg->ctx.indirect.payload->va,
- min_t(int, ctlq_msg->ctx.indirect.payload->size,
- IDPF_CTLQ_MAX_BUF_LEN));
+ for (i = 0; i < ARRAY_SIZE(vcxn_mngr->ring); i++) {
+ struct idpf_vc_xn *xn = &vcxn_mngr->ring[i];
- return 0;
+ idpf_vc_xn_lock(xn);
+ xn->state = IDPF_VC_XN_SHUTDOWN;
+ idpf_vc_xn_release_bufs(xn);
+ idpf_vc_xn_unlock(xn);
+ complete_all(&xn->completed);
+ }
}
/**
- * idpf_recv_vchnl_op - helper function with common logic when handling the
- * reception of VIRTCHNL OPs.
- * @adapter: driver specific private structure
- * @vport: virtual port structure
- * @ctlq_msg: msg to copy from
- * @state: state bit used on timeout check
- * @err_state: err bit to set on error
+ * idpf_vc_xn_pop_free - Pop a free transaction from free list
+ * @vcxn_mngr: transaction manager to pop from
+ *
+ * Returns NULL if no free transactions
*/
-static void idpf_recv_vchnl_op(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- struct idpf_ctlq_msg *ctlq_msg,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_state)
+static
+struct idpf_vc_xn *idpf_vc_xn_pop_free(struct idpf_vc_xn_manager *vcxn_mngr)
{
- wait_queue_head_t *vchnl_wq;
- int err;
+ struct idpf_vc_xn *xn = NULL;
+ unsigned long free_idx;
- if (vport)
- vchnl_wq = &vport->vchnl_wq;
- else
- vchnl_wq = &adapter->vchnl_wq;
+ spin_lock_bh(&vcxn_mngr->xn_bm_lock);
+ free_idx = find_first_bit(vcxn_mngr->free_xn_bm, IDPF_VC_XN_RING_LEN);
+ if (free_idx == IDPF_VC_XN_RING_LEN)
+ goto do_unlock;
- err = idpf_copy_data_to_vc_buf(adapter, vport, ctlq_msg, err_state);
- if (wq_has_sleeper(vchnl_wq)) {
- if (vport)
- set_bit(state, vport->vc_state);
- else
- set_bit(state, adapter->vc_state);
+ clear_bit(free_idx, vcxn_mngr->free_xn_bm);
+ xn = &vcxn_mngr->ring[free_idx];
+ xn->salt = vcxn_mngr->salt++;
- wake_up(vchnl_wq);
- } else {
- if (!err) {
- dev_warn(&adapter->pdev->dev, "opcode %d received without waiting thread\n",
- ctlq_msg->cookie.mbx.chnl_opcode);
- } else {
- /* Clear the errors since there is no sleeper to pass
- * them on
- */
- if (vport)
- clear_bit(err_state, vport->vc_state);
- else
- clear_bit(err_state, adapter->vc_state);
- }
- }
+do_unlock:
+ spin_unlock_bh(&vcxn_mngr->xn_bm_lock);
+
+ return xn;
}
/**
- * idpf_recv_mb_msg - Receive message over mailbox
- * @adapter: Driver specific private structure
- * @op: virtchannel operation code
- * @msg: Received message holding buffer
- * @msg_size: message size
- *
- * Will receive control queue message and posts the receive buffer. Returns 0
- * on success and negative on failure.
+ * idpf_vc_xn_push_free - Push a free transaction to free list
+ * @vcxn_mngr: transaction manager to push to
+ * @xn: transaction to push
*/
-int idpf_recv_mb_msg(struct idpf_adapter *adapter, u32 op,
- void *msg, int msg_size)
+static void idpf_vc_xn_push_free(struct idpf_vc_xn_manager *vcxn_mngr,
+ struct idpf_vc_xn *xn)
{
- struct idpf_vport *vport = NULL;
- struct idpf_ctlq_msg ctlq_msg;
- struct idpf_dma_mem *dma_mem;
- bool work_done = false;
- int num_retry = 2000;
- u16 num_q_msg;
- int err;
-
- while (1) {
- struct idpf_vport_config *vport_config;
- int payload_size = 0;
-
- /* Try to get one message */
- num_q_msg = 1;
- dma_mem = NULL;
- err = idpf_ctlq_recv(adapter->hw.arq, &num_q_msg, &ctlq_msg);
- /* If no message then decide if we have to retry based on
- * opcode
- */
- if (err || !num_q_msg) {
- /* Increasing num_retry to consider the delayed
- * responses because of large number of VF's mailbox
- * messages. If the mailbox message is received from
- * the other side, we come out of the sleep cycle
- * immediately else we wait for more time.
- */
- if (!op || !num_retry--)
- break;
- if (test_bit(IDPF_REMOVE_IN_PROG, adapter->flags)) {
- err = -EIO;
- break;
- }
- msleep(20);
- continue;
- }
+ idpf_vc_xn_release_bufs(xn);
+ set_bit(xn->idx, vcxn_mngr->free_xn_bm);
+}
- /* If we are here a message is received. Check if we are looking
- * for a specific message based on opcode. If it is different
- * ignore and post buffers
+/**
+ * idpf_vc_xn_exec - Perform a send/recv virtchnl transaction
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @params: parameters for this particular transaction including
+ * -vc_op: virtchannel operation to send
+ * -send_buf: kvec iov for send buf and len
+ * -recv_buf: kvec iov for recv buf and len (ignored if NULL)
+ * -timeout_ms: timeout waiting for a reply (milliseconds)
+ * -async: don't wait for message reply, will lose caller context
+ * -async_handler: callback to handle async replies
+ *
+ * @returns >= 0 for success, the size of the initial reply (may or may not be
+ * >= @recv_buf.iov_len, but we never overflow @recv_buf.iov_base). < 0 for
+ * error.
+ */
+static ssize_t idpf_vc_xn_exec(struct idpf_adapter *adapter,
+ const struct idpf_vc_xn_params *params)
+{
+ const struct kvec *send_buf = &params->send_buf;
+ struct idpf_vc_xn *xn;
+ ssize_t retval;
+ u16 cookie;
+
+ xn = idpf_vc_xn_pop_free(adapter->vcxn_mngr);
+ /* no free transactions available */
+ if (!xn)
+ return -ENOSPC;
+
+ idpf_vc_xn_lock(xn);
+ if (xn->state == IDPF_VC_XN_SHUTDOWN) {
+ retval = -ENXIO;
+ goto only_unlock;
+ } else if (xn->state != IDPF_VC_XN_IDLE) {
+ /* We're just going to clobber this transaction even though
+ * it's not IDLE. If we don't reuse it we could theoretically
+ * eventually leak all the free transactions and not be able to
+ * send any messages. At least this way we make an attempt to
+ * remain functional even though something really bad is
+ * happening that's corrupting what was supposed to be free
+ * transactions.
*/
- if (op && ctlq_msg.cookie.mbx.chnl_opcode != op)
- goto post_buffs;
+ WARN_ONCE(1, "There should only be idle transactions in free list (idx %d op %d)\n",
+ xn->idx, xn->vc_op);
+ }
- err = idpf_find_vport(adapter, &vport, &ctlq_msg);
- if (err)
- goto post_buffs;
+ xn->reply = params->recv_buf;
+ xn->reply_sz = 0;
+ xn->state = params->async ? IDPF_VC_XN_ASYNC : IDPF_VC_XN_WAITING;
+ xn->vc_op = params->vc_op;
+ xn->async_handler = params->async_handler;
+ idpf_vc_xn_unlock(xn);
- if (ctlq_msg.data_len)
- payload_size = ctlq_msg.ctx.indirect.payload->size;
+ if (!params->async)
+ reinit_completion(&xn->completed);
+ cookie = FIELD_PREP(IDPF_VC_XN_SALT_M, xn->salt) |
+ FIELD_PREP(IDPF_VC_XN_IDX_M, xn->idx);
- /* All conditions are met. Either a message requested is
- * received or we received a message to be processed
- */
- switch (ctlq_msg.cookie.mbx.chnl_opcode) {
- case VIRTCHNL2_OP_VERSION:
- case VIRTCHNL2_OP_GET_CAPS:
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failure initializing, vc op: %u retval: %u\n",
- ctlq_msg.cookie.mbx.chnl_opcode,
- ctlq_msg.cookie.mbx.chnl_retval);
- err = -EBADMSG;
- } else if (msg) {
- memcpy(msg, ctlq_msg.ctx.indirect.payload->va,
- min_t(int, payload_size, msg_size));
- }
- work_done = true;
- break;
- case VIRTCHNL2_OP_CREATE_VPORT:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_DESTROY_VPORT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_TX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_RX_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- break;
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_ADD_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_QUEUES,
- IDPF_VC_ADD_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_DEL_QUEUES:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
- break;
- case VIRTCHNL2_OP_MAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- break;
- case VIRTCHNL2_OP_GET_STATS:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_LUT:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
- break;
- case VIRTCHNL2_OP_GET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_RSS_KEY:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
- break;
- case VIRTCHNL2_OP_SET_SRIOV_VFS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
- break;
- case VIRTCHNL2_OP_ALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_DEALLOC_VECTORS:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- break;
- case VIRTCHNL2_OP_GET_PTYPE_INFO:
- idpf_recv_vchnl_op(adapter, NULL, &ctlq_msg,
- IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- break;
- case VIRTCHNL2_OP_LOOPBACK:
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
- break;
- case VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE:
- /* This message can only be sent asynchronously. As
- * such we'll have lost the context in which it was
- * called and thus can only really report if it looks
- * like an error occurred. Don't bother setting ERR bit
- * or waking chnl_wq since no work queue will be waiting
- * to read the message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval) {
- dev_err(&adapter->pdev->dev, "Failed to set promiscuous mode: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- }
- break;
- case VIRTCHNL2_OP_ADD_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_ADD_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously. We don't
- * normally print errors here, instead
- * prefer to handle errors in the function
- * calling wait_for_event. However, if
- * asynchronous, the context in which the
- * message was sent is lost. We can't really do
- * anything about at it this point, but we
- * should at a minimum indicate that it looks
- * like something went wrong. Also don't bother
- * setting ERR bit or waking vchnl_wq since no
- * one will be waiting to read the async
- * message.
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to add MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_ADD_MAC_ADDR,
- IDPF_VC_ADD_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_DEL_MAC_ADDR:
- vport_config = adapter->vport_config[vport->idx];
- if (test_and_clear_bit(IDPF_VPORT_DEL_MAC_REQ,
- vport_config->flags)) {
- /* Message was sent asynchronously like the
- * VIRTCHNL2_OP_ADD_MAC_ADDR
- */
- if (ctlq_msg.cookie.mbx.chnl_retval)
- dev_err(&adapter->pdev->dev, "Failed to delete MAC address: %d\n",
- ctlq_msg.cookie.mbx.chnl_retval);
- break;
- }
- idpf_recv_vchnl_op(adapter, vport, &ctlq_msg,
- IDPF_VC_DEL_MAC_ADDR,
- IDPF_VC_DEL_MAC_ADDR_ERR);
- break;
- case VIRTCHNL2_OP_EVENT:
- idpf_recv_event_msg(vport, &ctlq_msg);
- break;
- default:
- dev_warn(&adapter->pdev->dev,
- "Unhandled virtchnl response %d\n",
- ctlq_msg.cookie.mbx.chnl_opcode);
- break;
- }
+ retval = idpf_send_mb_msg(adapter, params->vc_op,
+ send_buf->iov_len, send_buf->iov_base,
+ cookie);
+ if (retval) {
+ idpf_vc_xn_lock(xn);
+ goto release_and_unlock;
+ }
-post_buffs:
- if (ctlq_msg.data_len)
- dma_mem = ctlq_msg.ctx.indirect.payload;
- else
- num_q_msg = 0;
+ if (params->async)
+ return 0;
- err = idpf_ctlq_post_rx_buffs(&adapter->hw, adapter->hw.arq,
- &num_q_msg, &dma_mem);
- /* If post failed clear the only buffer we supplied */
- if (err && dma_mem)
- dma_free_coherent(&adapter->pdev->dev, dma_mem->size,
- dma_mem->va, dma_mem->pa);
+ wait_for_completion_timeout(&xn->completed,
+ msecs_to_jiffies(params->timeout_ms));
- /* Applies only if we are looking for a specific opcode */
- if (work_done)
- break;
+ /* No need to check the return value; we check the final state of the
+ * transaction below. It's possible the transaction actually gets more
+ * timeout than specified if we get preempted here but after
+ * wait_for_completion_timeout returns. This should be a non-issue
+ * however.
+ */
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_SHUTDOWN:
+ retval = -ENXIO;
+ goto only_unlock;
+ case IDPF_VC_XN_WAITING:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction timed-out (op %d, %dms)\n",
+ params->vc_op, params->timeout_ms);
+ retval = -ETIME;
+ break;
+ case IDPF_VC_XN_COMPLETED_SUCCESS:
+ retval = xn->reply_sz;
+ break;
+ case IDPF_VC_XN_COMPLETED_FAILED:
+ dev_notice_ratelimited(&adapter->pdev->dev, "Transaction failed (op %d)\n",
+ params->vc_op);
+ retval = -EIO;
+ break;
+ default:
+ /* Invalid state. */
+ WARN_ON_ONCE(1);
+ retval = -EIO;
+ break;
}
- return err;
+release_and_unlock:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+ /* If we receive a VC reply after here, it will be dropped. */
+only_unlock:
+ idpf_vc_xn_unlock(xn);
+
+ return retval;
}
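
Every send helper below collapses onto this single entry point. A condensed sketch of the calling pattern, modeled on the rewritten idpf_send_ver_msg() later in this patch (error mapping simplified):

static int demo_negotiate_version(struct idpf_adapter *adapter)
{
	struct idpf_vc_xn_params xn_params = {};
	struct virtchnl2_version_info vvi = {};
	ssize_t reply_sz;

	vvi.major = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MAJOR);
	vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);

	xn_params.vc_op = VIRTCHNL2_OP_VERSION;
	xn_params.send_buf.iov_base = &vvi;
	xn_params.send_buf.iov_len = sizeof(vvi);
	xn_params.recv_buf.iov_base = &vvi;	/* reply reuses the buffer */
	xn_params.recv_buf.iov_len = sizeof(vvi);
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
	if (reply_sz < 0)
		return reply_sz;	/* timeout, shutdown, or device NAK */
	if (reply_sz < sizeof(vvi))
		return -EIO;		/* short reply */

	return 0;
}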
/**
- * __idpf_wait_for_event - wrapper function for wait on virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
- * @timeout: Max time to wait
+ * idpf_vc_xn_forward_async - Handle async reply receives
+ * @adapter: private data struct
+ * @xn: transaction to handle
+ * @ctlq_msg: corresponding ctlq_msg
*
- * Checks if state is set upon expiry of timeout. Returns 0 on success,
- * negative on failure.
+ * For async sends we're going to lose the caller's context so, if an
+ * async_handler was provided, it can deal with the reply, otherwise we'll just
+ * check and report if there is an error.
*/
-static int __idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check,
- int timeout)
+static int
+idpf_vc_xn_forward_async(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
{
- int time_to_wait, num_waits;
- wait_queue_head_t *vchnl_wq;
- unsigned long *vc_state;
+ int err = 0;
- time_to_wait = ((timeout <= IDPF_MAX_WAIT) ? timeout : IDPF_MAX_WAIT);
- num_waits = ((timeout <= IDPF_MAX_WAIT) ? 1 : timeout / IDPF_MAX_WAIT);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ err = -EINVAL;
+ goto release_bufs;
+ }
- if (vport) {
- vchnl_wq = &vport->vchnl_wq;
- vc_state = vport->vc_state;
- } else {
- vchnl_wq = &adapter->vchnl_wq;
- vc_state = adapter->vc_state;
+ if (xn->async_handler) {
+ err = xn->async_handler(adapter, xn, ctlq_msg);
+ goto release_bufs;
+ }
+
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ dev_err_ratelimited(&adapter->pdev->dev, "Async message failure (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
}
- while (num_waits) {
- int event;
+release_bufs:
+ idpf_vc_xn_push_free(adapter->vcxn_mngr, xn);
+
+ return err;
+}
+
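+
Because an async sender's stack frame is gone by the time the reply lands, a handler receives only the adapter, the transaction, and the raw ctlq message; anything else it needs must live in long-lived storage. A hypothetical callback matching the async_vc_cb signature above:

/* Hypothetical async reply handler; name and log text are illustrative. */
static int demo_async_handler(struct idpf_adapter *adapter,
			      struct idpf_vc_xn *xn,
			      const struct idpf_ctlq_msg *ctlq_msg)
{
	/* No waiter to propagate errors to; log and move on. */
	if (ctlq_msg->cookie.mbx.chnl_retval) {
		dev_err_ratelimited(&adapter->pdev->dev,
				    "Async op %d failed: %d\n",
				    ctlq_msg->cookie.mbx.chnl_opcode,
				    ctlq_msg->cookie.mbx.chnl_retval);
		return -EINVAL;
	}

	return 0;
}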
+/**
+ * idpf_vc_xn_forward_reply - copy a reply back to receiving thread
+ * @adapter: driver specific private structure with vcxn_mngr
+ * @ctlq_msg: controlq message to send back to receiving thread
+ */
+static int
+idpf_vc_xn_forward_reply(struct idpf_adapter *adapter,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ const void *payload = NULL;
+ size_t payload_size = 0;
+ struct idpf_vc_xn *xn;
+ u16 msg_info;
+ int err = 0;
+ u16 xn_idx;
+ u16 salt;
+
+ msg_info = ctlq_msg->ctx.sw_cookie.data;
+ xn_idx = FIELD_GET(IDPF_VC_XN_IDX_M, msg_info);
+ if (xn_idx >= ARRAY_SIZE(adapter->vcxn_mngr->ring)) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Out of bounds cookie received: %02x\n",
+ xn_idx);
+ return -EINVAL;
+ }
+ xn = &adapter->vcxn_mngr->ring[xn_idx];
+ salt = FIELD_GET(IDPF_VC_XN_SALT_M, msg_info);
+ if (xn->salt != salt) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Transaction salt does not match (%02x != %02x)\n",
+ xn->salt, salt);
+ return -EINVAL;
+ }
- /* If we are here and a reset is detected do not wait but
- * return. Reset timing is out of drivers control. So
- * while we are cleaning resources as part of reset if the
- * underlying HW mailbox is gone, wait on mailbox messages
- * is not meaningful
+ idpf_vc_xn_lock(xn);
+ switch (xn->state) {
+ case IDPF_VC_XN_WAITING:
+ /* success */
+ break;
+ case IDPF_VC_XN_IDLE:
+ dev_err_ratelimited(&adapter->pdev->dev, "Unexpected or belated VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EINVAL;
+ goto out_unlock;
+ case IDPF_VC_XN_SHUTDOWN:
+ /* ENXIO is a bit special here as the recv msg loop uses that
+ * to know if it should stop trying to clean the ring if we lost
+ * the virtchnl. We need to stop playing with registers and
+ * yield.
*/
- if (idpf_is_reset_detected(adapter))
- return 0;
+ err = -ENXIO;
+ goto out_unlock;
+ case IDPF_VC_XN_ASYNC:
+ err = idpf_vc_xn_forward_async(adapter, xn, ctlq_msg);
+ idpf_vc_xn_unlock(xn);
+ return err;
+ default:
+ dev_err_ratelimited(&adapter->pdev->dev, "Overwriting VC reply (op %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode);
+ err = -EBUSY;
+ goto out_unlock;
+ }
- event = wait_event_timeout(*vchnl_wq,
- test_and_clear_bit(state, vc_state),
- msecs_to_jiffies(time_to_wait));
- if (event) {
- if (test_and_clear_bit(err_check, vc_state)) {
- dev_err(&adapter->pdev->dev, "VC response error %s\n",
- idpf_vport_vc_state_str[err_check]);
+ if (ctlq_msg->cookie.mbx.chnl_opcode != xn->vc_op) {
+ dev_err_ratelimited(&adapter->pdev->dev, "Message opcode does not match transaction opcode (msg: %d) (xn: %d)\n",
+ ctlq_msg->cookie.mbx.chnl_opcode, xn->vc_op);
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return -EINVAL;
- }
+ if (ctlq_msg->cookie.mbx.chnl_retval) {
+ xn->reply_sz = 0;
+ xn->state = IDPF_VC_XN_COMPLETED_FAILED;
+ err = -EINVAL;
+ goto out_unlock;
+ }
- return 0;
- }
- num_waits--;
+ if (ctlq_msg->data_len) {
+ payload = ctlq_msg->ctx.indirect.payload->va;
+ payload_size = ctlq_msg->ctx.indirect.payload->size;
}
- /* Timeout occurred */
- dev_err(&adapter->pdev->dev, "VC timeout, state = %s\n",
- idpf_vport_vc_state_str[state]);
+ xn->reply_sz = payload_size;
+ xn->state = IDPF_VC_XN_COMPLETED_SUCCESS;
- return -ETIMEDOUT;
+ if (xn->reply.iov_base && xn->reply.iov_len && payload_size)
+ memcpy(xn->reply.iov_base, payload,
+ min_t(size_t, xn->reply.iov_len, payload_size));
+
+out_unlock:
+ idpf_vc_xn_unlock(xn);
+ /* we _cannot_ hold lock while calling complete */
+ complete(&xn->completed);
+
+ return err;
}
/**
- * idpf_min_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout
- * @err_check: check if this specific error bit is set
+ * idpf_recv_mb_msg - Receive message over mailbox
+ * @adapter: Driver specific private structure
*
- * Returns 0 on success, negative on failure.
+ * Will receive a control queue message and post the receive buffer. Returns 0
+ * on success and negative on failure.
*/
-static int idpf_min_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
+int idpf_recv_mb_msg(struct idpf_adapter *adapter)
{
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO_MIN);
-}
+ struct idpf_ctlq_msg ctlq_msg;
+ struct idpf_dma_mem *dma_mem;
+ int post_err, err;
+ u16 num_recv;
-/**
- * idpf_wait_for_event - wait for virtchannel response
- * @adapter: Driver private data structure
- * @vport: virtual port structure
- * @state: check on state upon timeout after 500ms
- * @err_check: check if this specific error bit is set
- *
- * Returns 0 on success, negative on failure.
- */
-static int idpf_wait_for_event(struct idpf_adapter *adapter,
- struct idpf_vport *vport,
- enum idpf_vport_vc_state state,
- enum idpf_vport_vc_state err_check)
-{
- /* Increasing the timeout in __IDPF_INIT_SW flow to consider large
- * number of VF's mailbox message responses. When a message is received
- * on mailbox, this thread is woken up by the idpf_recv_mb_msg before
- * the timeout expires. Only in the error case i.e. if no message is
- * received on mailbox, we wait for the complete timeout which is
- * less likely to happen.
- */
- return __idpf_wait_for_event(adapter, vport, state, err_check,
- IDPF_WAIT_FOR_EVENT_TIMEO);
+ while (1) {
+		/* This will get <= num_recv messages and report how many
+		 * were actually received in num_recv.
+ */
+ num_recv = 1;
+ err = idpf_ctlq_recv(adapter->hw.arq, &num_recv, &ctlq_msg);
+ if (err || !num_recv)
+ break;
+
+ if (ctlq_msg.data_len) {
+ dma_mem = ctlq_msg.ctx.indirect.payload;
+ } else {
+ dma_mem = NULL;
+ num_recv = 0;
+ }
+
+ if (ctlq_msg.cookie.mbx.chnl_opcode == VIRTCHNL2_OP_EVENT)
+ idpf_recv_event_msg(adapter, &ctlq_msg);
+ else
+ err = idpf_vc_xn_forward_reply(adapter, &ctlq_msg);
+
+ post_err = idpf_ctlq_post_rx_buffs(&adapter->hw,
+ adapter->hw.arq,
+ &num_recv, &dma_mem);
+
+		/* If the post failed, free the only buffer we supplied */
+ if (post_err) {
+ if (dma_mem)
+ dmam_free_coherent(&adapter->pdev->dev,
+ dma_mem->size, dma_mem->va,
+ dma_mem->pa);
+ break;
+ }
+
+		/* virtchnl is trying to shut down, stop cleaning */
+ if (err == -ENXIO)
+ break;
+ }
+
+ return err;
}
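
A hedged sketch of how deferred work might drive this receive loop; the real consumer is the driver's mbx_task (cancelled in idpf_vc_core_deinit() below), whose body is not part of this hunk, so the scheduling details here are illustrative only.

/* Illustrative deferred-work consumer of idpf_recv_mb_msg(); the real
 * idpf_mbx_task() differs (interrupt re-enable, dedicated workqueue).
 */
static void ex_mbx_task(struct work_struct *work)
{
	struct idpf_adapter *adapter =
		container_of(work, struct idpf_adapter, mbx_task.work);

	/* when not interrupt driven, poll the mailbox periodically */
	if (!test_bit(IDPF_MB_INTR_MODE, adapter->flags))
		queue_delayed_work(system_wq, &adapter->mbx_task,
				   msecs_to_jiffies(300));

	idpf_recv_mb_msg(adapter);
}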
/**
@@ -785,7 +776,11 @@ static int idpf_wait_for_marker_event(struct idpf_vport *vport)
*/
static int idpf_send_ver_msg(struct idpf_adapter *adapter)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_version_info vvi;
+ ssize_t reply_sz;
+ u32 major, minor;
+ int err = 0;
if (adapter->virt_ver_maj) {
vvi.major = cpu_to_le32(adapter->virt_ver_maj);
@@ -795,43 +790,29 @@ static int idpf_send_ver_msg(struct idpf_adapter *adapter)
vvi.minor = cpu_to_le32(IDPF_VIRTCHNL_VERSION_MINOR);
}
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_VERSION, sizeof(vvi),
- (u8 *)&vvi);
-}
-
-/**
- * idpf_recv_ver_msg - Receive virtchnl version message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl version message. Returns 0 on success, -EAGAIN if we need
- * to send version message again, otherwise negative on failure.
- */
-static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
-{
- struct virtchnl2_version_info vvi;
- u32 major, minor;
- int err;
+ xn_params.vc_op = VIRTCHNL2_OP_VERSION;
+ xn_params.send_buf.iov_base = &vvi;
+ xn_params.send_buf.iov_len = sizeof(vvi);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_VERSION, &vvi,
- sizeof(vvi));
- if (err)
- return err;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(vvi))
+ return -EIO;
major = le32_to_cpu(vvi.major);
minor = le32_to_cpu(vvi.minor);
if (major > IDPF_VIRTCHNL_VERSION_MAJOR) {
- dev_warn(&adapter->pdev->dev,
- "Virtchnl major version (%d) greater than supported\n",
- major);
-
+ dev_warn(&adapter->pdev->dev, "Virtchnl major version greater than supported\n");
return -EINVAL;
}
if (major == IDPF_VIRTCHNL_VERSION_MAJOR &&
minor > IDPF_VIRTCHNL_VERSION_MINOR)
- dev_warn(&adapter->pdev->dev,
- "Virtchnl minor version (%d) didn't match\n", minor);
+ dev_warn(&adapter->pdev->dev, "Virtchnl minor version didn't match\n");
/* If we have a mismatch, resend version to update receiver on what
* version we will use.
@@ -856,7 +837,9 @@ static int idpf_recv_ver_msg(struct idpf_adapter *adapter)
*/
static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
{
- struct virtchnl2_get_capabilities caps = { };
+ struct virtchnl2_get_capabilities caps = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
caps.csum_caps =
cpu_to_le32(VIRTCHNL2_CAP_TX_CSUM_L3_IPV4 |
@@ -913,21 +896,20 @@ static int idpf_send_get_caps_msg(struct idpf_adapter *adapter)
VIRTCHNL2_CAP_PROMISC |
VIRTCHNL2_CAP_LOOPBACK);
- return idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, sizeof(caps),
- (u8 *)&caps);
-}
+ xn_params.vc_op = VIRTCHNL2_OP_GET_CAPS;
+ xn_params.send_buf.iov_base = &caps;
+ xn_params.send_buf.iov_len = sizeof(caps);
+ xn_params.recv_buf.iov_base = &adapter->caps;
+ xn_params.recv_buf.iov_len = sizeof(adapter->caps);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
-/**
- * idpf_recv_get_caps_msg - Receive virtchnl get capabilities message
- * @adapter: Driver specific private structure
- *
- * Receive virtchnl get capabilities message. Returns 0 on success, negative on
- * failure.
- */
-static int idpf_recv_get_caps_msg(struct idpf_adapter *adapter)
-{
- return idpf_recv_mb_msg(adapter, VIRTCHNL2_OP_GET_CAPS, &adapter->caps,
- sizeof(struct virtchnl2_get_capabilities));
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(adapter->caps))
+ return -EIO;
+
+ return 0;
}
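
Every conversion in this patch follows the same transaction shape: fill an idpf_vc_xn_params, call idpf_vc_xn_exec(), then sanity-check the returned reply size. The parameter struct itself is not visible in this hunk; inferred from the call sites it looks roughly like the sketch below, where field types and ordering are assumptions.

#include <linux/types.h>
#include <linux/uio.h>

/* Plausible shape of the transaction parameters, inferred from usage in
 * this file; the authoritative definition lives with the vcxn manager.
 */
struct ex_vc_xn_params {
	struct kvec send_buf;		/* message to send */
	struct kvec recv_buf;		/* optional reply buffer */
	u32 vc_op;			/* VIRTCHNL2_OP_* opcode */
	int timeout_ms;			/* DEFAULT or MIN timeout */
	bool async;			/* don't block waiting for reply */
	int (*async_handler)(struct idpf_adapter *adapter,
			     struct idpf_vc_xn *xn,
			     const struct idpf_ctlq_msg *ctlq_msg);
};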
/**
@@ -1254,8 +1236,10 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
struct idpf_vport_max_q *max_q)
{
struct virtchnl2_create_vport *vport_msg;
+ struct idpf_vc_xn_params xn_params = {};
u16 idx = adapter->next_vport;
int err, buf_size;
+ ssize_t reply_sz;
buf_size = sizeof(struct virtchnl2_create_vport);
if (!adapter->vport_params_reqd[idx]) {
@@ -1286,35 +1270,38 @@ int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
return err;
}
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CREATE_VPORT, buf_size,
- (u8 *)vport_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_CREATE_VPORT,
- IDPF_VC_CREATE_VPORT_ERR);
- if (err) {
- dev_err(&adapter->pdev->dev, "Failed to receive create vport message");
-
- goto rel_lock;
- }
-
if (!adapter->vport_params_recvd[idx]) {
adapter->vport_params_recvd[idx] = kzalloc(IDPF_CTLQ_MAX_BUF_LEN,
GFP_KERNEL);
if (!adapter->vport_params_recvd[idx]) {
err = -ENOMEM;
- goto rel_lock;
+ goto free_vport_params;
}
}
- vport_msg = adapter->vport_params_recvd[idx];
- memcpy(vport_msg, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ xn_params.vc_op = VIRTCHNL2_OP_CREATE_VPORT;
+ xn_params.send_buf.iov_base = vport_msg;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.recv_buf.iov_base = adapter->vport_params_recvd[idx];
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0) {
+ err = reply_sz;
+ goto free_vport_params;
+ }
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN) {
+ err = -EIO;
+ goto free_vport_params;
+ }
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
+ return 0;
+
+free_vport_params:
+ kfree(adapter->vport_params_recvd[idx]);
+ adapter->vport_params_recvd[idx] = NULL;
+ kfree(adapter->vport_params_reqd[idx]);
+ adapter->vport_params_reqd[idx] = NULL;
return err;
}
@@ -1366,26 +1353,19 @@ int idpf_check_supported_desc_ids(struct idpf_vport *vport)
*/
int idpf_send_destroy_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DESTROY_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DESTROY_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DESTROY_VPORT,
- IDPF_VC_DESTROY_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1397,26 +1377,19 @@ rel_lock:
*/
int idpf_send_enable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ENABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_ENA_VPORT,
- IDPF_VC_ENA_VPORT_ERR);
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1428,26 +1401,19 @@ rel_lock:
*/
int idpf_send_disable_vport_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_vport v_id;
- int err;
+ ssize_t reply_sz;
v_id.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_VPORT;
+ xn_params.send_buf.iov_base = &v_id;
+ xn_params.send_buf.iov_len = sizeof(v_id);
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DISABLE_VPORT,
- sizeof(v_id), (u8 *)&v_id);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DIS_VPORT,
- IDPF_VC_DIS_VPORT_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -1459,11 +1425,13 @@ rel_lock:
*/
static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_tx_queues *ctq;
+ struct virtchnl2_config_tx_queues *ctq __free(kfree) = NULL;
+ struct virtchnl2_txq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_txq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_txq + vport->num_complq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_txq_info), GFP_KERNEL);
@@ -1524,10 +1492,8 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1541,12 +1507,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
ctq = kzalloc(buf_sz, GFP_KERNEL);
- if (!ctq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!ctq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_TX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(ctq, 0, buf_sz);
@@ -1554,17 +1519,11 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
ctq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(ctq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_TX_QUEUES,
- buf_sz, (u8 *)ctq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_TXQ,
- IDPF_VC_CONFIG_TXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = ctq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1573,13 +1532,7 @@ static int idpf_send_config_tx_queues_msg(struct idpf_vport *vport)
buf_sz = struct_size(ctq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ctq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
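
The __free(kfree) annotations introduced above come from <linux/cleanup.h>: the buffers are released automatically when they go out of scope, which is what lets the error paths become plain returns instead of goto unwinding. A standalone sketch of the idiom:

#include <linux/cleanup.h>
#include <linux/slab.h>

/* Scope-based cleanup sketch: buf is kfree()d automatically on every
 * return path, including the early error returns.
 */
static int ex_autofree(size_t len)
{
	u8 *buf __free(kfree) = kzalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	if (len < 16)
		return -EINVAL;		/* buf freed here, no goto needed */

	buf[0] = 0xff;			/* use the buffer */

	return 0;			/* ...and freed here as well */
}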
/**
@@ -1591,11 +1544,13 @@ error:
*/
static int idpf_send_config_rx_queues_msg(struct idpf_vport *vport)
{
- struct virtchnl2_config_rx_queues *crq;
+ struct virtchnl2_config_rx_queues *crq __free(kfree) = NULL;
+ struct virtchnl2_rxq_info *qi __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
int totqs, num_msgs, num_chunks;
- struct virtchnl2_rxq_info *qi;
- int err = 0, i, k = 0;
+ ssize_t reply_sz;
+ int i, k = 0;
totqs = vport->num_rxq + vport->num_bufq;
qi = kcalloc(totqs, sizeof(struct virtchnl2_rxq_info), GFP_KERNEL);
@@ -1676,10 +1631,8 @@ common_qi_fields:
}
/* Make sure accounting agrees */
- if (k != totqs) {
- err = -EINVAL;
- goto error;
- }
+ if (k != totqs)
+ return -EINVAL;
/* Chunk up the queue contexts into multiple messages to avoid
* sending a control queue message buffer that is too large
@@ -1693,12 +1646,11 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
crq = kzalloc(buf_sz, GFP_KERNEL);
- if (!crq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!crq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_RX_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
for (i = 0, k = 0; i < num_msgs; i++) {
memset(crq, 0, buf_sz);
@@ -1706,17 +1658,11 @@ common_qi_fields:
crq->num_qinfo = cpu_to_le16(num_chunks);
memcpy(crq->qinfo, &qi[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(vport->adapter,
- VIRTCHNL2_OP_CONFIG_RX_QUEUES,
- buf_sz, (u8 *)crq);
- if (err)
- goto mbx_error;
-
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_CONFIG_RXQ,
- IDPF_VC_CONFIG_RXQ_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = crq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
totqs -= num_chunks;
@@ -1725,42 +1671,28 @@ common_qi_fields:
buf_sz = struct_size(crq, qinfo, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(crq);
-error:
- kfree(qi);
-
- return err;
+ return 0;
}
/**
* idpf_send_ena_dis_queues_msg - Send virtchnl enable or disable
* queues message
* @vport: virtual port data structure
- * @vc_op: virtchnl op code to send
+ * @ena: true to enable the queues, false to disable
*
* Send enable or disable queues virtchnl message. Returns 0 on success,
* negative on failure.
*/
-static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
+static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, bool ena)
{
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
+ struct virtchnl2_queue_chunk *qc __free(kfree) = NULL;
u32 num_msgs, num_chunks, num_txq, num_rxq, num_q;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_queue_chunks *qcs;
- struct virtchnl2_queue_chunk *qc;
u32 config_sz, chunk_sz, buf_sz;
- int i, j, k = 0, err = 0;
-
- /* validate virtchnl op */
- switch (vc_op) {
- case VIRTCHNL2_OP_ENABLE_QUEUES:
- case VIRTCHNL2_OP_DISABLE_QUEUES:
- break;
- default:
- return -EINVAL;
- }
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_txq = vport->num_txq + vport->num_complq;
num_rxq = vport->num_rxq + vport->num_bufq;
@@ -1779,10 +1711,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->txq_model))
goto setup_rx;
@@ -1794,10 +1724,8 @@ static int idpf_send_ena_dis_queues_msg(struct idpf_vport *vport, u32 vc_op)
qc[k].start_queue_id = cpu_to_le32(tx_qgrp->complq->q_id);
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
- if (vport->num_complq != (k - vport->num_txq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_complq != (k - vport->num_txq))
+ return -EINVAL;
setup_rx:
for (i = 0; i < vport->num_rxq_grp; i++) {
@@ -1823,10 +1751,8 @@ setup_rx:
qc[k].num_queues = cpu_to_le32(IDPF_NUMQ_PER_CHUNK);
}
}
- if (vport->num_rxq != k - (vport->num_txq + vport->num_complq)) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - (vport->num_txq + vport->num_complq))
+ return -EINVAL;
if (!idpf_is_queue_model_split(vport->rxq_model))
goto send_msg;
@@ -1845,10 +1771,8 @@ setup_rx:
}
if (vport->num_bufq != k - (vport->num_txq +
vport->num_complq +
- vport->num_rxq)) {
- err = -EINVAL;
- goto error;
- }
+ vport->num_rxq))
+ return -EINVAL;
send_msg:
/* Chunk up the queue info into multiple messages */
@@ -1861,12 +1785,16 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
eq = kzalloc(buf_sz, GFP_KERNEL);
- if (!eq) {
- err = -ENOMEM;
- goto error;
- }
+ if (!eq)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (ena) {
+ xn_params.vc_op = VIRTCHNL2_OP_ENABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_DISABLE_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(eq, 0, buf_sz);
@@ -1875,20 +1803,11 @@ send_msg:
qcs = &eq->chunks;
memcpy(qcs->chunks, &qc[k], chunk_sz * num_chunks);
- err = idpf_send_mb_msg(adapter, vc_op, buf_sz, (u8 *)eq);
- if (err)
- goto mbx_error;
-
- if (vc_op == VIRTCHNL2_OP_ENABLE_QUEUES)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_ENA_QUEUES,
- IDPF_VC_ENA_QUEUES_ERR);
- else
- err = idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_DIS_QUEUES,
- IDPF_VC_DIS_QUEUES_ERR);
- if (err)
- goto mbx_error;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_sz;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -1897,13 +1816,7 @@ send_msg:
buf_sz = struct_size(eq, chunks.chunks, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
-error:
- kfree(qc);
-
- return err;
+ return 0;
}
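
The enable/disable sender above uses the same chunked-send loop as the config and map/unmap paths: cap the per-message entry count, then step through the array until everything has been sent. In isolation, a sketch of the pattern (names are illustrative; total must be non-zero):

/* Chunked-send sketch: one transaction per num_chunks-sized slice. */
static int ex_send_chunked(struct idpf_adapter *adapter,
			   struct idpf_vc_xn_params *xn_params,
			   void *msg, size_t msg_sz,
			   u32 total, u32 max_per_msg)
{
	u32 num_chunks = min(max_per_msg, total);
	u32 num_msgs = DIV_ROUND_UP(total, num_chunks);
	ssize_t reply_sz;
	u32 i, k;

	for (i = 0, k = 0; i < num_msgs; i++) {
		/* ...copy num_chunks entries starting at index k into msg... */
		xn_params->send_buf.iov_base = msg;
		xn_params->send_buf.iov_len = msg_sz;
		reply_sz = idpf_vc_xn_exec(adapter, xn_params);
		if (reply_sz < 0)
			return reply_sz;

		k += num_chunks;
		total -= num_chunks;
		num_chunks = min(num_chunks, total);
	}

	return 0;
}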
/**
@@ -1917,12 +1830,13 @@ error:
*/
int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_queue_vector_maps *vqvm;
- struct virtchnl2_queue_vector *vqv;
+ struct virtchnl2_queue_vector_maps *vqvm __free(kfree) = NULL;
+ struct virtchnl2_queue_vector *vqv __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
u32 config_sz, chunk_sz, buf_sz;
u32 num_msgs, num_chunks, num_q;
- int i, j, k = 0, err = 0;
+ ssize_t reply_sz;
+ int i, j, k = 0;
num_q = vport->num_txq + vport->num_rxq;
@@ -1952,10 +1866,8 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
}
- if (vport->num_txq != k) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_txq != k)
+ return -EINVAL;
for (i = 0; i < vport->num_rxq_grp; i++) {
struct idpf_rxq_group *rx_qgrp = &vport->rxq_grps[i];
@@ -1982,15 +1894,11 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
}
if (idpf_is_queue_model_split(vport->txq_model)) {
- if (vport->num_rxq != k - vport->num_complq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_complq)
+ return -EINVAL;
} else {
- if (vport->num_rxq != k - vport->num_txq) {
- err = -EINVAL;
- goto error;
- }
+ if (vport->num_rxq != k - vport->num_txq)
+ return -EINVAL;
}
/* Chunk up the vector info into multiple messages */
@@ -2003,39 +1911,28 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
vqvm = kzalloc(buf_sz, GFP_KERNEL);
- if (!vqvm) {
- err = -ENOMEM;
- goto error;
- }
+ if (!vqvm)
+ return -ENOMEM;
- mutex_lock(&vport->vc_buf_lock);
+ if (map) {
+ xn_params.vc_op = VIRTCHNL2_OP_MAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ } else {
+ xn_params.vc_op = VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ }
for (i = 0, k = 0; i < num_msgs; i++) {
memset(vqvm, 0, buf_sz);
+ xn_params.send_buf.iov_base = vqvm;
+ xn_params.send_buf.iov_len = buf_sz;
vqvm->vport_id = cpu_to_le32(vport->vport_id);
vqvm->num_qv_maps = cpu_to_le16(num_chunks);
memcpy(vqvm->qv_maps, &vqv[k], chunk_sz * num_chunks);
- if (map) {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_MAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err = idpf_wait_for_event(adapter, vport,
- IDPF_VC_MAP_IRQ,
- IDPF_VC_MAP_IRQ_ERR);
- } else {
- err = idpf_send_mb_msg(adapter,
- VIRTCHNL2_OP_UNMAP_QUEUE_VECTOR,
- buf_sz, (u8 *)vqvm);
- if (!err)
- err =
- idpf_min_wait_for_event(adapter, vport,
- IDPF_VC_UNMAP_IRQ,
- IDPF_VC_UNMAP_IRQ_ERR);
- }
- if (err)
- goto mbx_error;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_chunks;
num_q -= num_chunks;
@@ -2044,13 +1941,7 @@ int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map)
buf_sz = struct_size(vqvm, qv_maps, num_chunks);
}
-mbx_error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(vqvm);
-error:
- kfree(vqv);
-
- return err;
+ return 0;
}
/**
@@ -2062,7 +1953,7 @@ error:
*/
int idpf_send_enable_queues_msg(struct idpf_vport *vport)
{
- return idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_ENABLE_QUEUES);
+ return idpf_send_ena_dis_queues_msg(vport, true);
}
/**
@@ -2076,7 +1967,7 @@ int idpf_send_disable_queues_msg(struct idpf_vport *vport)
{
int err, i;
- err = idpf_send_ena_dis_queues_msg(vport, VIRTCHNL2_OP_DISABLE_QUEUES);
+ err = idpf_send_ena_dis_queues_msg(vport, false);
if (err)
return err;
@@ -2124,22 +2015,21 @@ static void idpf_convert_reg_to_queue_chunks(struct virtchnl2_queue_chunk *dchun
*/
int idpf_send_delete_queues_msg(struct idpf_vport *vport)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_del_ena_dis_queues *eq __free(kfree) = NULL;
struct virtchnl2_create_vport *vport_params;
struct virtchnl2_queue_reg_chunks *chunks;
- struct virtchnl2_del_ena_dis_queues *eq;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
u16 vport_idx = vport->idx;
- int buf_size, err;
+ ssize_t reply_sz;
u16 num_chunks;
+ int buf_size;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
if (vport_config->req_qs_chunks) {
- struct virtchnl2_add_queues *vc_aq =
- (struct virtchnl2_add_queues *)vport_config->req_qs_chunks;
- chunks = &vc_aq->chunks;
+ chunks = &vport_config->req_qs_chunks->chunks;
} else {
- vport_params = adapter->vport_params_recvd[vport_idx];
+ vport_params = vport->adapter->vport_params_recvd[vport_idx];
chunks = &vport_params->chunks;
}
@@ -2156,21 +2046,13 @@ int idpf_send_delete_queues_msg(struct idpf_vport *vport)
idpf_convert_reg_to_queue_chunks(eq->chunks.chunks, chunks->chunks,
num_chunks);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEL_QUEUES,
- buf_size, (u8 *)eq);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, vport, IDPF_VC_DEL_QUEUES,
- IDPF_VC_DEL_QUEUES_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(eq);
+ xn_params.vc_op = VIRTCHNL2_OP_DEL_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = eq;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2205,14 +2087,21 @@ int idpf_send_config_queues_msg(struct idpf_vport *vport)
int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
u16 num_complq, u16 num_rx_q, u16 num_rx_bufq)
{
- struct idpf_adapter *adapter = vport->adapter;
+ struct virtchnl2_add_queues *vc_msg __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- struct virtchnl2_add_queues aq = { };
- struct virtchnl2_add_queues *vc_msg;
+ struct virtchnl2_add_queues aq = {};
u16 vport_idx = vport->idx;
- int size, err;
+ ssize_t reply_sz;
+ int size;
+
+ vc_msg = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!vc_msg)
+ return -ENOMEM;
- vport_config = adapter->vport_config[vport_idx];
+ vport_config = vport->adapter->vport_config[vport_idx];
+ kfree(vport_config->req_qs_chunks);
+ vport_config->req_qs_chunks = NULL;
aq.vport_id = cpu_to_le32(vport->vport_id);
aq.num_tx_q = cpu_to_le16(num_tx_q);
@@ -2220,47 +2109,33 @@ int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
aq.num_rx_q = cpu_to_le16(num_rx_q);
aq.num_rx_bufq = cpu_to_le16(num_rx_bufq);
- mutex_lock(&((struct idpf_vport *)vport)->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ADD_QUEUES,
- sizeof(struct virtchnl2_add_queues), (u8 *)&aq);
- if (err)
- goto rel_lock;
-
- /* We want vport to be const to prevent incidental code changes making
- * changes to the vport config. We're making a special exception here
- * to discard const to use the virtchnl.
- */
- err = idpf_wait_for_event(adapter, (struct idpf_vport *)vport,
- IDPF_VC_ADD_QUEUES, IDPF_VC_ADD_QUEUES_ERR);
- if (err)
- goto rel_lock;
-
- kfree(vport_config->req_qs_chunks);
- vport_config->req_qs_chunks = NULL;
+ xn_params.vc_op = VIRTCHNL2_OP_ADD_QUEUES;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &aq;
+ xn_params.send_buf.iov_len = sizeof(aq);
+ xn_params.recv_buf.iov_base = vc_msg;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- vc_msg = (struct virtchnl2_add_queues *)vport->vc_msg;
/* compare vc_msg num queues with vport num queues */
if (le16_to_cpu(vc_msg->num_tx_q) != num_tx_q ||
le16_to_cpu(vc_msg->num_rx_q) != num_rx_q ||
le16_to_cpu(vc_msg->num_tx_complq) != num_complq ||
- le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq) {
- err = -EINVAL;
- goto rel_lock;
- }
+ le16_to_cpu(vc_msg->num_rx_bufq) != num_rx_bufq)
+ return -EINVAL;
size = struct_size(vc_msg, chunks.chunks,
le16_to_cpu(vc_msg->chunks.num_chunks));
- vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
- if (!vport_config->req_qs_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
-rel_lock:
- mutex_unlock(&((struct idpf_vport *)vport)->vc_buf_lock);
+ vport_config->req_qs_chunks = kmemdup(vc_msg, size, GFP_KERNEL);
+ if (!vport_config->req_qs_chunks)
+ return -ENOMEM;
- return err;
+ return 0;
}
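
The reply handling above leans on struct_size() from <linux/overflow.h> to size a struct with a trailing flexible array from its own element count, verify the device actually sent that many bytes, and only then kmemdup() a private copy. A standalone sketch with an illustrative layout:

#include <linux/err.h>
#include <linux/overflow.h>
#include <linux/slab.h>

struct ex_reply {
	__le16 num_chunks;
	__le16 chunks[];		/* flexible array, illustrative */
};

/* Validate-then-duplicate sketch; reply_sz is assumed non-negative. */
static struct ex_reply *ex_dup_reply(const struct ex_reply *msg,
				     ssize_t reply_sz)
{
	size_t size = struct_size(msg, chunks, le16_to_cpu(msg->num_chunks));

	if (reply_sz < (ssize_t)size)
		return ERR_PTR(-EIO);	/* device sent too little */

	return kmemdup(msg, size, GFP_KERNEL) ?: ERR_PTR(-ENOMEM);
}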
/**
@@ -2272,53 +2147,49 @@ rel_lock:
*/
int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors)
{
- struct virtchnl2_alloc_vectors *alloc_vec, *rcvd_vec;
- struct virtchnl2_alloc_vectors ac = { };
+ struct virtchnl2_alloc_vectors *rcvd_vec __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
+ struct virtchnl2_alloc_vectors ac = {};
+ ssize_t reply_sz;
u16 num_vchunks;
- int size, err;
+ int size;
ac.num_vectors = cpu_to_le16(num_vectors);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_ALLOC_VECTORS,
- sizeof(ac), (u8 *)&ac);
- if (err)
- goto rel_lock;
+ rcvd_vec = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!rcvd_vec)
+ return -ENOMEM;
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_ALLOC_VECTORS,
- IDPF_VC_ALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_ALLOC_VECTORS;
+ xn_params.send_buf.iov_base = &ac;
+ xn_params.send_buf.iov_len = sizeof(ac);
+ xn_params.recv_buf.iov_base = rcvd_vec;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- rcvd_vec = (struct virtchnl2_alloc_vectors *)adapter->vc_msg;
num_vchunks = le16_to_cpu(rcvd_vec->vchunks.num_vchunks);
-
size = struct_size(rcvd_vec, vchunks.vchunks, num_vchunks);
- if (size > sizeof(adapter->vc_msg)) {
- err = -EINVAL;
- goto rel_lock;
- }
+ if (reply_sz < size)
+ return -EIO;
+
+ if (size > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
kfree(adapter->req_vec_chunks);
- adapter->req_vec_chunks = NULL;
- adapter->req_vec_chunks = kmemdup(adapter->vc_msg, size, GFP_KERNEL);
- if (!adapter->req_vec_chunks) {
- err = -ENOMEM;
- goto rel_lock;
- }
+ adapter->req_vec_chunks = kmemdup(rcvd_vec, size, GFP_KERNEL);
+ if (!adapter->req_vec_chunks)
+ return -ENOMEM;
- alloc_vec = adapter->req_vec_chunks;
- if (le16_to_cpu(alloc_vec->num_vectors) < num_vectors) {
+ if (le16_to_cpu(adapter->req_vec_chunks->num_vectors) < num_vectors) {
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
- err = -EINVAL;
+ return -EINVAL;
}
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2331,29 +2202,24 @@ int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter)
{
struct virtchnl2_alloc_vectors *ac = adapter->req_vec_chunks;
struct virtchnl2_vector_chunks *vcs = &ac->vchunks;
- int buf_size, err;
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+ int buf_size;
buf_size = struct_size(vcs, vchunks, le16_to_cpu(vcs->num_vchunks));
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_DEALLOC_VECTORS, buf_size,
- (u8 *)vcs);
- if (err)
- goto rel_lock;
-
- err = idpf_min_wait_for_event(adapter, NULL, IDPF_VC_DEALLOC_VECTORS,
- IDPF_VC_DEALLOC_VECTORS_ERR);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_DEALLOC_VECTORS;
+ xn_params.send_buf.iov_base = vcs;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_MIN_TIMEOUT_MSEC;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
kfree(adapter->req_vec_chunks);
adapter->req_vec_chunks = NULL;
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2376,25 +2242,18 @@ static int idpf_get_max_vfs(struct idpf_adapter *adapter)
*/
int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs)
{
- struct virtchnl2_sriov_vfs_info svi = { };
- int err;
+ struct virtchnl2_sriov_vfs_info svi = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
svi.num_vfs = cpu_to_le16(num_vfs);
+ xn_params.vc_op = VIRTCHNL2_OP_SET_SRIOV_VFS;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &svi;
+ xn_params.send_buf.iov_len = sizeof(svi);
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- mutex_lock(&adapter->vc_buf_lock);
-
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_SRIOV_VFS,
- sizeof(svi), (u8 *)&svi);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_SET_SRIOV_VFS,
- IDPF_VC_SET_SRIOV_VFS_ERR);
-
-rel_lock:
- mutex_unlock(&adapter->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2407,10 +2266,10 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
{
struct idpf_netdev_priv *np = netdev_priv(vport->netdev);
struct rtnl_link_stats64 *netstats = &np->netstats;
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_vport_stats stats_msg = { };
- struct virtchnl2_vport_stats *stats;
- int err;
+ struct virtchnl2_vport_stats stats_msg = {};
+ struct idpf_vc_xn_params xn_params = {};
+ ssize_t reply_sz;
+
/* Don't send get_stats message if the link is down */
if (np->state <= __IDPF_VPORT_DOWN)
@@ -2418,46 +2277,38 @@ int idpf_send_get_stats_msg(struct idpf_vport *vport)
stats_msg.vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_STATS;
+ xn_params.send_buf.iov_base = &stats_msg;
+ xn_params.send_buf.iov_len = sizeof(stats_msg);
+ xn_params.recv_buf = xn_params.send_buf;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_STATS,
- sizeof(struct virtchnl2_vport_stats),
- (u8 *)&stats_msg);
- if (err)
- goto rel_lock;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_STATS,
- IDPF_VC_GET_STATS_ERR);
- if (err)
- goto rel_lock;
-
- stats = (struct virtchnl2_vport_stats *)vport->vc_msg;
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (reply_sz < sizeof(stats_msg))
+ return -EIO;
spin_lock_bh(&np->stats_lock);
- netstats->rx_packets = le64_to_cpu(stats->rx_unicast) +
- le64_to_cpu(stats->rx_multicast) +
- le64_to_cpu(stats->rx_broadcast);
- netstats->rx_bytes = le64_to_cpu(stats->rx_bytes);
- netstats->rx_dropped = le64_to_cpu(stats->rx_discards);
- netstats->rx_over_errors = le64_to_cpu(stats->rx_overflow_drop);
- netstats->rx_length_errors = le64_to_cpu(stats->rx_invalid_frame_length);
-
- netstats->tx_packets = le64_to_cpu(stats->tx_unicast) +
- le64_to_cpu(stats->tx_multicast) +
- le64_to_cpu(stats->tx_broadcast);
- netstats->tx_bytes = le64_to_cpu(stats->tx_bytes);
- netstats->tx_errors = le64_to_cpu(stats->tx_errors);
- netstats->tx_dropped = le64_to_cpu(stats->tx_discards);
-
- vport->port_stats.vport_stats = *stats;
+ netstats->rx_packets = le64_to_cpu(stats_msg.rx_unicast) +
+ le64_to_cpu(stats_msg.rx_multicast) +
+ le64_to_cpu(stats_msg.rx_broadcast);
+ netstats->tx_packets = le64_to_cpu(stats_msg.tx_unicast) +
+ le64_to_cpu(stats_msg.tx_multicast) +
+ le64_to_cpu(stats_msg.tx_broadcast);
+ netstats->rx_bytes = le64_to_cpu(stats_msg.rx_bytes);
+ netstats->tx_bytes = le64_to_cpu(stats_msg.tx_bytes);
+ netstats->rx_errors = le64_to_cpu(stats_msg.rx_errors);
+ netstats->tx_errors = le64_to_cpu(stats_msg.tx_errors);
+ netstats->rx_dropped = le64_to_cpu(stats_msg.rx_discards);
+ netstats->tx_dropped = le64_to_cpu(stats_msg.tx_discards);
+
+ vport->port_stats.vport_stats = stats_msg;
spin_unlock_bh(&np->stats_lock);
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return 0;
}
/**
@@ -2469,70 +2320,70 @@ rel_lock:
*/
int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_lut *recv_rl;
+ struct virtchnl2_rss_lut *recv_rl __free(kfree) = NULL;
+ struct virtchnl2_rss_lut *rl __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_lut *rl;
int buf_size, lut_buf_size;
- int i, err;
+ ssize_t reply_sz;
+ int i;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rl, lut, rss_data->rss_lut_size);
rl = kzalloc(buf_size, GFP_KERNEL);
if (!rl)
return -ENOMEM;
rl->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
- if (!get) {
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = rl;
+ xn_params.send_buf.iov_len = buf_size;
+
+ if (get) {
+ recv_rl = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rl)
+ return -ENOMEM;
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_LUT;
+ xn_params.recv_buf.iov_base = recv_rl;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ } else {
rl->lut_entries = cpu_to_le16(rss_data->rss_lut_size);
for (i = 0; i < rss_data->rss_lut_size; i++)
rl->lut[i] = cpu_to_le32(rss_data->rss_lut[i]);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_LUT,
- IDPF_VC_SET_RSS_LUT_ERR);
-
- goto free_mem;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_LUT;
}
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_lut))
+ return -EIO;
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_LUT,
- buf_size, (u8 *)rl);
- if (err)
- goto free_mem;
+ lut_buf_size = le16_to_cpu(recv_rl->lut_entries) * sizeof(u32);
+ if (reply_sz < lut_buf_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_LUT,
- IDPF_VC_GET_RSS_LUT_ERR);
- if (err)
- goto free_mem;
-
- recv_rl = (struct virtchnl2_rss_lut *)vport->vc_msg;
+ /* size didn't change, we can reuse existing lut buf */
if (rss_data->rss_lut_size == le16_to_cpu(recv_rl->lut_entries))
goto do_memcpy;
rss_data->rss_lut_size = le16_to_cpu(recv_rl->lut_entries);
kfree(rss_data->rss_lut);
- lut_buf_size = rss_data->rss_lut_size * sizeof(u32);
rss_data->rss_lut = kzalloc(lut_buf_size, GFP_KERNEL);
if (!rss_data->rss_lut) {
rss_data->rss_lut_size = 0;
- err = -ENOMEM;
- goto free_mem;
+ return -ENOMEM;
}
do_memcpy:
- memcpy(rss_data->rss_lut, vport->vc_msg, rss_data->rss_lut_size);
-free_mem:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rl);
+ memcpy(rss_data->rss_lut, recv_rl->lut, rss_data->rss_lut_size);
- return err;
+ return 0;
}
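
Variable-size replies such as the LUT are validated in two steps above: first that at least the fixed header arrived, then that the payload the header claims is actually present. Condensed as a sketch (reply_sz is assumed non-negative at this point):

/* Two-step reply validation sketch for a variable-size LUT reply. */
static int ex_check_lut_reply(const struct virtchnl2_rss_lut *rl,
			      ssize_t reply_sz)
{
	if (reply_sz < (ssize_t)sizeof(*rl))
		return -EIO;		/* fixed header truncated */

	if (reply_sz < (ssize_t)(le16_to_cpu(rl->lut_entries) * sizeof(u32)))
		return -EIO;		/* claimed payload truncated */

	return 0;
}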
/**
@@ -2544,68 +2395,70 @@ free_mem:
*/
int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get)
{
- struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_rss_key *recv_rk;
+ struct virtchnl2_rss_key *recv_rk __free(kfree) = NULL;
+ struct virtchnl2_rss_key *rk __free(kfree) = NULL;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_rss_data *rss_data;
- struct virtchnl2_rss_key *rk;
- int i, buf_size, err;
+ ssize_t reply_sz;
+ int i, buf_size;
+ u16 key_size;
- rss_data = &adapter->vport_config[vport->idx]->user_config.rss_data;
+ rss_data =
+ &vport->adapter->vport_config[vport->idx]->user_config.rss_data;
buf_size = struct_size(rk, key_flex, rss_data->rss_key_size);
rk = kzalloc(buf_size, GFP_KERNEL);
if (!rk)
return -ENOMEM;
rk->vport_id = cpu_to_le32(vport->vport_id);
- mutex_lock(&vport->vc_buf_lock);
-
+ xn_params.send_buf.iov_base = rk;
+ xn_params.send_buf.iov_len = buf_size;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
if (get) {
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
-
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_GET_RSS_KEY,
- IDPF_VC_GET_RSS_KEY_ERR);
- if (err)
- goto error;
-
- recv_rk = (struct virtchnl2_rss_key *)vport->vc_msg;
- if (rss_data->rss_key_size !=
- le16_to_cpu(recv_rk->key_len)) {
- rss_data->rss_key_size =
- min_t(u16, NETDEV_RSS_KEY_LEN,
- le16_to_cpu(recv_rk->key_len));
- kfree(rss_data->rss_key);
- rss_data->rss_key = kzalloc(rss_data->rss_key_size,
- GFP_KERNEL);
- if (!rss_data->rss_key) {
- rss_data->rss_key_size = 0;
- err = -ENOMEM;
- goto error;
- }
- }
- memcpy(rss_data->rss_key, recv_rk->key_flex,
- rss_data->rss_key_size);
+ recv_rk = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
+ if (!recv_rk)
+ return -ENOMEM;
+
+ xn_params.vc_op = VIRTCHNL2_OP_GET_RSS_KEY;
+ xn_params.recv_buf.iov_base = recv_rk;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
} else {
rk->key_len = cpu_to_le16(rss_data->rss_key_size);
for (i = 0; i < rss_data->rss_key_size; i++)
rk->key_flex[i] = rss_data->rss_key[i];
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_SET_RSS_KEY,
- buf_size, (u8 *)rk);
- if (err)
- goto error;
+ xn_params.vc_op = VIRTCHNL2_OP_SET_RSS_KEY;
+ }
+
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
+ if (!get)
+ return 0;
+ if (reply_sz < sizeof(struct virtchnl2_rss_key))
+ return -EIO;
+
+ key_size = min_t(u16, NETDEV_RSS_KEY_LEN,
+ le16_to_cpu(recv_rk->key_len));
+ if (reply_sz < key_size)
+ return -EIO;
- err = idpf_wait_for_event(adapter, vport, IDPF_VC_SET_RSS_KEY,
- IDPF_VC_SET_RSS_KEY_ERR);
+ /* key len didn't change, reuse existing buf */
+ if (rss_data->rss_key_size == key_size)
+ goto do_memcpy;
+
+ rss_data->rss_key_size = key_size;
+ kfree(rss_data->rss_key);
+ rss_data->rss_key = kzalloc(key_size, GFP_KERNEL);
+ if (!rss_data->rss_key) {
+ rss_data->rss_key_size = 0;
+ return -ENOMEM;
}
-error:
- mutex_unlock(&vport->vc_buf_lock);
- kfree(rk);
+do_memcpy:
+ memcpy(rss_data->rss_key, recv_rk->key_flex, rss_data->rss_key_size);
- return err;
+ return 0;
}
/**
@@ -2657,13 +2510,15 @@ static void idpf_fill_ptype_lookup(struct idpf_rx_ptype_decoded *ptype,
*/
int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
{
+ struct virtchnl2_get_ptype_info *get_ptype_info __free(kfree) = NULL;
+ struct virtchnl2_get_ptype_info *ptype_info __free(kfree) = NULL;
struct idpf_rx_ptype_decoded *ptype_lkup = vport->rx_ptype_lkup;
- struct virtchnl2_get_ptype_info get_ptype_info;
int max_ptype, ptypes_recvd = 0, ptype_offset;
struct idpf_adapter *adapter = vport->adapter;
- struct virtchnl2_get_ptype_info *ptype_info;
+ struct idpf_vc_xn_params xn_params = {};
u16 next_ptype_id = 0;
- int err = 0, i, j, k;
+ ssize_t reply_sz;
+ int i, j, k;
if (idpf_is_queue_model_split(vport->rxq_model))
max_ptype = IDPF_RX_MAX_PTYPE;
@@ -2672,43 +2527,44 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
memset(vport->rx_ptype_lkup, 0, sizeof(vport->rx_ptype_lkup));
+ get_ptype_info = kzalloc(sizeof(*get_ptype_info), GFP_KERNEL);
+ if (!get_ptype_info)
+ return -ENOMEM;
+
ptype_info = kzalloc(IDPF_CTLQ_MAX_BUF_LEN, GFP_KERNEL);
if (!ptype_info)
return -ENOMEM;
- mutex_lock(&adapter->vc_buf_lock);
+ xn_params.vc_op = VIRTCHNL2_OP_GET_PTYPE_INFO;
+ xn_params.send_buf.iov_base = get_ptype_info;
+ xn_params.send_buf.iov_len = sizeof(*get_ptype_info);
+ xn_params.recv_buf.iov_base = ptype_info;
+ xn_params.recv_buf.iov_len = IDPF_CTLQ_MAX_BUF_LEN;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
while (next_ptype_id < max_ptype) {
- get_ptype_info.start_ptype_id = cpu_to_le16(next_ptype_id);
+ get_ptype_info->start_ptype_id = cpu_to_le16(next_ptype_id);
if ((next_ptype_id + IDPF_RX_MAX_PTYPES_PER_BUF) > max_ptype)
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(max_ptype - next_ptype_id);
else
- get_ptype_info.num_ptypes =
+ get_ptype_info->num_ptypes =
cpu_to_le16(IDPF_RX_MAX_PTYPES_PER_BUF);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_GET_PTYPE_INFO,
- sizeof(struct virtchnl2_get_ptype_info),
- (u8 *)&get_ptype_info);
- if (err)
- goto vc_buf_unlock;
-
- err = idpf_wait_for_event(adapter, NULL, IDPF_VC_GET_PTYPE_INFO,
- IDPF_VC_GET_PTYPE_INFO_ERR);
- if (err)
- goto vc_buf_unlock;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
- memcpy(ptype_info, adapter->vc_msg, IDPF_CTLQ_MAX_BUF_LEN);
+ if (reply_sz < IDPF_CTLQ_MAX_BUF_LEN)
+ return -EIO;
ptypes_recvd += le16_to_cpu(ptype_info->num_ptypes);
- if (ptypes_recvd > max_ptype) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptypes_recvd > max_ptype)
+ return -EINVAL;
- next_ptype_id = le16_to_cpu(get_ptype_info.start_ptype_id) +
- le16_to_cpu(get_ptype_info.num_ptypes);
+ next_ptype_id = le16_to_cpu(get_ptype_info->start_ptype_id) +
+ le16_to_cpu(get_ptype_info->num_ptypes);
ptype_offset = IDPF_RX_PTYPE_HDR_SZ;
@@ -2721,17 +2577,13 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
((u8 *)ptype_info + ptype_offset);
ptype_offset += IDPF_GET_PTYPE_SIZE(ptype);
- if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN) {
- err = -EINVAL;
- goto vc_buf_unlock;
- }
+ if (ptype_offset > IDPF_CTLQ_MAX_BUF_LEN)
+ return -EINVAL;
/* 0xFFFF indicates end of ptypes */
if (le16_to_cpu(ptype->ptype_id_10) ==
- IDPF_INVALID_PTYPE_ID) {
- err = 0;
- goto vc_buf_unlock;
- }
+ IDPF_INVALID_PTYPE_ID)
+ return 0;
if (idpf_is_queue_model_split(vport->rxq_model))
k = le16_to_cpu(ptype->ptype_id_10);
@@ -2859,11 +2711,7 @@ int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport)
}
}
-vc_buf_unlock:
- mutex_unlock(&adapter->vc_buf_lock);
- kfree(ptype_info);
-
- return err;
+ return 0;
}
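
The ptype parsing above is a walk over variable-length records packed into one flat buffer: each record declares its own size via IDPF_GET_PTYPE_SIZE(), the offset is bounds-checked after every advance, and a sentinel ID ends the walk early. The core of that loop, reduced to a sketch:

/* Variable-length record walk sketch, mirroring the loop above. */
static int ex_walk_ptypes(const u8 *buf, size_t buf_len, u16 count)
{
	size_t off = IDPF_RX_PTYPE_HDR_SZ;
	u16 j;

	for (j = 0; j < count; j++) {
		const struct virtchnl2_ptype *p = (const void *)(buf + off);

		off += IDPF_GET_PTYPE_SIZE(p);
		if (off > buf_len)
			return -EINVAL;	/* record overruns the buffer */

		/* 0xFFFF indicates end of ptypes */
		if (le16_to_cpu(p->ptype_id_10) == IDPF_INVALID_PTYPE_ID)
			return 0;
	}

	return 0;
}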
/**
@@ -2875,27 +2723,20 @@ vc_buf_unlock:
*/
int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_loopback loopback;
- int err;
+ ssize_t reply_sz;
loopback.vport_id = cpu_to_le32(vport->vport_id);
loopback.enable = idpf_is_feature_ena(vport, NETIF_F_LOOPBACK);
- mutex_lock(&vport->vc_buf_lock);
-
- err = idpf_send_mb_msg(vport->adapter, VIRTCHNL2_OP_LOOPBACK,
- sizeof(loopback), (u8 *)&loopback);
- if (err)
- goto rel_lock;
+ xn_params.vc_op = VIRTCHNL2_OP_LOOPBACK;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &loopback;
+ xn_params.send_buf.iov_len = sizeof(loopback);
+ reply_sz = idpf_vc_xn_exec(vport->adapter, &xn_params);
- err = idpf_wait_for_event(vport->adapter, vport,
- IDPF_VC_LOOPBACK_STATE,
- IDPF_VC_LOOPBACK_STATE_ERR);
-
-rel_lock:
- mutex_unlock(&vport->vc_buf_lock);
-
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
/**
@@ -2960,7 +2801,7 @@ int idpf_init_dflt_mbx(struct idpf_adapter *adapter)
return -ENOENT;
}
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
return 0;
}
@@ -3057,35 +2898,42 @@ int idpf_vc_core_init(struct idpf_adapter *adapter)
u16 num_max_vports;
int err = 0;
+ if (!adapter->vcxn_mngr) {
+ adapter->vcxn_mngr = kzalloc(sizeof(*adapter->vcxn_mngr), GFP_KERNEL);
+ if (!adapter->vcxn_mngr) {
+ err = -ENOMEM;
+ goto init_failed;
+ }
+ }
+ idpf_vc_xn_init(adapter->vcxn_mngr);
+
while (adapter->state != __IDPF_INIT_SW) {
switch (adapter->state) {
- case __IDPF_STARTUP:
- if (idpf_send_ver_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_VER_CHECK;
- goto restart;
case __IDPF_VER_CHECK:
- err = idpf_recv_ver_msg(adapter);
- if (err == -EIO) {
- return err;
- } else if (err == -EAGAIN) {
- adapter->state = __IDPF_STARTUP;
+ err = idpf_send_ver_msg(adapter);
+ switch (err) {
+ case 0:
+ /* success, move state machine forward */
+ adapter->state = __IDPF_GET_CAPS;
+ fallthrough;
+ case -EAGAIN:
goto restart;
- } else if (err) {
+ default:
+ /* Something bad happened, try again but only a
+ * few times.
+ */
goto init_failed;
}
- if (idpf_send_get_caps_msg(adapter))
- goto init_failed;
- adapter->state = __IDPF_GET_CAPS;
- goto restart;
case __IDPF_GET_CAPS:
- if (idpf_recv_get_caps_msg(adapter))
+ err = idpf_send_get_caps_msg(adapter);
+ if (err)
goto init_failed;
adapter->state = __IDPF_INIT_SW;
break;
default:
dev_err(&adapter->pdev->dev, "Device is in bad state: %d\n",
adapter->state);
+ err = -EINVAL;
goto init_failed;
}
break;
@@ -3144,7 +2992,9 @@ restart:
queue_delayed_work(adapter->init_wq, &adapter->init_task,
msecs_to_jiffies(5 * (adapter->pdev->devfn & 0x07)));
- goto no_err;
+ set_bit(IDPF_VC_CORE_INIT, adapter->flags);
+
+ return 0;
err_intr_req:
cancel_delayed_work_sync(&adapter->serv_task);
@@ -3153,7 +3003,6 @@ err_intr_req:
err_netdev_alloc:
kfree(adapter->vports);
adapter->vports = NULL;
-no_err:
return err;
init_failed:
@@ -3170,7 +3019,9 @@ init_failed:
* register writes might not have taken effect. Retry to initialize
* the mailbox again
*/
- adapter->state = __IDPF_STARTUP;
+ adapter->state = __IDPF_VER_CHECK;
+ if (adapter->vcxn_mngr)
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_dflt_mbx(adapter);
set_bit(IDPF_HR_DRV_LOAD, adapter->flags);
queue_delayed_work(adapter->vc_event_wq, &adapter->vc_event_task,
@@ -3186,29 +3037,22 @@ init_failed:
*/
void idpf_vc_core_deinit(struct idpf_adapter *adapter)
{
- int i;
+ if (!test_bit(IDPF_VC_CORE_INIT, adapter->flags))
+ return;
+ idpf_vc_xn_shutdown(adapter->vcxn_mngr);
idpf_deinit_task(adapter);
idpf_intr_rel(adapter);
- /* Set all bits as we dont know on which vc_state the vhnl_wq is
- * waiting on and wakeup the virtchnl workqueue even if it is waiting
- * for the response as we are going down
- */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- set_bit(i, adapter->vc_state);
- wake_up(&adapter->vchnl_wq);
cancel_delayed_work_sync(&adapter->serv_task);
cancel_delayed_work_sync(&adapter->mbx_task);
idpf_vport_params_buf_rel(adapter);
- /* Clear all the bits */
- for (i = 0; i < IDPF_VC_NBITS; i++)
- clear_bit(i, adapter->vc_state);
-
kfree(adapter->vports);
adapter->vports = NULL;
+
+ clear_bit(IDPF_VC_CORE_INIT, adapter->flags);
}
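
With __IDPF_STARTUP gone, bring-up reduces to two synchronous negotiation steps before software init. Condensed to its essence (error and retry handling stripped; the real loop also honors -EAGAIN from the version handshake):

/* Condensed bring-up sketch: each step blocks on its own transaction. */
static int ex_negotiate(struct idpf_adapter *adapter)
{
	int err;

	err = idpf_send_ver_msg(adapter);	/* may ask for a resend */
	if (err)
		return err;
	adapter->state = __IDPF_GET_CAPS;

	err = idpf_send_get_caps_msg(adapter);
	if (err)
		return err;
	adapter->state = __IDPF_INIT_SW;

	return 0;
}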
/**
@@ -3624,6 +3468,75 @@ u32 idpf_get_vport_id(struct idpf_vport *vport)
}
/**
+ * idpf_mac_filter_async_handler - Async callback for mac filters
+ * @adapter: private data struct
+ * @xn: transaction for message
+ * @ctlq_msg: received message
+ *
+ * In some scenarios the driver can't sleep and wait for a reply (e.g. the
+ * stack is holding rtnl_lock) when adding a new MAC filter. That makes it
+ * difficult to deal with errors returned on the reply. The best we can
+ * ultimately do is remove the filter from our list of MAC filters and
+ * report the error.
+ */
+static int idpf_mac_filter_async_handler(struct idpf_adapter *adapter,
+ struct idpf_vc_xn *xn,
+ const struct idpf_ctlq_msg *ctlq_msg)
+{
+ struct virtchnl2_mac_addr_list *ma_list;
+ struct idpf_vport_config *vport_config;
+ struct virtchnl2_mac_addr *mac_addr;
+ struct idpf_mac_filter *f, *tmp;
+ struct list_head *ma_list_head;
+ struct idpf_vport *vport;
+ u16 num_entries;
+ int i;
+
+	/* on success we're done; we're only here if something bad happened */
+ if (!ctlq_msg->cookie.mbx.chnl_retval)
+ return 0;
+
+	/* make sure at least the struct header is there */
+ if (xn->reply_sz < sizeof(*ma_list))
+ goto invalid_payload;
+
+ ma_list = ctlq_msg->ctx.indirect.payload->va;
+ mac_addr = ma_list->mac_addr_list;
+ num_entries = le16_to_cpu(ma_list->num_mac_addr);
+ /* we should have received a buffer at least this big */
+ if (xn->reply_sz < struct_size(ma_list, mac_addr_list, num_entries))
+ goto invalid_payload;
+
+ vport = idpf_vid_to_vport(adapter, le32_to_cpu(ma_list->vport_id));
+ if (!vport)
+ goto invalid_payload;
+
+ vport_config = adapter->vport_config[le32_to_cpu(ma_list->vport_id)];
+ ma_list_head = &vport_config->user_config.mac_filter_list;
+
+	/* We can't do much to reconcile bad filters at this point; however,
+	 * we should at least remove them from our list one way or the other
+	 * so we have some idea which good filters remain.
+ */
+ spin_lock_bh(&vport_config->mac_filter_list_lock);
+ list_for_each_entry_safe(f, tmp, ma_list_head, list)
+ for (i = 0; i < num_entries; i++)
+ if (ether_addr_equal(mac_addr[i].addr, f->macaddr))
+ list_del(&f->list);
+ spin_unlock_bh(&vport_config->mac_filter_list_lock);
+ dev_err_ratelimited(&adapter->pdev->dev, "Received error sending MAC filter request (op %d)\n",
+ xn->vc_op);
+
+ return 0;
+
+invalid_payload:
+ dev_err_ratelimited(&adapter->pdev->dev, "Received invalid MAC filter payload (op %d) (len %zd)\n",
+ xn->vc_op, xn->reply_sz);
+
+ return -EINVAL;
+}
+
+/**
* idpf_add_del_mac_filters - Add/del mac filters
* @vport: Virtual port data structure
* @np: Netdev private structure
@@ -3636,17 +3549,21 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
struct idpf_netdev_priv *np,
bool add, bool async)
{
- struct virtchnl2_mac_addr_list *ma_list = NULL;
+ struct virtchnl2_mac_addr_list *ma_list __free(kfree) = NULL;
+ struct virtchnl2_mac_addr *mac_addr __free(kfree) = NULL;
struct idpf_adapter *adapter = np->adapter;
+ struct idpf_vc_xn_params xn_params = {};
struct idpf_vport_config *vport_config;
- enum idpf_vport_config_flags mac_flag;
- struct pci_dev *pdev = adapter->pdev;
- enum idpf_vport_vc_state vc, vc_err;
- struct virtchnl2_mac_addr *mac_addr;
- struct idpf_mac_filter *f, *tmp;
u32 num_msgs, total_filters = 0;
- int i = 0, k, err = 0;
- u32 vop;
+ struct idpf_mac_filter *f;
+ ssize_t reply_sz;
+ int i = 0, k;
+
+ xn_params.vc_op = add ? VIRTCHNL2_OP_ADD_MAC_ADDR :
+ VIRTCHNL2_OP_DEL_MAC_ADDR;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.async = async;
+ xn_params.async_handler = idpf_mac_filter_async_handler;
vport_config = adapter->vport_config[np->vport_idx];
spin_lock_bh(&vport_config->mac_filter_list_lock);
@@ -3670,13 +3587,13 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
mac_addr = kcalloc(total_filters, sizeof(struct virtchnl2_mac_addr),
GFP_ATOMIC);
if (!mac_addr) {
- err = -ENOMEM;
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- goto error;
+
+ return -ENOMEM;
}
- list_for_each_entry_safe(f, tmp, &vport_config->user_config.mac_filter_list,
- list) {
+ list_for_each_entry(f, &vport_config->user_config.mac_filter_list,
+ list) {
if (add && f->add) {
ether_addr_copy(mac_addr[i].addr, f->macaddr);
i++;
@@ -3695,26 +3612,11 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
spin_unlock_bh(&vport_config->mac_filter_list_lock);
- if (add) {
- vop = VIRTCHNL2_OP_ADD_MAC_ADDR;
- vc = IDPF_VC_ADD_MAC_ADDR;
- vc_err = IDPF_VC_ADD_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_ADD_MAC_REQ;
- } else {
- vop = VIRTCHNL2_OP_DEL_MAC_ADDR;
- vc = IDPF_VC_DEL_MAC_ADDR;
- vc_err = IDPF_VC_DEL_MAC_ADDR_ERR;
- mac_flag = IDPF_VPORT_DEL_MAC_REQ;
- }
-
/* Chunk up the filters into multiple messages to avoid
* sending a control queue message buffer that is too large
*/
num_msgs = DIV_ROUND_UP(total_filters, IDPF_NUM_FILTERS_PER_MSG);
- if (!async)
- mutex_lock(&vport->vc_buf_lock);
-
for (i = 0, k = 0; i < num_msgs; i++) {
u32 entries_size, buf_size, num_entries;
@@ -3726,10 +3628,8 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
if (!ma_list || num_entries != IDPF_NUM_FILTERS_PER_MSG) {
kfree(ma_list);
ma_list = kzalloc(buf_size, GFP_ATOMIC);
- if (!ma_list) {
- err = -ENOMEM;
- goto list_prep_error;
- }
+ if (!ma_list)
+ return -ENOMEM;
} else {
memset(ma_list, 0, buf_size);
}
@@ -3738,34 +3638,17 @@ int idpf_add_del_mac_filters(struct idpf_vport *vport,
ma_list->num_mac_addr = cpu_to_le16(num_entries);
memcpy(ma_list->mac_addr_list, &mac_addr[k], entries_size);
- if (async)
- set_bit(mac_flag, vport_config->flags);
-
- err = idpf_send_mb_msg(adapter, vop, buf_size, (u8 *)ma_list);
- if (err)
- goto mbx_error;
-
- if (!async) {
- err = idpf_wait_for_event(adapter, vport, vc, vc_err);
- if (err)
- goto mbx_error;
- }
+ xn_params.send_buf.iov_base = ma_list;
+ xn_params.send_buf.iov_len = buf_size;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
+ if (reply_sz < 0)
+ return reply_sz;
k += num_entries;
total_filters -= num_entries;
}
-mbx_error:
- if (!async)
- mutex_unlock(&vport->vc_buf_lock);
- kfree(ma_list);
-list_prep_error:
- kfree(mac_addr);
-error:
- if (err)
- dev_err(&pdev->dev, "Failed to add or del mac filters %d", err);
-
- return err;
+ return 0;
}
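
The async path above is the one place a caller registers a completion handler: with .async set, idpf_vc_xn_exec() returns as soon as the message is posted and the handler later runs from the idpf_recv_mb_msg() context. A sketch of the wiring (the opcode choice and handler body are illustrative):

/* Fire-and-forget sketch: ex_done() runs later from the receive path. */
static int ex_done(struct idpf_adapter *adapter, struct idpf_vc_xn *xn,
		   const struct idpf_ctlq_msg *ctlq_msg)
{
	if (ctlq_msg->cookie.mbx.chnl_retval)
		dev_err_ratelimited(&adapter->pdev->dev,
				    "async op %d failed\n", xn->vc_op);

	return 0;
}

static int ex_send_async(struct idpf_adapter *adapter, void *buf, size_t len)
{
	struct idpf_vc_xn_params xn_params = {};
	ssize_t reply_sz;

	xn_params.vc_op = VIRTCHNL2_OP_ADD_MAC_ADDR;
	xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
	xn_params.send_buf.iov_base = buf;
	xn_params.send_buf.iov_len = len;
	xn_params.async = true;
	xn_params.async_handler = ex_done;

	reply_sz = idpf_vc_xn_exec(adapter, &xn_params);

	return reply_sz < 0 ? reply_sz : 0;
}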
/**
@@ -3782,9 +3665,10 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
struct idpf_vport_user_config_data *config_data,
u32 vport_id)
{
+ struct idpf_vc_xn_params xn_params = {};
struct virtchnl2_promisc_info vpi;
+ ssize_t reply_sz;
u16 flags = 0;
- int err;
if (test_bit(__IDPF_PROMISC_UC, config_data->user_flags))
flags |= VIRTCHNL2_UNICAST_PROMISC;
@@ -3794,9 +3678,13 @@ int idpf_set_promiscuous(struct idpf_adapter *adapter,
vpi.vport_id = cpu_to_le32(vport_id);
vpi.flags = cpu_to_le16(flags);
- err = idpf_send_mb_msg(adapter, VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE,
- sizeof(struct virtchnl2_promisc_info),
- (u8 *)&vpi);
+ xn_params.vc_op = VIRTCHNL2_OP_CONFIG_PROMISCUOUS_MODE;
+ xn_params.timeout_ms = IDPF_VC_XN_DEFAULT_TIMEOUT_MSEC;
+ xn_params.send_buf.iov_base = &vpi;
+ xn_params.send_buf.iov_len = sizeof(vpi);
+ /* setting promiscuous is only ever done asynchronously */
+ xn_params.async = true;
+ reply_sz = idpf_vc_xn_exec(adapter, &xn_params);
- return err;
+ return reply_sz < 0 ? reply_sz : 0;
}
diff --git a/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
new file mode 100644
index 0000000000..83da5d8da5
--- /dev/null
+++ b/drivers/net/ethernet/intel/idpf/idpf_virtchnl.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* Copyright (C) 2024 Intel Corporation */
+
+#ifndef _IDPF_VIRTCHNL_H_
+#define _IDPF_VIRTCHNL_H_
+
+struct idpf_adapter;
+struct idpf_netdev_priv;
+struct idpf_vec_regs;
+struct idpf_vport;
+struct idpf_vport_max_q;
+struct idpf_vport_user_config_data;
+
+int idpf_init_dflt_mbx(struct idpf_adapter *adapter);
+void idpf_deinit_dflt_mbx(struct idpf_adapter *adapter);
+int idpf_vc_core_init(struct idpf_adapter *adapter);
+void idpf_vc_core_deinit(struct idpf_adapter *adapter);
+
+int idpf_get_reg_intr_vecs(struct idpf_vport *vport,
+ struct idpf_vec_regs *reg_vals);
+int idpf_queue_reg_init(struct idpf_vport *vport);
+int idpf_vport_queue_ids_init(struct idpf_vport *vport);
+
+int idpf_recv_mb_msg(struct idpf_adapter *adapter);
+int idpf_send_mb_msg(struct idpf_adapter *adapter, u32 op,
+ u16 msg_size, u8 *msg, u16 cookie);
+
+void idpf_vport_init(struct idpf_vport *vport, struct idpf_vport_max_q *max_q);
+u32 idpf_get_vport_id(struct idpf_vport *vport);
+int idpf_send_create_vport_msg(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_destroy_vport_msg(struct idpf_vport *vport);
+int idpf_send_enable_vport_msg(struct idpf_vport *vport);
+int idpf_send_disable_vport_msg(struct idpf_vport *vport);
+
+int idpf_vport_adjust_qs(struct idpf_vport *vport);
+int idpf_vport_alloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+void idpf_vport_dealloc_max_qs(struct idpf_adapter *adapter,
+ struct idpf_vport_max_q *max_q);
+int idpf_send_add_queues_msg(const struct idpf_vport *vport, u16 num_tx_q,
+ u16 num_complq, u16 num_rx_q, u16 num_rx_bufq);
+int idpf_send_delete_queues_msg(struct idpf_vport *vport);
+int idpf_send_enable_queues_msg(struct idpf_vport *vport);
+int idpf_send_disable_queues_msg(struct idpf_vport *vport);
+int idpf_send_config_queues_msg(struct idpf_vport *vport);
+
+int idpf_vport_alloc_vec_indexes(struct idpf_vport *vport);
+int idpf_get_vec_ids(struct idpf_adapter *adapter,
+ u16 *vecids, int num_vecids,
+ struct virtchnl2_vector_chunks *chunks);
+int idpf_send_alloc_vectors_msg(struct idpf_adapter *adapter, u16 num_vectors);
+int idpf_send_dealloc_vectors_msg(struct idpf_adapter *adapter);
+int idpf_send_map_unmap_queue_vector_msg(struct idpf_vport *vport, bool map);
+
+int idpf_add_del_mac_filters(struct idpf_vport *vport,
+ struct idpf_netdev_priv *np,
+ bool add, bool async);
+int idpf_set_promiscuous(struct idpf_adapter *adapter,
+ struct idpf_vport_user_config_data *config_data,
+ u32 vport_id);
+int idpf_check_supported_desc_ids(struct idpf_vport *vport);
+int idpf_send_get_rx_ptype_msg(struct idpf_vport *vport);
+int idpf_send_ena_dis_loopback_msg(struct idpf_vport *vport);
+int idpf_send_get_stats_msg(struct idpf_vport *vport);
+int idpf_send_set_sriov_vfs_msg(struct idpf_adapter *adapter, u16 num_vfs);
+int idpf_send_get_set_rss_key_msg(struct idpf_vport *vport, bool get);
+int idpf_send_get_set_rss_lut_msg(struct idpf_vport *vport, bool get);
+
+#endif /* _IDPF_VIRTCHNL_H_ */