Diffstat (limited to 'drivers/net/ethernet/intel/i40e')
 drivers/net/ethernet/intel/i40e/i40e.h             |   29
 drivers/net/ethernet/intel/i40e/i40e_adminq.h      |    4
 drivers/net/ethernet/intel/i40e/i40e_client.c      |   28
 drivers/net/ethernet/intel/i40e/i40e_common.c      |  253
 drivers/net/ethernet/intel/i40e/i40e_ddp.c         |    3
 drivers/net/ethernet/intel/i40e/i40e_debugfs.c     |   36
 drivers/net/ethernet/intel/i40e/i40e_ethtool.c     |   29
 drivers/net/ethernet/intel/i40e/i40e_main.c        |  484
 drivers/net/ethernet/intel/i40e/i40e_nvm.c         | 1010
 drivers/net/ethernet/intel/i40e/i40e_prototype.h   |    7
 drivers/net/ethernet/intel/i40e/i40e_ptp.c         |    6
 drivers/net/ethernet/intel/i40e/i40e_trace.h       |   10
 drivers/net/ethernet/intel/i40e/i40e_txrx.c        |   92
 drivers/net/ethernet/intel/i40e/i40e_txrx.h        |    2
 drivers/net/ethernet/intel/i40e/i40e_type.h        |   88
 drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c |   14
 drivers/net/ethernet/intel/i40e/i40e_xsk.c         |    5
 17 files changed, 878 insertions(+), 1222 deletions(-)
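A minimal standalone sketch (illustration only, not driver code) of the NULL-checking accessor pattern that the i40e.h hunk below introduces: callers obtain the main (LAN) VSI through i40e_pf_get_main_vsi() and the main VEB through i40e_pf_get_main_veb() instead of indexing pf->vsi[pf->lan_vsi] and pf->veb[pf->lan_veb] directly. The demo_* types and the DEMO_NO_VSI constant here are simplified placeholders, not the real i40e structures.

/* Standalone illustration of the accessor pattern; simplified stand-ins
 * for the real i40e structures.  The accessor returns NULL when the
 * main (LAN) VSI has not been set up yet, so callers must check.
 */
#include <stddef.h>
#include <stdio.h>

#define DEMO_NO_VSI 0xFFFFU		/* stands in for I40E_NO_VSI */

struct demo_vsi {
	unsigned short seid;		/* switch element ID of this VSI */
};

struct demo_pf {
	struct demo_vsi *vsi[4];	/* VSI table owned by the PF */
	unsigned short lan_vsi;		/* index of main VSI, or DEMO_NO_VSI */
};

/* Return the main VSI, or NULL if it does not exist yet. */
static inline struct demo_vsi *demo_pf_get_main_vsi(struct demo_pf *pf)
{
	return (pf->lan_vsi != DEMO_NO_VSI) ? pf->vsi[pf->lan_vsi] : NULL;
}

int main(void)
{
	struct demo_vsi main_vsi = { .seid = 390 };
	struct demo_pf pf = { .vsi = { &main_vsi }, .lan_vsi = 0 };
	struct demo_vsi *vsi = demo_pf_get_main_vsi(&pf);

	if (vsi)			/* always check before dereferencing */
		printf("main VSI seid: %u\n", (unsigned int)vsi->seid);
	return 0;
}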
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2fbabcdb5b..bca2084cc5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -788,7 +788,6 @@ struct i40e_veb { u16 stats_idx; /* index of VEB parent */ u8 enabled_tc; u16 bridge_mode; /* Bridge Mode (VEB/VEPA) */ - u16 flags; u16 bw_limit; u8 bw_max_quanta; bool is_abs_credits; @@ -1213,7 +1212,7 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi); void i40e_vsi_stop_rings_no_wait(struct i40e_vsi *vsi); int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi); int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count); -struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid, +struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid, u16 downlink_seid, u8 enabled_tc); void i40e_veb_release(struct i40e_veb *veb); @@ -1237,8 +1236,8 @@ static inline void i40e_dbg_exit(void) {} int i40e_lan_add_device(struct i40e_pf *pf); int i40e_lan_del_device(struct i40e_pf *pf); void i40e_client_subtask(struct i40e_pf *pf); -void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi); -void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset); +void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf); +void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset); void i40e_notify_client_of_vf_enable(struct i40e_pf *pf, u32 num_vfs); void i40e_notify_client_of_vf_reset(struct i40e_pf *pf, u32 vf_id); void i40e_client_update_msix_info(struct i40e_pf *pf); @@ -1374,6 +1373,17 @@ i40e_pf_get_vsi_by_seid(struct i40e_pf *pf, u16 seid) } /** + * i40e_pf_get_main_vsi - get pointer to main VSI + * @pf: pointer to a PF + * + * Return: pointer to main VSI or NULL if it does not exist + **/ +static inline struct i40e_vsi *i40e_pf_get_main_vsi(struct i40e_pf *pf) +{ + return (pf->lan_vsi != I40E_NO_VSI) ? pf->vsi[pf->lan_vsi] : NULL; +} + +/** * i40e_pf_get_veb_by_seid - find VEB by SEID * @pf: pointer to a PF * @seid: SEID of the VSI @@ -1391,4 +1401,15 @@ i40e_pf_get_veb_by_seid(struct i40e_pf *pf, u16 seid) return NULL; } +/** + * i40e_pf_get_main_veb - get pointer to main VEB + * @pf: pointer to a PF + * + * Return: pointer to main VEB or NULL if it does not exist + **/ +static inline struct i40e_veb *i40e_pf_get_main_veb(struct i40e_pf *pf) +{ + return (pf->lan_veb != I40E_NO_VEB) ? 
pf->veb[pf->lan_veb] : NULL; +} + #endif /* _I40E_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.h b/drivers/net/ethernet/intel/i40e/i40e_adminq.h index ee86d2c530..55b5bb884d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.h @@ -109,10 +109,6 @@ static inline int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) -EFBIG, /* I40E_AQ_RC_EFBIG */ }; - /* aq_rc is invalid if AQ timed out */ - if (aq_ret == -EIO) - return -EAGAIN; - if (!((u32)aq_rc < (sizeof(aq_to_posix) / sizeof((aq_to_posix)[0])))) return -ERANGE; diff --git a/drivers/net/ethernet/intel/i40e/i40e_client.c b/drivers/net/ethernet/intel/i40e/i40e_client.c index b32071ee84..59263551c3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_client.c +++ b/drivers/net/ethernet/intel/i40e/i40e_client.c @@ -101,25 +101,26 @@ i40e_notify_client_of_vf_msg(struct i40e_vsi *vsi, u32 vf_id, u8 *msg, u16 len) /** * i40e_notify_client_of_l2_param_changes - call the client notify callback - * @vsi: the VSI with l2 param changes + * @pf: PF device pointer * - * If there is a client to this VSI, call the client + * If there is a client, call its callback **/ -void i40e_notify_client_of_l2_param_changes(struct i40e_vsi *vsi) +void i40e_notify_client_of_l2_param_changes(struct i40e_pf *pf) { - struct i40e_pf *pf = vsi->back; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_client_instance *cdev = pf->cinst; struct i40e_params params; if (!cdev || !cdev->client) return; if (!cdev->client->ops || !cdev->client->ops->l2_param_change) { - dev_dbg(&vsi->back->pdev->dev, + dev_dbg(&pf->pdev->dev, "Cannot locate client instance l2_param_change routine\n"); return; } if (!test_bit(__I40E_CLIENT_INSTANCE_OPENED, &cdev->state)) { - dev_dbg(&vsi->back->pdev->dev, "Client is not open, abort l2 param change\n"); + dev_dbg(&pf->pdev->dev, + "Client is not open, abort l2 param change\n"); return; } memset(¶ms, 0, sizeof(params)); @@ -157,20 +158,19 @@ static void i40e_client_release_qvlist(struct i40e_info *ldev) /** * i40e_notify_client_of_netdev_close - call the client close callback - * @vsi: the VSI with netdev closed + * @pf: PF device pointer * @reset: true when close called due to a reset pending * * If there is a client to this netdev, call the client with close **/ -void i40e_notify_client_of_netdev_close(struct i40e_vsi *vsi, bool reset) +void i40e_notify_client_of_netdev_close(struct i40e_pf *pf, bool reset) { - struct i40e_pf *pf = vsi->back; struct i40e_client_instance *cdev = pf->cinst; if (!cdev || !cdev->client) return; if (!cdev->client->ops || !cdev->client->ops->close) { - dev_dbg(&vsi->back->pdev->dev, + dev_dbg(&pf->pdev->dev, "Cannot locate client instance close routine\n"); return; } @@ -333,9 +333,9 @@ static int i40e_register_auxiliary_dev(struct i40e_info *ldev, const char *name) **/ static void i40e_client_add_instance(struct i40e_pf *pf) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_client_instance *cdev = NULL; struct netdev_hw_addr *mac = NULL; - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; cdev = kzalloc(sizeof(*cdev), GFP_KERNEL); if (!cdev) @@ -399,9 +399,9 @@ void i40e_client_del_instance(struct i40e_pf *pf) **/ void i40e_client_subtask(struct i40e_pf *pf) { - struct i40e_client *client; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_client_instance *cdev; - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_client *client; int ret = 0; if (!test_and_clear_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state)) @@ 
-665,8 +665,8 @@ static int i40e_client_update_vsi_ctxt(struct i40e_info *ldev, bool is_vf, u32 vf_id, u32 flag, u32 valid_flag) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(ldev->pf); struct i40e_pf *pf = ldev->pf; - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; struct i40e_vsi_context ctxt; bool update = true; int err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index de6ca62957..e8031f1a9b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -381,259 +381,6 @@ int i40e_aq_set_rss_key(struct i40e_hw *hw, return i40e_aq_get_set_rss_key(hw, vsi_id, key, true); } -/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the - * hardware to a bit-field that can be used by SW to more easily determine the - * packet type. - * - * Macros are used to shorten the table lines and make this table human - * readable. - * - * We store the PTYPE in the top byte of the bit field - this is just so that - * we can check that the table doesn't have a row missing, as the index into - * the table should be the PTYPE. - * - * Typical work flow: - * - * IF NOT i40e_ptype_lookup[ptype].known - * THEN - * Packet is unknown - * ELSE IF i40e_ptype_lookup[ptype].outer_ip == I40E_RX_PTYPE_OUTER_IP - * Use the rest of the fields to look at the tunnels, inner protocols, etc - * ELSE - * Use the enum i40e_rx_l2_ptype to decode the packet type - * ENDIF - */ - -/* macro to make the table lines short, use explicit indexing with [PTYPE] */ -#define I40E_PTT(PTYPE, OUTER_IP, OUTER_IP_VER, OUTER_FRAG, T, TE, TEF, I, PL)\ - [PTYPE] = { \ - 1, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP, \ - I40E_RX_PTYPE_OUTER_##OUTER_IP_VER, \ - I40E_RX_PTYPE_##OUTER_FRAG, \ - I40E_RX_PTYPE_TUNNEL_##T, \ - I40E_RX_PTYPE_TUNNEL_END_##TE, \ - I40E_RX_PTYPE_##TEF, \ - I40E_RX_PTYPE_INNER_PROT_##I, \ - I40E_RX_PTYPE_PAYLOAD_LAYER_##PL } - -#define I40E_PTT_UNUSED_ENTRY(PTYPE) [PTYPE] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } - -/* shorter macros makes the table fit but are terse */ -#define I40E_RX_PTYPE_NOF I40E_RX_PTYPE_NOT_FRAG -#define I40E_RX_PTYPE_FRG I40E_RX_PTYPE_FRAG -#define I40E_RX_PTYPE_INNER_PROT_TS I40E_RX_PTYPE_INNER_PROT_TIMESYNC - -/* Lookup table mapping in the 8-bit HW PTYPE to the bit field for decoding */ -struct i40e_rx_ptype_decoded i40e_ptype_lookup[BIT(8)] = { - /* L2 Packet types */ - I40E_PTT_UNUSED_ENTRY(0), - I40E_PTT(1, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(2, L2, NONE, NOF, NONE, NONE, NOF, TS, PAY2), - I40E_PTT(3, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(4), - I40E_PTT_UNUSED_ENTRY(5), - I40E_PTT(6, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(7, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT_UNUSED_ENTRY(8), - I40E_PTT_UNUSED_ENTRY(9), - I40E_PTT(10, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY2), - I40E_PTT(11, L2, NONE, NOF, NONE, NONE, NOF, NONE, NONE), - I40E_PTT(12, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(13, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(14, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(15, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(16, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(17, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(18, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(19, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(20, L2, NONE, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(21, L2, NONE, NOF, NONE, NONE, 
NOF, NONE, PAY3), - - /* Non Tunneled IPv4 */ - I40E_PTT(22, IP, IPV4, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(23, IP, IPV4, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(24, IP, IPV4, NOF, NONE, NONE, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(25), - I40E_PTT(26, IP, IPV4, NOF, NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(27, IP, IPV4, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(28, IP, IPV4, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv4 --> IPv4 */ - I40E_PTT(29, IP, IPV4, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(30, IP, IPV4, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(31, IP, IPV4, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(32), - I40E_PTT(33, IP, IPV4, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(34, IP, IPV4, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(35, IP, IPV4, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> IPv6 */ - I40E_PTT(36, IP, IPV4, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(37, IP, IPV4, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(38, IP, IPV4, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(39), - I40E_PTT(40, IP, IPV4, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(41, IP, IPV4, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - I40E_PTT(42, IP, IPV4, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT */ - I40E_PTT(43, IP, IPV4, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> IPv4 */ - I40E_PTT(44, IP, IPV4, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(45, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(46, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(47), - I40E_PTT(48, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(49, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(50, IP, IPV4, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> IPv6 */ - I40E_PTT(51, IP, IPV4, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(52, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(53, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(54), - I40E_PTT(55, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(56, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(57, IP, IPV4, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC */ - I40E_PTT(58, IP, IPV4, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv4 --> GRE/NAT --> MAC --> IPv4 */ - I40E_PTT(59, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(60, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(61, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(62), - I40E_PTT(63, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(64, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(65, IP, IPV4, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT -> MAC --> IPv6 */ - I40E_PTT(66, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(67, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(68, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(69), - I40E_PTT(70, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(71, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(72, IP, IPV4, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv4 --> GRE/NAT --> MAC/VLAN */ - I40E_PTT(73, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv4 ---> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(74, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, 
IPV4, FRG, NONE, PAY3), - I40E_PTT(75, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(76, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(77), - I40E_PTT(78, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - I40E_PTT(79, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(80, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv4 -> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(81, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(82, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(83, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(84), - I40E_PTT(85, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(86, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(87, IP, IPV4, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* Non Tunneled IPv6 */ - I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3), - I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(91), - I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4), - I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4), - I40E_PTT(94, IP, IPV6, NOF, NONE, NONE, NOF, ICMP, PAY4), - - /* IPv6 --> IPv4 */ - I40E_PTT(95, IP, IPV6, NOF, IP_IP, IPV4, FRG, NONE, PAY3), - I40E_PTT(96, IP, IPV6, NOF, IP_IP, IPV4, NOF, NONE, PAY3), - I40E_PTT(97, IP, IPV6, NOF, IP_IP, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(98), - I40E_PTT(99, IP, IPV6, NOF, IP_IP, IPV4, NOF, TCP, PAY4), - I40E_PTT(100, IP, IPV6, NOF, IP_IP, IPV4, NOF, SCTP, PAY4), - I40E_PTT(101, IP, IPV6, NOF, IP_IP, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> IPv6 */ - I40E_PTT(102, IP, IPV6, NOF, IP_IP, IPV6, FRG, NONE, PAY3), - I40E_PTT(103, IP, IPV6, NOF, IP_IP, IPV6, NOF, NONE, PAY3), - I40E_PTT(104, IP, IPV6, NOF, IP_IP, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(105), - I40E_PTT(106, IP, IPV6, NOF, IP_IP, IPV6, NOF, TCP, PAY4), - I40E_PTT(107, IP, IPV6, NOF, IP_IP, IPV6, NOF, SCTP, PAY4), - I40E_PTT(108, IP, IPV6, NOF, IP_IP, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT */ - I40E_PTT(109, IP, IPV6, NOF, IP_GRENAT, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> IPv4 */ - I40E_PTT(110, IP, IPV6, NOF, IP_GRENAT, IPV4, FRG, NONE, PAY3), - I40E_PTT(111, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, NONE, PAY3), - I40E_PTT(112, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(113), - I40E_PTT(114, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, TCP, PAY4), - I40E_PTT(115, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, SCTP, PAY4), - I40E_PTT(116, IP, IPV6, NOF, IP_GRENAT, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> IPv6 */ - I40E_PTT(117, IP, IPV6, NOF, IP_GRENAT, IPV6, FRG, NONE, PAY3), - I40E_PTT(118, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, NONE, PAY3), - I40E_PTT(119, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(120), - I40E_PTT(121, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, TCP, PAY4), - I40E_PTT(122, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, SCTP, PAY4), - I40E_PTT(123, IP, IPV6, NOF, IP_GRENAT, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC */ - I40E_PTT(124, IP, IPV6, NOF, IP_GRENAT_MAC, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC -> IPv4 */ - I40E_PTT(125, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, FRG, NONE, PAY3), - I40E_PTT(126, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, NONE, PAY3), - I40E_PTT(127, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, 
UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(128), - I40E_PTT(129, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, TCP, PAY4), - I40E_PTT(130, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, SCTP, PAY4), - I40E_PTT(131, IP, IPV6, NOF, IP_GRENAT_MAC, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC -> IPv6 */ - I40E_PTT(132, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, FRG, NONE, PAY3), - I40E_PTT(133, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, NONE, PAY3), - I40E_PTT(134, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(135), - I40E_PTT(136, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, TCP, PAY4), - I40E_PTT(137, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, SCTP, PAY4), - I40E_PTT(138, IP, IPV6, NOF, IP_GRENAT_MAC, IPV6, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN */ - I40E_PTT(139, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, NONE, NOF, NONE, PAY3), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv4 */ - I40E_PTT(140, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, FRG, NONE, PAY3), - I40E_PTT(141, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, NONE, PAY3), - I40E_PTT(142, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(143), - I40E_PTT(144, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, TCP, PAY4), - I40E_PTT(145, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, SCTP, PAY4), - I40E_PTT(146, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV4, NOF, ICMP, PAY4), - - /* IPv6 --> GRE/NAT -> MAC/VLAN --> IPv6 */ - I40E_PTT(147, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, FRG, NONE, PAY3), - I40E_PTT(148, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, NONE, PAY3), - I40E_PTT(149, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, UDP, PAY4), - I40E_PTT_UNUSED_ENTRY(150), - I40E_PTT(151, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, TCP, PAY4), - I40E_PTT(152, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, SCTP, PAY4), - I40E_PTT(153, IP, IPV6, NOF, IP_GRENAT_MAC_VLAN, IPV6, NOF, ICMP, PAY4), - - /* unused entries */ - [154 ... 
255] = { 0, 0, 0, 0, 0, 0, 0, 0, 0 } -}; - /** * i40e_init_shared_code - Initialize the shared code * @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/intel/i40e/i40e_ddp.c b/drivers/net/ethernet/intel/i40e/i40e_ddp.c index 2f53f0f53b..daa9f2c42f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ddp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ddp.c @@ -407,8 +407,9 @@ static int i40e_ddp_load(struct net_device *netdev, const u8 *data, size_t size, **/ static int i40e_ddp_restore(struct i40e_pf *pf) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + struct net_device *netdev = vsi->netdev; struct i40e_ddp_old_profile_list *entry; - struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; int status = 0; if (!list_empty(&pf->ddp_old_prof)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index f9ba45f596..abf624d770 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -53,6 +53,7 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; + struct i40e_vsi *main_vsi; int bytes_not_copied; int buf_size = 256; char *buf; @@ -68,8 +69,8 @@ static ssize_t i40e_dbg_command_read(struct file *filp, char __user *buffer, if (!buf) return -ENOSPC; - len = snprintf(buf, buf_size, "%s: %s\n", - pf->vsi[pf->lan_vsi]->netdev->name, + main_vsi = i40e_pf_get_main_vsi(pf); + len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name, i40e_dbg_command_buf); bytes_not_copied = copy_to_user(buffer, buf, len); @@ -128,7 +129,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid) dev_info(&pf->pdev->dev, " state[%d] = %08lx\n", i, vsi->state[i]); - if (vsi == pf->vsi[pf->lan_vsi]) + if (vsi->type == I40E_VSI_MAIN) dev_info(&pf->pdev->dev, " MAC address: %pM Port MAC: %pM\n", pf->hw.mac.addr, pf->hw.mac.port_addr); @@ -786,7 +787,8 @@ static ssize_t i40e_dbg_command_write(struct file *filp, cnt = sscanf(&cmd_buf[7], "%i", &vsi_seid); if (cnt == 0) { /* default to PF VSI */ - vsi_seid = pf->vsi[pf->lan_vsi]->seid; + vsi = i40e_pf_get_main_vsi(pf); + vsi_seid = vsi->seid; } else if (vsi_seid < 0) { dev_info(&pf->pdev->dev, "add VSI %d: bad vsi seid\n", vsi_seid); @@ -867,7 +869,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } - veb = i40e_veb_setup(pf, 0, uplink_seid, vsi_seid, enabled_tc); + veb = i40e_veb_setup(pf, uplink_seid, vsi_seid, enabled_tc); if (veb) dev_info(&pf->pdev->dev, "added relay %d\n", veb->seid); else @@ -1030,7 +1032,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } - vsi = pf->vsi[pf->lan_vsi]; + vsi = i40e_pf_get_main_vsi(pf); switch_id = le16_to_cpu(vsi->info.switch_id) & I40E_AQ_VSI_SW_ID_MASK; @@ -1380,6 +1382,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n", i40e_get_current_fd_count(pf)); } else if (strncmp(cmd_buf, "lldp", 4) == 0) { + /* Get main VSI */ + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); + if (strncmp(&cmd_buf[5], "stop", 4) == 0) { int ret; @@ -1391,10 +1396,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, goto command_write_done; } ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, - pf->hw.mac.addr, - ETH_P_LLDP, 0, - pf->vsi[pf->lan_vsi]->seid, - 0, true, NULL, NULL); + pf->hw.mac.addr, ETH_P_LLDP, 0, + 
main_vsi->seid, 0, true, NULL, + NULL); if (ret) { dev_info(&pf->pdev->dev, "%s: Add Control Packet Filter AQ command failed =0x%x\n", @@ -1409,10 +1413,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp, int ret; ret = i40e_aq_add_rem_control_packet_filter(&pf->hw, - pf->hw.mac.addr, - ETH_P_LLDP, 0, - pf->vsi[pf->lan_vsi]->seid, - 0, false, NULL, NULL); + pf->hw.mac.addr, ETH_P_LLDP, 0, + main_vsi->seid, 0, false, NULL, + NULL); if (ret) { dev_info(&pf->pdev->dev, "%s: Remove Control Packet Filter AQ command failed =0x%x\n", @@ -1639,6 +1642,7 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, size_t count, loff_t *ppos) { struct i40e_pf *pf = filp->private_data; + struct i40e_vsi *main_vsi; int bytes_not_copied; int buf_size = 256; char *buf; @@ -1654,8 +1658,8 @@ static ssize_t i40e_dbg_netdev_ops_read(struct file *filp, char __user *buffer, if (!buf) return -ENOSPC; - len = snprintf(buf, buf_size, "%s: %s\n", - pf->vsi[pf->lan_vsi]->netdev->name, + main_vsi = i40e_pf_get_main_vsi(pf); + len = snprintf(buf, buf_size, "%s: %s\n", main_vsi->netdev->name, i40e_dbg_netdev_ops_buf); bytes_not_copied = copy_to_user(buffer, buf, len); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 42e7e6cdaa..4e28785c9f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -1241,7 +1241,7 @@ static int i40e_set_link_ksettings(struct net_device *netdev, i40e_partition_setting_complaint(pf); return -EOPNOTSUPP; } - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET && hw->phy.media_type != I40E_MEDIA_TYPE_FIBER && @@ -1710,7 +1710,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, return -EOPNOTSUPP; } - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED; @@ -2029,7 +2029,7 @@ static void i40e_get_ringparam(struct net_device *netdev, { struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_pf *pf = np->vsi->back; - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); ring->rx_max_pending = i40e_get_max_num_descriptors(pf); ring->tx_max_pending = i40e_get_max_num_descriptors(pf); @@ -2292,7 +2292,7 @@ static int i40e_get_stats_count(struct net_device *netdev) struct i40e_pf *pf = vsi->back; int stats_len; - if (vsi == pf->vsi[pf->lan_vsi] && pf->hw.partition_id == 1) + if (vsi->type == I40E_VSI_MAIN && pf->hw.partition_id == 1) stats_len = I40E_PF_STATS_LEN; else stats_len = I40E_VSI_STATS_LEN; @@ -2422,17 +2422,14 @@ static void i40e_get_ethtool_stats(struct net_device *netdev, } rcu_read_unlock(); - if (vsi != pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1) goto check_data_pointer; - veb_stats = ((pf->lan_veb != I40E_NO_VEB) && - (pf->lan_veb < I40E_MAX_VEB) && - test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags)); + veb = i40e_pf_get_main_veb(pf); + veb_stats = veb && test_bit(I40E_FLAG_VEB_STATS_ENA, pf->flags); - if (veb_stats) { - veb = pf->veb[pf->lan_veb]; + if (veb_stats) i40e_update_veb_stats(veb); - } /* If veb stats aren't enabled, pass NULL instead of the veb so that * we initialize stats to zero and update the data pointer @@ -2495,7 +2492,7 @@ static void i40e_get_stat_strings(struct net_device *netdev, u8 *data) "rx", i); } - if (vsi != 
pf->vsi[pf->lan_vsi] || pf->hw.partition_id != 1) + if (vsi->type != I40E_VSI_MAIN || pf->hw.partition_id != 1) goto check_data_pointer; i40e_add_stat_strings(&data, i40e_gstrings_veb_stats); @@ -2792,7 +2789,7 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; } - if (vsi != pf->vsi[pf->lan_vsi]) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; /* NVM bit on means WoL disabled for the port */ @@ -3370,6 +3367,7 @@ static int i40e_get_ethtool_fdir_entry(struct i40e_pf *pf, struct i40e_rx_flow_userdef userdef = {0}; struct i40e_fdir_filter *rule = NULL; struct hlist_node *node2; + struct i40e_vsi *vsi; u64 input_set; u16 index; @@ -3493,9 +3491,8 @@ no_input_set: fsp->flow_type |= FLOW_EXT; } - if (rule->dest_vsi != pf->vsi[pf->lan_vsi]->id) { - struct i40e_vsi *vsi; - + vsi = i40e_pf_get_main_vsi(pf); + if (rule->dest_vsi != vsi->id) { vsi = i40e_find_vsi_from_id(pf, rule->dest_vsi); if (vsi && vsi->type == I40E_VSI_SRIOV) { /* VFs are zero-indexed by the driver, but ethtool diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index ffb9f9f15c..310513d932 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -100,6 +100,7 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all), Debug mask (0x8XXXXXXX MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver"); +MODULE_IMPORT_NS(LIBIE); MODULE_LICENSE("GPL v2"); static struct workqueue_struct *i40e_wq; @@ -989,7 +990,7 @@ static void i40e_update_vsi_stats(struct i40e_vsi *vsi) ns->tx_dropped = es->tx_discards; /* pull in a couple PF stats if this is the main vsi */ - if (vsi == pf->vsi[pf->lan_vsi]) { + if (vsi->type == I40E_VSI_MAIN) { ns->rx_crc_errors = pf->stats.crc_errors; ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes; ns->rx_length_errors = pf->stats.rx_length_errors; @@ -1234,7 +1235,7 @@ void i40e_update_stats(struct i40e_vsi *vsi) { struct i40e_pf *pf = vsi->back; - if (vsi == pf->vsi[pf->lan_vsi]) + if (vsi->type == I40E_VSI_MAIN) i40e_update_pf_stats(pf); i40e_update_vsi_stats(vsi); @@ -2475,12 +2476,12 @@ i40e_aqc_broadcast_filter(struct i40e_vsi *vsi, const char *vsi_name, **/ static int i40e_set_promiscuous(struct i40e_pf *pf, bool promisc) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; int aq_ret; if (vsi->type == I40E_VSI_MAIN && - pf->lan_veb != I40E_NO_VEB && + i40e_pf_get_main_veb(pf) && !test_bit(I40E_FLAG_MFP_ENA, pf->flags)) { /* set defport ON for Main VSI instead of true promisc * this way we will get all unicast/multicast and VLAN @@ -2960,7 +2961,7 @@ static int i40e_change_mtu(struct net_device *netdev, int new_mtu) netdev_dbg(netdev, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); - netdev->mtu = new_mtu; + WRITE_ONCE(netdev->mtu, new_mtu); if (netif_running(netdev)) i40e_vsi_reinit_locked(vsi); set_bit(__I40E_CLIENT_SERVICE_REQUESTED, pf->state); @@ -4322,7 +4323,7 @@ static irqreturn_t i40e_intr(int irq, void *data) /* only q0 is used in MSI/Legacy mode, and none are used in MSIX */ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_q_vector *q_vector = vsi->q_vectors[0]; /* We do not have a way to disarm Queue causes while leaving @@ -5472,7 +5473,7 @@ 
static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg) **/ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); u8 num_tc = vsi->mqprio_qopt.qopt.num_tc; u8 enabled_tc = 1, i; @@ -5489,13 +5490,14 @@ static u8 i40e_mqprio_get_enabled_tc(struct i40e_pf *pf) **/ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) { - struct i40e_hw *hw = &pf->hw; u8 i, enabled_tc = 1; u8 num_tc = 0; - struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config; - if (i40e_is_tc_mqprio_enabled(pf)) - return pf->vsi[pf->lan_vsi]->mqprio_qopt.qopt.num_tc; + if (i40e_is_tc_mqprio_enabled(pf)) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + + return vsi->mqprio_qopt.qopt.num_tc; + } /* If neither MQPRIO nor DCB is enabled, then always use single TC */ if (!test_bit(I40E_FLAG_DCB_ENA, pf->flags)) @@ -5503,7 +5505,7 @@ static u8 i40e_pf_get_num_tc(struct i40e_pf *pf) /* SFP mode will be enabled for all TCs on port */ if (!test_bit(I40E_FLAG_MFP_ENA, pf->flags)) - return i40e_dcb_get_num_tc(dcbcfg); + return i40e_dcb_get_num_tc(&pf->hw.local_dcbx_config); /* MFP mode return count of enabled TCs for this PF */ if (pf->hw.func_caps.iscsi) @@ -5916,6 +5918,28 @@ out: } /** + * i40e_vsi_reconfig_tc - Reconfigure VSI Tx Scheduler for stored TC map + * @vsi: VSI to be reconfigured + * + * This reconfigures a particular VSI for TCs that are mapped to the + * TC bitmap stored previously for the VSI. + * + * Context: It is expected that the VSI queues have been quisced before + * calling this function. + * + * Return: 0 on success, negative value on failure + **/ +static int i40e_vsi_reconfig_tc(struct i40e_vsi *vsi) +{ + u8 enabled_tc; + + enabled_tc = vsi->tc_config.enabled_tc; + vsi->tc_config.enabled_tc = 0; + + return i40e_vsi_config_tc(vsi, enabled_tc); +} + +/** * i40e_get_link_speed - Returns link speed for the interface * @vsi: VSI to be configured * @@ -6477,6 +6501,7 @@ static inline int i40e_setup_hw_channel(struct i40e_pf *pf, static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, struct i40e_channel *ch) { + struct i40e_vsi *main_vsi; u8 vsi_type; u16 seid; int ret; @@ -6490,7 +6515,8 @@ static bool i40e_setup_channel(struct i40e_pf *pf, struct i40e_vsi *vsi, } /* underlying switching element */ - seid = pf->vsi[pf->lan_vsi]->uplink_seid; + main_vsi = i40e_pf_get_main_vsi(pf); + seid = main_vsi->uplink_seid; /* create channel (VSI), configure TX rings */ ret = i40e_setup_hw_channel(pf, vsi, ch, seid, vsi_type); @@ -6808,7 +6834,7 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf) /* - Enable all TCs for the LAN VSI * - For all others keep them at TC0 for now */ - if (v == pf->lan_vsi) + if (vsi->type == I40E_VSI_MAIN) tc_map = i40e_pf_get_tc_map(pf); else tc_map = I40E_DEFAULT_TRAFFIC_CLASS; @@ -7047,7 +7073,9 @@ int i40e_hw_dcb_config(struct i40e_pf *pf, struct i40e_dcbx_config *new_cfg) /* Configure Rx Packet Buffers in HW */ for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) { - mfs_tc[i] = pf->vsi[pf->lan_vsi]->netdev->mtu; + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); + + mfs_tc[i] = main_vsi->netdev->mtu; mfs_tc[i] += I40E_PACKET_HDR_PAD; } @@ -8643,6 +8671,10 @@ static int i40e_parse_cls_flower(struct i40e_vsi *vsi, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + f->common.extack)) + return -EOPNOTSUPP; } if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -9113,7 +9145,7 @@ 
err_setup_rx: i40e_vsi_free_rx_resources(vsi); err_setup_tx: i40e_vsi_free_tx_resources(vsi); - if (vsi == pf->vsi[pf->lan_vsi]) + if (vsi->type == I40E_VSI_MAIN) i40e_do_reset(pf, I40E_PF_RESET_FLAG, true); return err; @@ -9804,7 +9836,7 @@ static void i40e_fdir_flush_and_replay(struct i40e_pf *pf) dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n"); } else { /* replay sideband filters */ - i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]); + i40e_fdir_filter_restore(i40e_pf_get_main_vsi(pf)); if (!disable_atr && !pf->fd_tcp4_filter_cnt) clear_bit(__I40E_FD_ATR_AUTO_DISABLED, pf->state); clear_bit(__I40E_FD_FLUSH_REQUESTED, pf->state); @@ -9902,7 +9934,8 @@ static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up) **/ static void i40e_link_event(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + struct i40e_veb *veb = i40e_pf_get_main_veb(pf); u8 new_link_speed, old_link_speed; bool new_link, old_link; int status; @@ -9942,8 +9975,8 @@ static void i40e_link_event(struct i40e_pf *pf) /* Notify the base of the switch tree connected to * the link. Floating VEBs are not notified. */ - if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) - i40e_veb_link_event(pf->veb[pf->lan_veb], new_link); + if (veb) + i40e_veb_link_event(veb, new_link); else i40e_vsi_link_event(vsi, new_link); @@ -10273,7 +10306,7 @@ static void i40e_verify_eeprom(struct i40e_pf *pf) **/ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_vsi_context ctxt; int ret; @@ -10309,7 +10342,7 @@ static void i40e_enable_pf_switch_lb(struct i40e_pf *pf) **/ static void i40e_disable_pf_switch_lb(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_vsi_context ctxt; int ret; @@ -10385,7 +10418,7 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb) if (veb->uplink_seid == pf->mac_seid) { /* Check that the LAN VSI has VEB owning flag set */ - ctl_vsi = pf->vsi[pf->lan_vsi]; + ctl_vsi = i40e_pf_get_main_vsi(pf); if (WARN_ON(ctl_vsi->veb_idx != veb->idx || !(ctl_vsi->flags & I40E_VSI_FLAG_VEB_OWNER))) { @@ -10528,7 +10561,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi); **/ static void i40e_fdir_sb_setup(struct i40e_pf *pf) { - struct i40e_vsi *vsi; + struct i40e_vsi *main_vsi, *vsi; /* quick workaround for an NVM issue that leaves a critical register * uninitialized @@ -10553,8 +10586,8 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf) /* create a new VSI if none exists */ if (!vsi) { - vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, - pf->vsi[pf->lan_vsi]->seid, 0); + main_vsi = i40e_pf_get_main_vsi(pf); + vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR, main_vsi->seid, 0); if (!vsi) { dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n"); clear_bit(I40E_FLAG_FD_SB_ENA, pf->flags); @@ -10833,7 +10866,7 @@ static int i40e_reset(struct i40e_pf *pf) static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { const bool is_recovery_mode_reported = i40e_check_recovery_mode(pf); - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; struct i40e_veb *veb; int ret; @@ -10842,7 +10875,7 @@ static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) if (test_bit(__I40E_EMP_RESET_INTR_RECEIVED, pf->state) && is_recovery_mode_reported) - 
i40e_set_ethtool_ops(pf->vsi[pf->lan_vsi]->netdev); + i40e_set_ethtool_ops(vsi->netdev); if (test_bit(__I40E_DOWN, pf->state) && !test_bit(__I40E_RECOVERY_MODE, pf->state)) @@ -11138,6 +11171,8 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, ret = i40e_reset(pf); if (!ret) i40e_rebuild(pf, reinit, lock_acquired); + else + dev_err(&pf->pdev->dev, "%s: i40e_reset() FAILED", __func__); } /** @@ -11266,7 +11301,7 @@ static void i40e_service_task(struct work_struct *work) return; if (!test_bit(__I40E_RECOVERY_MODE, pf->state)) { - i40e_detect_recover_hung(pf->vsi[pf->lan_vsi]); + i40e_detect_recover_hung(pf); i40e_sync_filters_subtask(pf); i40e_reset_subtask(pf); i40e_handle_mdd_event(pf); @@ -11275,14 +11310,12 @@ static void i40e_service_task(struct work_struct *work) i40e_fdir_reinit_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_RESET, pf->state)) { /* Client subtask will reopen next time through. */ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], - true); + i40e_notify_client_of_netdev_close(pf, true); } else { i40e_client_subtask(pf); if (test_and_clear_bit(__I40E_CLIENT_L2_CHANGE, pf->state)) - i40e_notify_client_of_l2_param_changes( - pf->vsi[pf->lan_vsi]); + i40e_notify_client_of_l2_param_changes(pf); } i40e_sync_filters_subtask(pf); } else { @@ -11990,7 +12023,7 @@ static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi) /* if not MSIX, give the one vector only to the LAN VSI */ if (test_bit(I40E_FLAG_MSIX_ENA, pf->flags)) num_q_vectors = vsi->num_q_vectors; - else if (vsi == pf->vsi[pf->lan_vsi]) + else if (vsi->type == I40E_VSI_MAIN) num_q_vectors = 1; else return -EINVAL; @@ -12396,7 +12429,7 @@ void i40e_fill_rss_lut(struct i40e_pf *pf, u8 *lut, **/ static int i40e_pf_config_rss(struct i40e_pf *pf) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); u8 seed[I40E_HKEY_ARRAY_SIZE]; u8 *lut; struct i40e_hw *hw = &pf->hw; @@ -12468,7 +12501,7 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) **/ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) { - struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi]; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); int new_rss_size; if (!test_bit(I40E_FLAG_RSS_ENA, pf->flags)) @@ -13107,7 +13140,7 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, int rem; /* Only for PF VSI for now */ - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; /* Find the HW bridge for PF VSI */ @@ -13117,20 +13150,16 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev, if (!br_spec) return -EINVAL; - nla_for_each_nested(attr, br_spec, rem) { - __u16 mode; + nla_for_each_nested_type(attr, IFLA_BRIDGE_MODE, br_spec, rem) { + __u16 mode = nla_get_u16(attr); - if (nla_type(attr) != IFLA_BRIDGE_MODE) - continue; - - mode = nla_get_u16(attr); if ((mode != BRIDGE_MODE_VEPA) && (mode != BRIDGE_MODE_VEB)) return -EINVAL; /* Insert a new HW bridge */ if (!veb) { - veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { veb->bridge_mode = mode; @@ -13179,7 +13208,7 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct i40e_veb *veb; /* Only for PF VSI for now */ - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) + if (vsi->type != I40E_VSI_MAIN) return -EOPNOTSUPP; /* Find the HW bridge for the PF VSI */ @@ -13264,6 +13293,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, 
bool need_reset; int i; + /* VSI shall be deleted in a moment, block loading new programs */ + if (prog && test_bit(__I40E_IN_REMOVE, pf->state)) + return -EINVAL; + /* Don't allow frames that span over multiple buffers */ if (vsi->netdev->mtu > frame_size - I40E_PACKET_HDR_PAD) { NL_SET_ERR_MSG_MOD(extack, "MTU too large for linear frames and XDP prog does not support frags"); @@ -13272,14 +13305,9 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, /* When turning XDP on->off/off->on we reset and rebuild the rings. */ need_reset = (i40e_enabled_xdp_vsi(vsi) != !!prog); - if (need_reset) i40e_prep_for_reset(pf); - /* VSI shall be deleted in a moment, just return EINVAL */ - if (test_bit(__I40E_IN_REMOVE, pf->state)) - return -EINVAL; - old_prog = xchg(&vsi->xdp_prog, prog); if (need_reset) { @@ -13761,9 +13789,10 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) * the end, which is 4 bytes long, so force truncation of the * original name by IFNAMSIZ - 4 */ - snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", - IFNAMSIZ - 4, - pf->vsi[pf->lan_vsi]->netdev->name); + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); + + snprintf(netdev->name, IFNAMSIZ, "%.*sv%%d", IFNAMSIZ - 4, + main_vsi->netdev->name); eth_random_addr(mac_addr); spin_lock_bh(&vsi->mac_filter_hash_lock); @@ -14130,8 +14159,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi) vsi->seid, vsi->uplink_seid); return -ENODEV; } - if (vsi == pf->vsi[pf->lan_vsi] && - !test_bit(__I40E_DOWN, pf->state)) { + if (vsi->type == I40E_VSI_MAIN && !test_bit(__I40E_DOWN, pf->state)) { dev_info(&pf->pdev->dev, "Can't remove PF VSI\n"); return -ENODEV; } @@ -14275,9 +14303,9 @@ vector_setup_out: **/ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) { + struct i40e_vsi *main_vsi; u16 alloc_queue_pairs; struct i40e_pf *pf; - u8 enabled_tc; int ret; if (!vsi) @@ -14309,10 +14337,10 @@ static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) /* Update the FW view of the VSI. Force a reset of TC and queue * layout configurations. 
*/ - enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; - pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; - pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; - i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + main_vsi = i40e_pf_get_main_vsi(pf); + main_vsi->seid = pf->main_vsi_seid; + i40e_vsi_reconfig_tc(main_vsi); + if (vsi->type == I40E_VSI_MAIN) i40e_rm_default_mac_filter(vsi, pf->hw.mac.perm_addr); @@ -14386,13 +14414,13 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type, } if (vsi->uplink_seid == pf->mac_seid) - veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid, + veb = i40e_veb_setup(pf, pf->mac_seid, vsi->seid, vsi->tc_config.enabled_tc); else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) - veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid, + veb = i40e_veb_setup(pf, vsi->uplink_seid, vsi->seid, vsi->tc_config.enabled_tc); if (veb) { - if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) { + if (vsi->type != I40E_VSI_MAIN) { dev_info(&vsi->back->pdev->dev, "New VSI creation error, uplink seid of LAN VSI expected.\n"); return NULL; @@ -14783,7 +14811,6 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) /** * i40e_veb_setup - Set up a VEB * @pf: board private structure - * @flags: VEB setup flags * @uplink_seid: the switch element to link to * @vsi_seid: the initial VSI seid * @enabled_tc: Enabled TC bit-map @@ -14796,9 +14823,8 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi) * Returns pointer to the successfully allocated VEB sw struct on * success, otherwise returns NULL on failure. **/ -struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, - u16 uplink_seid, u16 vsi_seid, - u8 enabled_tc) +struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 uplink_seid, + u16 vsi_seid, u8 enabled_tc) { struct i40e_vsi *vsi = NULL; struct i40e_veb *veb; @@ -14829,7 +14855,6 @@ struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, if (veb_idx < 0) goto err_alloc; veb = pf->veb[veb_idx]; - veb->flags = flags; veb->uplink_seid = uplink_seid; veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1); @@ -14881,7 +14906,8 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, /* Main VEB? 
*/ if (uplink_seid != pf->mac_seid) break; - if (pf->lan_veb >= I40E_MAX_VEB) { + veb = i40e_pf_get_main_veb(pf); + if (!veb) { int v; /* find existing or else empty VEB */ @@ -14895,12 +14921,15 @@ static void i40e_setup_pf_switch_element(struct i40e_pf *pf, pf->lan_veb = v; } } - if (pf->lan_veb >= I40E_MAX_VEB) + + /* Try to get again main VEB as pf->lan_veb may have changed */ + veb = i40e_pf_get_main_veb(pf); + if (!veb) break; - pf->veb[pf->lan_veb]->seid = seid; - pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid; - pf->veb[pf->lan_veb]->pf = pf; + veb->seid = seid; + veb->uplink_seid = pf->mac_seid; + veb->pf = pf; break; case I40E_SWITCH_ELEMENT_TYPE_VSI: if (num_reported != 1) @@ -14998,6 +15027,7 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig) **/ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acquired) { + struct i40e_vsi *main_vsi; u16 flags = 0; int ret; @@ -15042,22 +15072,25 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui } /* first time setup */ - if (pf->lan_vsi == I40E_NO_VSI || reinit) { - struct i40e_vsi *vsi = NULL; + main_vsi = i40e_pf_get_main_vsi(pf); + if (!main_vsi || reinit) { + struct i40e_veb *veb; u16 uplink_seid; /* Set up the PF VSI associated with the PF's main VSI * that is already in the HW switch */ - if (pf->lan_veb < I40E_MAX_VEB && pf->veb[pf->lan_veb]) - uplink_seid = pf->veb[pf->lan_veb]->seid; + veb = i40e_pf_get_main_veb(pf); + if (veb) + uplink_seid = veb->seid; else uplink_seid = pf->mac_seid; - if (pf->lan_vsi == I40E_NO_VSI) - vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0); + if (!main_vsi) + main_vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, + uplink_seid, 0); else if (reinit) - vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]); - if (!vsi) { + main_vsi = i40e_vsi_reinit_setup(main_vsi); + if (!main_vsi) { dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n"); i40e_cloud_filter_exit(pf); i40e_fdir_teardown(pf); @@ -15065,13 +15098,10 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui } } else { /* force a reset of TC and queue layout configurations */ - u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc; - - pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0; - pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid; - i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc); + main_vsi->seid = pf->main_vsi_seid; + i40e_vsi_reconfig_tc(main_vsi); } - i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]); + i40e_vlan_stripping_disable(main_vsi); i40e_fdir_sb_setup(pf); @@ -15098,7 +15128,7 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit, bool lock_acqui rtnl_lock(); /* repopulate tunnel port filters */ - udp_tunnel_nic_reset_ntf(pf->vsi[pf->lan_vsi]->netdev); + udp_tunnel_nic_reset_ntf(main_vsi->netdev); if (!lock_acquired) rtnl_unlock(); @@ -15242,6 +15272,7 @@ static int i40e_setup_pf_filter_control(struct i40e_pf *pf) #define REMAIN(__x) (INFO_STRING_LEN - (__x)) static void i40e_print_features(struct i40e_pf *pf) { + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; char *buf; int i; @@ -15255,8 +15286,7 @@ static void i40e_print_features(struct i40e_pf *pf) i += scnprintf(&buf[i], REMAIN(i), " VFs: %d", pf->num_req_vfs); #endif i += scnprintf(&buf[i], REMAIN(i), " VSIs: %d QP: %d", - pf->hw.func_caps.num_vsis, - pf->vsi[pf->lan_vsi]->num_queue_pairs); + pf->hw.func_caps.num_vsis, main_vsi->num_queue_pairs); if (test_bit(I40E_FLAG_RSS_ENA, pf->flags)) i += 
scnprintf(&buf[i], REMAIN(i), " RSS"); if (test_bit(I40E_FLAG_FD_ATR_ENA, pf->flags)) @@ -15920,7 +15950,9 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err); goto err_vsis; } - INIT_LIST_HEAD(&pf->vsi[pf->lan_vsi]->ch_list); + + vsi = i40e_pf_get_main_vsi(pf); + INIT_LIST_HEAD(&vsi->ch_list); /* if FDIR VSI was set up, start it now */ vsi = i40e_find_vsi_by_type(pf, I40E_VSI_FDIR); @@ -16223,7 +16255,7 @@ static void i40e_remove(struct pci_dev *pdev) /* Client close must be called explicitly here because the timer * has been stopped. */ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + i40e_notify_client_of_netdev_close(pf, false); i40e_fdir_teardown(pf); @@ -16304,6 +16336,139 @@ unmap: } /** + * i40e_enable_mc_magic_wake - enable multicast magic packet wake up + * using the mac_address_write admin q function + * @pf: pointer to i40e_pf struct + **/ +static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) +{ + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); + struct i40e_hw *hw = &pf->hw; + u8 mac_addr[6]; + u16 flags = 0; + int ret; + + /* Get current MAC address in case it's an LAA */ + if (main_vsi && main_vsi->netdev) { + ether_addr_copy(mac_addr, main_vsi->netdev->dev_addr); + } else { + dev_err(&pf->pdev->dev, + "Failed to retrieve MAC address; using default\n"); + ether_addr_copy(mac_addr, hw->mac.addr); + } + + /* The FW expects the mac address write cmd to first be called with + * one of these flags before calling it again with the multicast + * enable flags. + */ + flags = I40E_AQC_WRITE_TYPE_LAA_WOL; + + if (hw->func_caps.flex10_enable && hw->partition_id != 1) + flags = I40E_AQC_WRITE_TYPE_LAA_ONLY; + + ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); + if (ret) { + dev_err(&pf->pdev->dev, + "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up"); + return; + } + + flags = I40E_AQC_MC_MAG_EN + | I40E_AQC_WOL_PRESERVE_ON_PFR + | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG; + ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); + if (ret) + dev_err(&pf->pdev->dev, + "Failed to enable Multicast Magic Packet wake up\n"); +} + +/** + * i40e_io_suspend - suspend all IO operations + * @pf: pointer to i40e_pf struct + * + **/ +static int i40e_io_suspend(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + + set_bit(__I40E_DOWN, pf->state); + + /* Ensure service task will not be running */ + del_timer_sync(&pf->service_timer); + cancel_work_sync(&pf->service_task); + + /* Client close must be called explicitly here because the timer + * has been stopped. + */ + i40e_notify_client_of_netdev_close(pf, false); + + if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && + pf->wol_en) + i40e_enable_mc_magic_wake(pf); + + /* Since we're going to destroy queues during the + * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this + * whole section + */ + rtnl_lock(); + + i40e_prep_for_reset(pf); + + wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); + wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); + + /* Clear the interrupt scheme and release our IRQs so that the system + * can safely hibernate even when there are a large number of CPUs. + * Otherwise hibernation might fail when mapping all the vectors back + * to CPU0. 
+ */ + i40e_clear_interrupt_scheme(pf); + + rtnl_unlock(); + + return 0; +} + +/** + * i40e_io_resume - resume IO operations + * @pf: pointer to i40e_pf struct + * + **/ +static int i40e_io_resume(struct i40e_pf *pf) +{ + struct device *dev = &pf->pdev->dev; + int err; + + /* We need to hold the RTNL lock prior to restoring interrupt schemes, + * since we're going to be restoring queues + */ + rtnl_lock(); + + /* We cleared the interrupt scheme when we suspended, so we need to + * restore it now to resume device functionality. + */ + err = i40e_restore_interrupt_scheme(pf); + if (err) { + dev_err(dev, "Cannot restore interrupt scheme: %d\n", + err); + } + + clear_bit(__I40E_DOWN, pf->state); + i40e_reset_and_rebuild(pf, false, true); + + rtnl_unlock(); + + /* Clear suspended state last after everything is recovered */ + clear_bit(__I40E_SUSPENDED, pf->state); + + /* Restart the service task */ + mod_timer(&pf->service_timer, + round_jiffies(jiffies + pf->service_timer_period)); + + return 0; +} + +/** * i40e_pci_error_detected - warning that something funky happened in PCI land * @pdev: PCI device information struct * @error: the type of PCI error @@ -16327,7 +16492,7 @@ static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev, /* shutdown all operations */ if (!test_bit(__I40E_SUSPENDED, pf->state)) - i40e_prep_for_reset(pf); + i40e_io_suspend(pf); /* Request a slot reset */ return PCI_ERS_RESULT_NEED_RESET; @@ -16349,7 +16514,8 @@ static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev) u32 reg; dev_dbg(&pdev->dev, "%s\n", __func__); - if (pci_enable_device_mem(pdev)) { + /* enable I/O and memory of the device */ + if (pci_enable_device(pdev)) { dev_info(&pdev->dev, "Cannot re-enable PCI device after reset.\n"); result = PCI_ERS_RESULT_DISCONNECT; @@ -16412,54 +16578,7 @@ static void i40e_pci_error_resume(struct pci_dev *pdev) if (test_bit(__I40E_SUSPENDED, pf->state)) return; - i40e_handle_reset_warning(pf, false); -} - -/** - * i40e_enable_mc_magic_wake - enable multicast magic packet wake up - * using the mac_address_write admin q function - * @pf: pointer to i40e_pf struct - **/ -static void i40e_enable_mc_magic_wake(struct i40e_pf *pf) -{ - struct i40e_hw *hw = &pf->hw; - u8 mac_addr[6]; - u16 flags = 0; - int ret; - - /* Get current MAC address in case it's an LAA */ - if (pf->vsi[pf->lan_vsi] && pf->vsi[pf->lan_vsi]->netdev) { - ether_addr_copy(mac_addr, - pf->vsi[pf->lan_vsi]->netdev->dev_addr); - } else { - dev_err(&pf->pdev->dev, - "Failed to retrieve MAC address; using default\n"); - ether_addr_copy(mac_addr, hw->mac.addr); - } - - /* The FW expects the mac address write cmd to first be called with - * one of these flags before calling it again with the multicast - * enable flags. 
- */ - flags = I40E_AQC_WRITE_TYPE_LAA_WOL; - - if (hw->func_caps.flex10_enable && hw->partition_id != 1) - flags = I40E_AQC_WRITE_TYPE_LAA_ONLY; - - ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); - if (ret) { - dev_err(&pf->pdev->dev, - "Failed to update MAC address registers; cannot enable Multicast Magic packet wake up"); - return; - } - - flags = I40E_AQC_MC_MAG_EN - | I40E_AQC_WOL_PRESERVE_ON_PFR - | I40E_AQC_WRITE_TYPE_UPDATE_MC_MAG; - ret = i40e_aq_mac_address_write(hw, flags, mac_addr, NULL); - if (ret) - dev_err(&pf->pdev->dev, - "Failed to enable Multicast Magic Packet wake up\n"); + i40e_io_resume(pf); } /** @@ -16482,7 +16601,7 @@ static void i40e_shutdown(struct pci_dev *pdev) /* Client close must be called explicitly here because the timer * has been stopped. */ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); + i40e_notify_client_of_netdev_close(pf, false); if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && pf->wol_en) @@ -16518,93 +16637,28 @@ static void i40e_shutdown(struct pci_dev *pdev) * i40e_suspend - PM callback for moving to D3 * @dev: generic device information structure **/ -static int __maybe_unused i40e_suspend(struct device *dev) +static int i40e_suspend(struct device *dev) { struct i40e_pf *pf = dev_get_drvdata(dev); - struct i40e_hw *hw = &pf->hw; /* If we're already suspended, then there is nothing to do */ if (test_and_set_bit(__I40E_SUSPENDED, pf->state)) return 0; - - set_bit(__I40E_DOWN, pf->state); - - /* Ensure service task will not be running */ - del_timer_sync(&pf->service_timer); - cancel_work_sync(&pf->service_task); - - /* Client close must be called explicitly here because the timer - * has been stopped. - */ - i40e_notify_client_of_netdev_close(pf->vsi[pf->lan_vsi], false); - - if (test_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, pf->hw.caps) && - pf->wol_en) - i40e_enable_mc_magic_wake(pf); - - /* Since we're going to destroy queues during the - * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this - * whole section - */ - rtnl_lock(); - - i40e_prep_for_reset(pf); - - wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0)); - wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0)); - - /* Clear the interrupt scheme and release our IRQs so that the system - * can safely hibernate even when there are a large number of CPUs. - * Otherwise hibernation might fail when mapping all the vectors back - * to CPU0. - */ - i40e_clear_interrupt_scheme(pf); - - rtnl_unlock(); - - return 0; + return i40e_io_suspend(pf); } /** * i40e_resume - PM callback for waking up from D3 * @dev: generic device information structure **/ -static int __maybe_unused i40e_resume(struct device *dev) +static int i40e_resume(struct device *dev) { struct i40e_pf *pf = dev_get_drvdata(dev); - int err; /* If we're not suspended, then there is nothing to do */ if (!test_bit(__I40E_SUSPENDED, pf->state)) return 0; - - /* We need to hold the RTNL lock prior to restoring interrupt schemes, - * since we're going to be restoring queues - */ - rtnl_lock(); - - /* We cleared the interrupt scheme when we suspended, so we need to - * restore it now to resume device functionality. 
- */ - err = i40e_restore_interrupt_scheme(pf); - if (err) { - dev_err(dev, "Cannot restore interrupt scheme: %d\n", - err); - } - - clear_bit(__I40E_DOWN, pf->state); - i40e_reset_and_rebuild(pf, false, true); - - rtnl_unlock(); - - /* Clear suspended state last after everything is recovered */ - clear_bit(__I40E_SUSPENDED, pf->state); - - /* Restart the service task */ - mod_timer(&pf->service_timer, - round_jiffies(jiffies + pf->service_timer_period)); - - return 0; + return i40e_io_resume(pf); } static const struct pci_error_handlers i40e_err_handler = { @@ -16615,16 +16669,14 @@ static const struct pci_error_handlers i40e_err_handler = { .resume = i40e_pci_error_resume, }; -static SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume); +static DEFINE_SIMPLE_DEV_PM_OPS(i40e_pm_ops, i40e_suspend, i40e_resume); static struct pci_driver i40e_driver = { .name = i40e_driver_name, .id_table = i40e_pci_tbl, .probe = i40e_probe, .remove = i40e_remove, - .driver = { - .pm = &i40e_pm_ops, - }, + .driver.pm = pm_sleep_ptr(&i40e_pm_ops), .shutdown = i40e_shutdown, .err_handler = &i40e_err_handler, .sriov_configure = i40e_pci_sriov_configure, diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 605fd82f5d..7f0936f4e0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -734,37 +734,7 @@ int i40e_validate_nvm_checksum(struct i40e_hw *hw, return ret_code; } -static int i40e_nvmupd_state_init(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_state_reading(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_state_writing(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *errno); -static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno); -static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno); -static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno); -static inline u8 i40e_nvmupd_get_module(u32 val) +static u8 i40e_nvmupd_get_module(u32 val) { return (u8)(val & I40E_NVM_MOD_PNT_MASK); } @@ -799,121 +769,408 @@ static const char * const i40e_nvm_update_state_str[] = { }; /** - * i40e_nvmupd_command - Process an NVM update command + * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command - * @bytes: pointer to the data buffer + * @cmd: pointer to nvm update command buffer * @perrno: pointer to return error code * - * Dispatches command depending on what update state is current + * Return one of the valid command types or I40E_NVMUPD_INVALID **/ -int i40e_nvmupd_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +static enum i40e_nvmupd_cmd +i40e_nvmupd_validate_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, + int *perrno) { enum i40e_nvmupd_cmd upd_cmd; - 
int status; - - /* assume success */ - *perrno = 0; + u8 module, transaction; - /* early check for status command and debug msgs */ - upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); + /* anything that doesn't match a recognized case is an error */ + upd_cmd = I40E_NVMUPD_INVALID; - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", - i40e_nvm_update_state_str[upd_cmd], - hw->nvmupd_state, - hw->nvm_release_on_done, hw->nvm_wait_opcode, - cmd->command, cmd->config, cmd->offset, cmd->data_size); + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); - if (upd_cmd == I40E_NVMUPD_INVALID) { - *perrno = -EFAULT; + /* limits on data size */ + if (cmd->data_size < 1 || cmd->data_size > I40E_NVMUPD_MAX_DATA) { i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_validate_command returns %d errno %d\n", - upd_cmd, *perrno); + "%s data_size %d\n", __func__, cmd->data_size); + *perrno = -EFAULT; + return I40E_NVMUPD_INVALID; } - /* a status request returns immediately rather than - * going into the state machine - */ - if (upd_cmd == I40E_NVMUPD_STATUS) { - if (!cmd->data_size) { - *perrno = -EFAULT; - return -EINVAL; + switch (cmd->command) { + case I40E_NVM_READ: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_READ_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_READ_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_READ_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_READ_SA; + break; + case I40E_NVM_EXEC: + if (module == 0xf) + upd_cmd = I40E_NVMUPD_STATUS; + else if (module == 0) + upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; + break; + case I40E_NVM_AQE: + upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; + break; } + break; - bytes[0] = hw->nvmupd_state; - - if (cmd->data_size >= 4) { - bytes[1] = 0; - *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; + case I40E_NVM_WRITE: + switch (transaction) { + case I40E_NVM_CON: + upd_cmd = I40E_NVMUPD_WRITE_CON; + break; + case I40E_NVM_SNT: + upd_cmd = I40E_NVMUPD_WRITE_SNT; + break; + case I40E_NVM_LCB: + upd_cmd = I40E_NVMUPD_WRITE_LCB; + break; + case I40E_NVM_SA: + upd_cmd = I40E_NVMUPD_WRITE_SA; + break; + case I40E_NVM_ERA: + upd_cmd = I40E_NVMUPD_WRITE_ERA; + break; + case I40E_NVM_CSUM: + upd_cmd = I40E_NVMUPD_CSUM_CON; + break; + case (I40E_NVM_CSUM | I40E_NVM_SA): + upd_cmd = I40E_NVMUPD_CSUM_SA; + break; + case (I40E_NVM_CSUM | I40E_NVM_LCB): + upd_cmd = I40E_NVMUPD_CSUM_LCB; + break; + case I40E_NVM_EXEC: + if (module == 0) + upd_cmd = I40E_NVMUPD_EXEC_AQ; + break; } + break; + } - /* Clear error status on read */ - if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + return upd_cmd; +} - return 0; +/** + * i40e_nvmupd_nvm_erase - Erase an NVM module + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @perrno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + u8 module, transaction; + int status = 0; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_erase_nvm(hw, module, cmd->offset, 
(u16)cmd->data_size, + last, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "%s mod 0x%x off 0x%x len 0x%x\n", + __func__, module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "%s status %d aq %d\n", + __func__, status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } - /* Clear status even it is not read and log */ - if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) { + return status; +} + +/** + * i40e_nvmupd_nvm_write - Write NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * module, offset, data_size and data are in cmd structure + **/ +static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + u8 module, transaction; + u8 preservation_flags; + int status = 0; + bool last; + + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction & I40E_NVM_LCB); + preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config); + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_update_nvm(hw, module, cmd->offset, + (u16)cmd->data_size, bytes, last, + preservation_flags, &cmd_details); + if (status) { i40e_debug(hw, I40E_DEBUG_NVM, - "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n"); - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + "%s mod 0x%x off 0x%x len 0x%x\n", + __func__, module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "%s status %d aq %d\n", + __func__, status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } - /* Acquire lock to prevent race condition where adminq_task - * can execute after i40e_nvmupd_nvm_read/write but before state - * variables (nvm_wait_opcode, nvm_release_on_done) are updated. - * - * During NVMUpdate, it is observed that lock could be held for - * ~5ms for most commands. However lock is held for ~60ms for - * NVMUPD_CSUM_LCB command. 
- */ - mutex_lock(&hw->aq.arq_mutex); - switch (hw->nvmupd_state) { - case I40E_NVMUPD_STATE_INIT: - status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); - break; + return status; +} - case I40E_NVMUPD_STATE_READING: - status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); - break; +/** + * i40e_nvmupd_nvm_read - Read NVM + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + u8 module, transaction; + int status; + bool last; - case I40E_NVMUPD_STATE_WRITING: - status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); - break; + transaction = i40e_nvmupd_get_transaction(cmd->config); + module = i40e_nvmupd_get_module(cmd->config); + last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); - case I40E_NVMUPD_STATE_INIT_WAIT: - case I40E_NVMUPD_STATE_WRITE_WAIT: - /* if we need to stop waiting for an event, clear - * the wait info and return before doing anything else - */ - if (cmd->offset == 0xffff) { - i40e_nvmupd_clear_wait_state(hw); - status = 0; - break; + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, + bytes, last, &cmd_details); + if (status) { + i40e_debug(hw, I40E_DEBUG_NVM, + "%s mod 0x%x off 0x%x len 0x%x\n", + __func__, module, cmd->offset, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, + "%s status %d aq %d\n", + __func__, status, hw->aq.asq_last_status); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + } + + return status; +} + +/** + * i40e_nvmupd_exec_aq - Run an AQ command + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + struct i40e_asq_cmd_details cmd_details; + struct i40e_aq_desc *aq_desc; + u32 buff_size = 0; + u8 *buff = NULL; + u32 aq_desc_len; + u32 aq_data_len; + int status; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + if (cmd->offset == 0xffff) + return 0; + + memset(&cmd_details, 0, sizeof(cmd_details)); + cmd_details.wb_desc = &hw->nvm_wb_desc; + + aq_desc_len = sizeof(struct i40e_aq_desc); + memset(&hw->nvm_wb_desc, 0, aq_desc_len); + + /* get the aq descriptor */ + if (cmd->data_size < aq_desc_len) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", + cmd->data_size, aq_desc_len); + *perrno = -EINVAL; + return -EINVAL; + } + aq_desc = (struct i40e_aq_desc *)bytes; + + /* if data buffer needed, make sure it's ready */ + aq_data_len = cmd->data_size - aq_desc_len; + buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen)); + if (buff_size) { + if (!hw->nvm_buff.va) { + status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, + hw->aq.asq_buf_size); + if (status) + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", + status); } - status = -EBUSY; - *perrno = -EBUSY; - break; + if (hw->nvm_buff.va) { + buff = hw->nvm_buff.va; + memcpy(buff, &bytes[aq_desc_len], aq_data_len); 
+ } + } - default: - /* invalid state, should never happen */ + if (cmd->offset) + memset(&hw->nvm_aq_event_desc, 0, aq_desc_len); + + /* and away we go! */ + status = i40e_asq_send_command(hw, aq_desc, buff, + buff_size, &cmd_details); + if (status) { i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: no such state %d\n", hw->nvmupd_state); - status = -EOPNOTSUPP; - *perrno = -ESRCH; - break; + "%s err %pe aq_err %s\n", + __func__, ERR_PTR(status), + i40e_aq_str(hw, hw->aq.asq_last_status)); + *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + return status; + } + + /* should we wait for a followup event? */ + if (cmd->offset) { + hw->nvm_wait_opcode = cmd->offset; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } - mutex_unlock(&hw->aq.arq_mutex); return status; } /** + * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + int remainder; + u8 *buff; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen); + + /* check offset range */ + if (cmd->offset > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", + __func__, cmd->offset, aq_total_len); + *perrno = -EINVAL; + return -EINVAL; + } + + /* check copylength range */ + if (cmd->data_size > (aq_total_len - cmd->offset)) { + int new_len = aq_total_len - cmd->offset; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, new_len); + cmd->data_size = new_len; + } + + remainder = cmd->data_size; + if (cmd->offset < aq_desc_len) { + u32 len = aq_desc_len - cmd->offset; + + len = min(len, cmd->data_size); + i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", + __func__, cmd->offset, cmd->offset + len); + + buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; + memcpy(bytes, buff, len); + + bytes += len; + remainder -= len; + buff = hw->nvm_buff.va; + } else { + buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len); + } + + if (remainder > 0) { + int start_byte = buff - (u8 *)hw->nvm_buff.va; + + i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", + __func__, start_byte, start_byte + remainder); + memcpy(bytes, buff, remainder); + } + + return 0; +} + +/** + * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq + * @hw: pointer to hardware structure + * @cmd: pointer to nvm update command buffer + * @bytes: pointer to the data buffer + * @perrno: pointer to return error code + * + * cmd structure contains identifiers and data buffer + **/ +static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) +{ + u32 aq_total_len; + u32 aq_desc_len; + + i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + + aq_desc_len = sizeof(struct i40e_aq_desc); + aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen); + + /* check copylength range */ + if (cmd->data_size > aq_total_len) { + i40e_debug(hw, I40E_DEBUG_NVM, + "%s: copy length %d too big, trimming to %d\n", + __func__, cmd->data_size, aq_total_len); + cmd->data_size = 
aq_total_len; + } + + memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size); + + return 0; +} + +/** * i40e_nvmupd_state_init - Handle NVM update state Init * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer @@ -937,7 +1194,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); i40e_release_nvm(hw); @@ -948,7 +1205,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_read(hw, cmd, bytes, perrno); if (status) @@ -962,7 +1219,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_erase(hw, cmd, perrno); if (status) { @@ -979,7 +1236,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { @@ -996,7 +1253,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); if (status) { @@ -1012,7 +1269,7 @@ static int i40e_nvmupd_state_init(struct i40e_hw *hw, status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE); if (status) { *perrno = i40e_aq_rc_to_posix(status, - hw->aq.asq_last_status); + hw->aq.asq_last_status); } else { status = i40e_update_nvm_checksum(hw); if (status) { @@ -1185,7 +1442,7 @@ retry: * so here we try to reacquire the semaphore then retry the write. * We only do one retry, then give up. 
*/ - if (status && (hw->aq.asq_last_status == I40E_AQ_RC_EBUSY) && + if (status && hw->aq.asq_last_status == I40E_AQ_RC_EBUSY && !retry_attempt) { u32 old_asq_status = hw->aq.asq_last_status; int old_status = status; @@ -1215,457 +1472,168 @@ retry: } /** - * i40e_nvmupd_clear_wait_state - clear wait state on hw - * @hw: pointer to the hardware structure - **/ -void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) -{ - i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: clearing wait on opcode 0x%04x\n", - hw->nvm_wait_opcode); - - if (hw->nvm_release_on_done) { - i40e_release_nvm(hw); - hw->nvm_release_on_done = false; - } - hw->nvm_wait_opcode = 0; - - if (hw->aq.arq_last_status) { - hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; - return; - } - - switch (hw->nvmupd_state) { - case I40E_NVMUPD_STATE_INIT_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - break; - - case I40E_NVMUPD_STATE_WRITE_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; - break; - - default: - break; - } -} - -/** - * i40e_nvmupd_check_wait_event - handle NVM update operation events - * @hw: pointer to the hardware structure - * @opcode: the event that just happened - * @desc: AdminQ descriptor - **/ -void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, - struct i40e_aq_desc *desc) -{ - u32 aq_desc_len = sizeof(struct i40e_aq_desc); - - if (opcode == hw->nvm_wait_opcode) { - memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len); - i40e_nvmupd_clear_wait_state(hw); - } -} - -/** - * i40e_nvmupd_validate_command - Validate given command - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @perrno: pointer to return error code - * - * Return one of the valid command types or I40E_NVMUPD_INVALID - **/ -static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno) -{ - enum i40e_nvmupd_cmd upd_cmd; - u8 module, transaction; - - /* anything that doesn't match a recognized case is an error */ - upd_cmd = I40E_NVMUPD_INVALID; - - transaction = i40e_nvmupd_get_transaction(cmd->config); - module = i40e_nvmupd_get_module(cmd->config); - - /* limits on data size */ - if ((cmd->data_size < 1) || - (cmd->data_size > I40E_NVMUPD_MAX_DATA)) { - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_validate_command data_size %d\n", - cmd->data_size); - *perrno = -EFAULT; - return I40E_NVMUPD_INVALID; - } - - switch (cmd->command) { - case I40E_NVM_READ: - switch (transaction) { - case I40E_NVM_CON: - upd_cmd = I40E_NVMUPD_READ_CON; - break; - case I40E_NVM_SNT: - upd_cmd = I40E_NVMUPD_READ_SNT; - break; - case I40E_NVM_LCB: - upd_cmd = I40E_NVMUPD_READ_LCB; - break; - case I40E_NVM_SA: - upd_cmd = I40E_NVMUPD_READ_SA; - break; - case I40E_NVM_EXEC: - if (module == 0xf) - upd_cmd = I40E_NVMUPD_STATUS; - else if (module == 0) - upd_cmd = I40E_NVMUPD_GET_AQ_RESULT; - break; - case I40E_NVM_AQE: - upd_cmd = I40E_NVMUPD_GET_AQ_EVENT; - break; - } - break; - - case I40E_NVM_WRITE: - switch (transaction) { - case I40E_NVM_CON: - upd_cmd = I40E_NVMUPD_WRITE_CON; - break; - case I40E_NVM_SNT: - upd_cmd = I40E_NVMUPD_WRITE_SNT; - break; - case I40E_NVM_LCB: - upd_cmd = I40E_NVMUPD_WRITE_LCB; - break; - case I40E_NVM_SA: - upd_cmd = I40E_NVMUPD_WRITE_SA; - break; - case I40E_NVM_ERA: - upd_cmd = I40E_NVMUPD_WRITE_ERA; - break; - case I40E_NVM_CSUM: - upd_cmd = I40E_NVMUPD_CSUM_CON; - break; - case (I40E_NVM_CSUM|I40E_NVM_SA): - upd_cmd = I40E_NVMUPD_CSUM_SA; - break; - case (I40E_NVM_CSUM|I40E_NVM_LCB): - upd_cmd = I40E_NVMUPD_CSUM_LCB; - break; - case 
I40E_NVM_EXEC: - if (module == 0) - upd_cmd = I40E_NVMUPD_EXEC_AQ; - break; - } - break; - } - - return upd_cmd; -} - -/** - * i40e_nvmupd_exec_aq - Run an AQ command + * i40e_nvmupd_command - Process an NVM update command * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer + * @cmd: pointer to nvm update command * @bytes: pointer to the data buffer * @perrno: pointer to return error code * - * cmd structure contains identifiers and data buffer + * Dispatches command depending on what update state is current **/ -static int i40e_nvmupd_exec_aq(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +int i40e_nvmupd_command(struct i40e_hw *hw, + struct i40e_nvm_access *cmd, + u8 *bytes, int *perrno) { - struct i40e_asq_cmd_details cmd_details; - struct i40e_aq_desc *aq_desc; - u32 buff_size = 0; - u8 *buff = NULL; - u32 aq_desc_len; - u32 aq_data_len; + enum i40e_nvmupd_cmd upd_cmd; int status; - i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); - if (cmd->offset == 0xffff) - return 0; + /* assume success */ + *perrno = 0; - memset(&cmd_details, 0, sizeof(cmd_details)); - cmd_details.wb_desc = &hw->nvm_wb_desc; + /* early check for status command and debug msgs */ + upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); - aq_desc_len = sizeof(struct i40e_aq_desc); - memset(&hw->nvm_wb_desc, 0, aq_desc_len); + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", + i40e_nvm_update_state_str[upd_cmd], + hw->nvmupd_state, + hw->nvm_release_on_done, hw->nvm_wait_opcode, + cmd->command, cmd->config, cmd->offset, cmd->data_size); - /* get the aq descriptor */ - if (cmd->data_size < aq_desc_len) { + if (upd_cmd == I40E_NVMUPD_INVALID) { + *perrno = -EFAULT; i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: not enough aq desc bytes for exec, size %d < %d\n", - cmd->data_size, aq_desc_len); - *perrno = -EINVAL; - return -EINVAL; + "i40e_nvmupd_validate_command returns %d errno %d\n", + upd_cmd, *perrno); } - aq_desc = (struct i40e_aq_desc *)bytes; - /* if data buffer needed, make sure it's ready */ - aq_data_len = cmd->data_size - aq_desc_len; - buff_size = max_t(u32, aq_data_len, le16_to_cpu(aq_desc->datalen)); - if (buff_size) { - if (!hw->nvm_buff.va) { - status = i40e_allocate_virt_mem(hw, &hw->nvm_buff, - hw->aq.asq_buf_size); - if (status) - i40e_debug(hw, I40E_DEBUG_NVM, - "NVMUPD: i40e_allocate_virt_mem for exec buff failed, %d\n", - status); - } - - if (hw->nvm_buff.va) { - buff = hw->nvm_buff.va; - memcpy(buff, &bytes[aq_desc_len], aq_data_len); + /* a status request returns immediately rather than + * going into the state machine + */ + if (upd_cmd == I40E_NVMUPD_STATUS) { + if (!cmd->data_size) { + *perrno = -EFAULT; + return -EINVAL; } - } - - if (cmd->offset) - memset(&hw->nvm_aq_event_desc, 0, aq_desc_len); - - /* and away we go! */ - status = i40e_asq_send_command(hw, aq_desc, buff, - buff_size, &cmd_details); - if (status) { - i40e_debug(hw, I40E_DEBUG_NVM, - "%s err %pe aq_err %s\n", - __func__, ERR_PTR(status), - i40e_aq_str(hw, hw->aq.asq_last_status)); - *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); - return status; - } - - /* should we wait for a followup event? 
*/ - if (cmd->offset) { - hw->nvm_wait_opcode = cmd->offset; - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; - } - - return status; -} -/** - * i40e_nvmupd_get_aq_result - Get the results from the previous exec_aq - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @bytes: pointer to the data buffer - * @perrno: pointer to return error code - * - * cmd structure contains identifiers and data buffer - **/ -static int i40e_nvmupd_get_aq_result(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) -{ - u32 aq_total_len; - u32 aq_desc_len; - int remainder; - u8 *buff; - - i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); - - aq_desc_len = sizeof(struct i40e_aq_desc); - aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_wb_desc.datalen); + bytes[0] = hw->nvmupd_state; - /* check offset range */ - if (cmd->offset > aq_total_len) { - i40e_debug(hw, I40E_DEBUG_NVM, "%s: offset too big %d > %d\n", - __func__, cmd->offset, aq_total_len); - *perrno = -EINVAL; - return -EINVAL; - } + if (cmd->data_size >= 4) { + bytes[1] = 0; + *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; + } - /* check copylength range */ - if (cmd->data_size > (aq_total_len - cmd->offset)) { - int new_len = aq_total_len - cmd->offset; + /* Clear error status on read */ + if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - i40e_debug(hw, I40E_DEBUG_NVM, "%s: copy length %d too big, trimming to %d\n", - __func__, cmd->data_size, new_len); - cmd->data_size = new_len; + return 0; } - remainder = cmd->data_size; - if (cmd->offset < aq_desc_len) { - u32 len = aq_desc_len - cmd->offset; - - len = min(len, cmd->data_size); - i40e_debug(hw, I40E_DEBUG_NVM, "%s: aq_desc bytes %d to %d\n", - __func__, cmd->offset, cmd->offset + len); - - buff = ((u8 *)&hw->nvm_wb_desc) + cmd->offset; - memcpy(bytes, buff, len); - - bytes += len; - remainder -= len; - buff = hw->nvm_buff.va; - } else { - buff = hw->nvm_buff.va + (cmd->offset - aq_desc_len); + /* Clear status even it is not read and log */ + if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) { + i40e_debug(hw, I40E_DEBUG_NVM, + "Clearing I40E_NVMUPD_STATE_ERROR state without reading\n"); + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } - if (remainder > 0) { - int start_byte = buff - (u8 *)hw->nvm_buff.va; - - i40e_debug(hw, I40E_DEBUG_NVM, "%s: databuf bytes %d to %d\n", - __func__, start_byte, start_byte + remainder); - memcpy(bytes, buff, remainder); - } + /* Acquire lock to prevent race condition where adminq_task + * can execute after i40e_nvmupd_nvm_read/write but before state + * variables (nvm_wait_opcode, nvm_release_on_done) are updated. + * + * During NVMUpdate, it is observed that lock could be held for + * ~5ms for most commands. However lock is held for ~60ms for + * NVMUPD_CSUM_LCB command. 
+ */ + mutex_lock(&hw->aq.arq_mutex); + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT: + status = i40e_nvmupd_state_init(hw, cmd, bytes, perrno); + break; - return 0; -} + case I40E_NVMUPD_STATE_READING: + status = i40e_nvmupd_state_reading(hw, cmd, bytes, perrno); + break; -/** - * i40e_nvmupd_get_aq_event - Get the Admin Queue event from previous exec_aq - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @bytes: pointer to the data buffer - * @perrno: pointer to return error code - * - * cmd structure contains identifiers and data buffer - **/ -static int i40e_nvmupd_get_aq_event(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) -{ - u32 aq_total_len; - u32 aq_desc_len; + case I40E_NVMUPD_STATE_WRITING: + status = i40e_nvmupd_state_writing(hw, cmd, bytes, perrno); + break; - i40e_debug(hw, I40E_DEBUG_NVM, "NVMUPD: %s\n", __func__); + case I40E_NVMUPD_STATE_INIT_WAIT: + case I40E_NVMUPD_STATE_WRITE_WAIT: + /* if we need to stop waiting for an event, clear + * the wait info and return before doing anything else + */ + if (cmd->offset == 0xffff) { + i40e_nvmupd_clear_wait_state(hw); + status = 0; + break; + } - aq_desc_len = sizeof(struct i40e_aq_desc); - aq_total_len = aq_desc_len + le16_to_cpu(hw->nvm_aq_event_desc.datalen); + status = -EBUSY; + *perrno = -EBUSY; + break; - /* check copylength range */ - if (cmd->data_size > aq_total_len) { + default: + /* invalid state, should never happen */ i40e_debug(hw, I40E_DEBUG_NVM, - "%s: copy length %d too big, trimming to %d\n", - __func__, cmd->data_size, aq_total_len); - cmd->data_size = aq_total_len; + "NVMUPD: no such state %d\n", hw->nvmupd_state); + status = -EOPNOTSUPP; + *perrno = -ESRCH; + break; } - memcpy(bytes, &hw->nvm_aq_event_desc, cmd->data_size); - - return 0; + mutex_unlock(&hw->aq.arq_mutex); + return status; } /** - * i40e_nvmupd_nvm_read - Read NVM - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @bytes: pointer to the data buffer - * @perrno: pointer to return error code - * - * cmd structure contains identifiers and data buffer + * i40e_nvmupd_clear_wait_state - clear wait state on hw + * @hw: pointer to the hardware structure **/ -static int i40e_nvmupd_nvm_read(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +void i40e_nvmupd_clear_wait_state(struct i40e_hw *hw) { - struct i40e_asq_cmd_details cmd_details; - u8 module, transaction; - int status; - bool last; - - transaction = i40e_nvmupd_get_transaction(cmd->config); - module = i40e_nvmupd_get_module(cmd->config); - last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA); - - memset(&cmd_details, 0, sizeof(cmd_details)); - cmd_details.wb_desc = &hw->nvm_wb_desc; + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: clearing wait on opcode 0x%04x\n", + hw->nvm_wait_opcode); - status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size, - bytes, last, &cmd_details); - if (status) { - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_read mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_read status %d aq %d\n", - status, hw->aq.asq_last_status); - *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + if (hw->nvm_release_on_done) { + i40e_release_nvm(hw); + hw->nvm_release_on_done = false; } + hw->nvm_wait_opcode = 0; - return status; -} - -/** - * i40e_nvmupd_nvm_erase - Erase an NVM module - * @hw: pointer to hardware structure - 
* @cmd: pointer to nvm update command buffer - * @perrno: pointer to return error code - * - * module, offset, data_size and data are in cmd structure - **/ -static int i40e_nvmupd_nvm_erase(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - int *perrno) -{ - struct i40e_asq_cmd_details cmd_details; - u8 module, transaction; - int status = 0; - bool last; + if (hw->aq.arq_last_status) { + hw->nvmupd_state = I40E_NVMUPD_STATE_ERROR; + return; + } - transaction = i40e_nvmupd_get_transaction(cmd->config); - module = i40e_nvmupd_get_module(cmd->config); - last = (transaction & I40E_NVM_LCB); + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; - memset(&cmd_details, 0, sizeof(cmd_details)); - cmd_details.wb_desc = &hw->nvm_wb_desc; + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; - status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size, - last, &cmd_details); - if (status) { - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_erase mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_erase status %d aq %d\n", - status, hw->aq.asq_last_status); - *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + default: + break; } - - return status; } /** - * i40e_nvmupd_nvm_write - Write NVM - * @hw: pointer to hardware structure - * @cmd: pointer to nvm update command buffer - * @bytes: pointer to the data buffer - * @perrno: pointer to return error code - * - * module, offset, data_size and data are in cmd structure + * i40e_nvmupd_check_wait_event - handle NVM update operation events + * @hw: pointer to the hardware structure + * @opcode: the event that just happened + * @desc: AdminQ descriptor **/ -static int i40e_nvmupd_nvm_write(struct i40e_hw *hw, - struct i40e_nvm_access *cmd, - u8 *bytes, int *perrno) +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode, + struct i40e_aq_desc *desc) { - struct i40e_asq_cmd_details cmd_details; - u8 module, transaction; - u8 preservation_flags; - int status = 0; - bool last; - - transaction = i40e_nvmupd_get_transaction(cmd->config); - module = i40e_nvmupd_get_module(cmd->config); - last = (transaction & I40E_NVM_LCB); - preservation_flags = i40e_nvmupd_get_preservation_flags(cmd->config); - - memset(&cmd_details, 0, sizeof(cmd_details)); - cmd_details.wb_desc = &hw->nvm_wb_desc; + u32 aq_desc_len = sizeof(struct i40e_aq_desc); - status = i40e_aq_update_nvm(hw, module, cmd->offset, - (u16)cmd->data_size, bytes, last, - preservation_flags, &cmd_details); - if (status) { - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n", - module, cmd->offset, cmd->data_size); - i40e_debug(hw, I40E_DEBUG_NVM, - "i40e_nvmupd_nvm_write status %d aq %d\n", - status, hw->aq.asq_last_status); - *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); + if (opcode == hw->nvm_wait_opcode) { + memcpy(&hw->nvm_aq_event_desc, desc, aq_desc_len); + i40e_nvmupd_clear_wait_state(hw); } - - return status; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_prototype.h b/drivers/net/ethernet/intel/i40e/i40e_prototype.h index ce1f11b8ad..5a0699ca7c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_prototype.h +++ b/drivers/net/ethernet/intel/i40e/i40e_prototype.h @@ -371,13 +371,6 @@ void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); int i40e_set_mac_type(struct i40e_hw *hw); -extern struct i40e_rx_ptype_decoded 
i40e_ptype_lookup[]; - -static inline struct i40e_rx_ptype_decoded decode_rx_desc_ptype(u8 ptype) -{ - return i40e_ptype_lookup[ptype]; -} - /** * i40e_virtchnl_link_speed - Convert AdminQ link_speed to virtchnl definition * @link_speed: the speed to convert diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index e7ebcb09f2..b72a4b5d76 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -1472,7 +1472,8 @@ void i40e_ptp_restore_hw_time(struct i40e_pf *pf) **/ void i40e_ptp_init(struct i40e_pf *pf) { - struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev; + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); + struct net_device *netdev = vsi->netdev; struct i40e_hw *hw = &pf->hw; u32 pf_id; long err; @@ -1536,6 +1537,7 @@ void i40e_ptp_init(struct i40e_pf *pf) **/ void i40e_ptp_stop(struct i40e_pf *pf) { + struct i40e_vsi *main_vsi = i40e_pf_get_main_vsi(pf); struct i40e_hw *hw = &pf->hw; u32 regval; @@ -1555,7 +1557,7 @@ void i40e_ptp_stop(struct i40e_pf *pf) ptp_clock_unregister(pf->ptp_clock); pf->ptp_clock = NULL; dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, - pf->vsi[pf->lan_vsi]->netdev->name); + main_vsi->netdev->name); } if (i40e_is_ptp_pin_dev(&pf->hw)) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_trace.h b/drivers/net/ethernet/intel/i40e/i40e_trace.h index 33b4e30f5e..759f3d1c4c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_trace.h +++ b/drivers/net/ethernet/intel/i40e/i40e_trace.h @@ -89,8 +89,8 @@ TRACE_EVENT(i40e_napi_poll, __entry->tx_clean_complete = tx_clean_complete; __entry->irq_num = q->irq_num; __entry->curr_cpu = get_cpu(); - __assign_str(qname, q->name); - __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV); + __assign_str(qname); + __assign_str(dev_name); __assign_bitmask(irq_affinity, cpumask_bits(&q->affinity_mask), nr_cpumask_bits); ), @@ -132,7 +132,7 @@ DECLARE_EVENT_CLASS( __entry->ring = ring; __entry->desc = desc; __entry->buf = buf; - __assign_str(devname, ring->netdev->name); + __assign_str(devname); ), TP_printk( @@ -177,7 +177,7 @@ DECLARE_EVENT_CLASS( __entry->ring = ring; __entry->desc = desc; __entry->xdp = xdp; - __assign_str(devname, ring->netdev->name); + __assign_str(devname); ), TP_printk( @@ -219,7 +219,7 @@ DECLARE_EVENT_CLASS( TP_fast_assign( __entry->skb = skb; __entry->ring = ring; - __assign_str(devname, ring->netdev->name); + __assign_str(devname); ), TP_printk( diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 1a12b73281..c006f716a3 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2,6 +2,7 @@ /* Copyright(c) 2013 - 2018 Intel Corporation. */ #include <linux/bpf_trace.h> +#include <linux/net/intel/libie/rx.h> #include <linux/prefetch.h> #include <linux/sctp.h> #include <net/mpls.h> @@ -23,7 +24,7 @@ static void i40e_fdir(struct i40e_ring *tx_ring, { struct i40e_filter_program_desc *fdir_desc; struct i40e_pf *pf = tx_ring->vsi->back; - u32 flex_ptype, dtype_cmd; + u32 flex_ptype, dtype_cmd, vsi_id; u16 i; /* grab the next descriptor */ @@ -41,8 +42,8 @@ static void i40e_fdir(struct i40e_ring *tx_ring, flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_PCTYPE_MASK, fdata->pctype); /* Use LAN VSI Id if not programmed by user */ - flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, - fdata->dest_vsi ? : pf->vsi[pf->lan_vsi]->id); + vsi_id = fdata->dest_vsi ? 
: i40e_pf_get_main_vsi(pf)->id; + flex_ptype |= FIELD_PREP(I40E_TXD_FLTR_QW0_DEST_VSI_MASK, vsi_id); dtype_cmd = I40E_TX_DESC_DTYPE_FILTER_PROG; @@ -860,13 +861,15 @@ u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw) /** * i40e_detect_recover_hung - Function to detect and recover hung_queues - * @vsi: pointer to vsi struct with tx queues + * @pf: pointer to PF struct * - * VSI has netdev and netdev has TX queues. This function is to check each of - * those TX queues if they are hung, trigger recovery by issuing SW interrupt. + * LAN VSI has netdev and netdev has TX queues. This function is to check + * each of those TX queues if they are hung, trigger recovery by issuing + * SW interrupt. **/ -void i40e_detect_recover_hung(struct i40e_vsi *vsi) +void i40e_detect_recover_hung(struct i40e_pf *pf) { + struct i40e_vsi *vsi = i40e_pf_get_main_vsi(pf); struct i40e_ring *tx_ring = NULL; struct net_device *netdev; unsigned int i; @@ -1741,38 +1744,30 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, struct sk_buff *skb, union i40e_rx_desc *rx_desc) { - struct i40e_rx_ptype_decoded decoded; + struct libeth_rx_pt decoded; u32 rx_error, rx_status; bool ipv4, ipv6; u8 ptype; u64 qword; - qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); - ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword); - rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword); - rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword); - decoded = decode_rx_desc_ptype(ptype); - skb->ip_summed = CHECKSUM_NONE; - skb_checksum_none_assert(skb); + qword = le64_to_cpu(rx_desc->wb.qword1.status_error_len); + ptype = FIELD_GET(I40E_RXD_QW1_PTYPE_MASK, qword); - /* Rx csum enabled and ip headers found? */ - if (!(vsi->netdev->features & NETIF_F_RXCSUM)) + decoded = libie_rx_pt_parse(ptype); + if (!libeth_rx_pt_has_checksum(vsi->netdev, decoded)) return; + rx_error = FIELD_GET(I40E_RXD_QW1_ERROR_MASK, qword); + rx_status = FIELD_GET(I40E_RXD_QW1_STATUS_MASK, qword); + /* did the hardware decode the packet and checksum? */ if (!(rx_status & BIT(I40E_RX_DESC_STATUS_L3L4P_SHIFT))) return; - /* both known and outer_ip must be set for the below code to work */ - if (!(decoded.known && decoded.outer_ip)) - return; - - ipv4 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV4); - ipv6 = (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP) && - (decoded.outer_ip_ver == I40E_RX_PTYPE_OUTER_IPV6); + ipv4 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV4; + ipv6 = libeth_rx_pt_get_ip_ver(decoded) == LIBETH_RX_PT_OUTER_IPV6; if (ipv4 && (rx_error & (BIT(I40E_RX_DESC_ERROR_IPE_SHIFT) | @@ -1800,20 +1795,10 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * we need to bump the checksum level by 1 to reflect the fact that * we are indicating we validated the inner checksum. 
*/ - if (decoded.tunnel_type >= I40E_RX_PTYPE_TUNNEL_IP_GRENAT) + if (decoded.tunnel_type >= LIBETH_RX_PT_TUNNEL_IP_GRENAT) skb->csum_level = 1; - /* Only report checksum unnecessary for TCP, UDP, or SCTP */ - switch (decoded.inner_prot) { - case I40E_RX_PTYPE_INNER_PROT_TCP: - case I40E_RX_PTYPE_INNER_PROT_UDP: - case I40E_RX_PTYPE_INNER_PROT_SCTP: - skb->ip_summed = CHECKSUM_UNNECESSARY; - fallthrough; - default: - break; - } - + skb->ip_summed = CHECKSUM_UNNECESSARY; return; checksum_fail: @@ -1821,29 +1806,6 @@ checksum_fail: } /** - * i40e_ptype_to_htype - get a hash type - * @ptype: the ptype value from the descriptor - * - * Returns a hash type to be used by skb_set_hash - **/ -static inline int i40e_ptype_to_htype(u8 ptype) -{ - struct i40e_rx_ptype_decoded decoded = decode_rx_desc_ptype(ptype); - - if (!decoded.known) - return PKT_HASH_TYPE_NONE; - - if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4) - return PKT_HASH_TYPE_L4; - else if (decoded.outer_ip == I40E_RX_PTYPE_OUTER_IP && - decoded.payload_layer == I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3) - return PKT_HASH_TYPE_L3; - else - return PKT_HASH_TYPE_L2; -} - -/** * i40e_rx_hash - set the hash value in the skb * @ring: descriptor ring * @rx_desc: specific descriptor @@ -1855,17 +1817,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring, struct sk_buff *skb, u8 rx_ptype) { + struct libeth_rx_pt decoded; u32 hash; const __le64 rss_mask = cpu_to_le64((u64)I40E_RX_DESC_FLTSTAT_RSS_HASH << I40E_RX_DESC_STATUS_FLTSTAT_SHIFT); - if (!(ring->netdev->features & NETIF_F_RXHASH)) + decoded = libie_rx_pt_parse(rx_ptype); + if (!libeth_rx_pt_has_hash(ring->netdev, decoded)) return; if ((rx_desc->wb.qword1.status_error_len & rss_mask) == rss_mask) { hash = le32_to_cpu(rx_desc->wb.qword0.hi_dword.rss); - skb_set_hash(skb, hash, i40e_ptype_to_htype(rx_ptype)); + libeth_rx_pt_set_hash(skb, hash, decoded); } } @@ -2144,9 +2108,7 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring, */ /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, - I40E_RX_HDR_SIZE, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, I40E_RX_HDR_SIZE); if (unlikely(!skb)) return NULL; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 2cdc7de630..7c26c9a2bf 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -470,7 +470,7 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring); int i40e_napi_poll(struct napi_struct *napi, int budget); void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); -void i40e_detect_recover_hung(struct i40e_vsi *vsi); +void i40e_detect_recover_hung(struct i40e_pf *pf); int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index d903149969..28568e1268 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -745,94 +745,6 @@ enum i40e_rx_desc_error_l3l4e_fcoe_masks { #define I40E_RXD_QW1_PTYPE_SHIFT 30 #define I40E_RXD_QW1_PTYPE_MASK (0xFFULL << I40E_RXD_QW1_PTYPE_SHIFT) -/* Packet type non-ip values */ -enum i40e_rx_l2_ptype { 
- I40E_RX_PTYPE_L2_RESERVED = 0, - I40E_RX_PTYPE_L2_MAC_PAY2 = 1, - I40E_RX_PTYPE_L2_TIMESYNC_PAY2 = 2, - I40E_RX_PTYPE_L2_FIP_PAY2 = 3, - I40E_RX_PTYPE_L2_OUI_PAY2 = 4, - I40E_RX_PTYPE_L2_MACCNTRL_PAY2 = 5, - I40E_RX_PTYPE_L2_LLDP_PAY2 = 6, - I40E_RX_PTYPE_L2_ECP_PAY2 = 7, - I40E_RX_PTYPE_L2_EVB_PAY2 = 8, - I40E_RX_PTYPE_L2_QCN_PAY2 = 9, - I40E_RX_PTYPE_L2_EAPOL_PAY2 = 10, - I40E_RX_PTYPE_L2_ARP = 11, - I40E_RX_PTYPE_L2_FCOE_PAY3 = 12, - I40E_RX_PTYPE_L2_FCOE_FCDATA_PAY3 = 13, - I40E_RX_PTYPE_L2_FCOE_FCRDY_PAY3 = 14, - I40E_RX_PTYPE_L2_FCOE_FCRSP_PAY3 = 15, - I40E_RX_PTYPE_L2_FCOE_FCOTHER_PA = 16, - I40E_RX_PTYPE_L2_FCOE_VFT_PAY3 = 17, - I40E_RX_PTYPE_L2_FCOE_VFT_FCDATA = 18, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRDY = 19, - I40E_RX_PTYPE_L2_FCOE_VFT_FCRSP = 20, - I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER = 21, - I40E_RX_PTYPE_GRENAT4_MAC_PAY3 = 58, - I40E_RX_PTYPE_GRENAT4_MACVLAN_IPV6_ICMP_PAY4 = 87, - I40E_RX_PTYPE_GRENAT6_MAC_PAY3 = 124, - I40E_RX_PTYPE_GRENAT6_MACVLAN_IPV6_ICMP_PAY4 = 153 -}; - -struct i40e_rx_ptype_decoded { - u32 known:1; - u32 outer_ip:1; - u32 outer_ip_ver:1; - u32 outer_frag:1; - u32 tunnel_type:3; - u32 tunnel_end_prot:2; - u32 tunnel_end_frag:1; - u32 inner_prot:4; - u32 payload_layer:3; -}; - -enum i40e_rx_ptype_outer_ip { - I40E_RX_PTYPE_OUTER_L2 = 0, - I40E_RX_PTYPE_OUTER_IP = 1 -}; - -enum i40e_rx_ptype_outer_ip_ver { - I40E_RX_PTYPE_OUTER_NONE = 0, - I40E_RX_PTYPE_OUTER_IPV4 = 0, - I40E_RX_PTYPE_OUTER_IPV6 = 1 -}; - -enum i40e_rx_ptype_outer_fragmented { - I40E_RX_PTYPE_NOT_FRAG = 0, - I40E_RX_PTYPE_FRAG = 1 -}; - -enum i40e_rx_ptype_tunnel_type { - I40E_RX_PTYPE_TUNNEL_NONE = 0, - I40E_RX_PTYPE_TUNNEL_IP_IP = 1, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT = 2, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC = 3, - I40E_RX_PTYPE_TUNNEL_IP_GRENAT_MAC_VLAN = 4, -}; - -enum i40e_rx_ptype_tunnel_end_prot { - I40E_RX_PTYPE_TUNNEL_END_NONE = 0, - I40E_RX_PTYPE_TUNNEL_END_IPV4 = 1, - I40E_RX_PTYPE_TUNNEL_END_IPV6 = 2, -}; - -enum i40e_rx_ptype_inner_prot { - I40E_RX_PTYPE_INNER_PROT_NONE = 0, - I40E_RX_PTYPE_INNER_PROT_UDP = 1, - I40E_RX_PTYPE_INNER_PROT_TCP = 2, - I40E_RX_PTYPE_INNER_PROT_SCTP = 3, - I40E_RX_PTYPE_INNER_PROT_ICMP = 4, - I40E_RX_PTYPE_INNER_PROT_TIMESYNC = 5 -}; - -enum i40e_rx_ptype_payload_layer { - I40E_RX_PTYPE_PAYLOAD_LAYER_NONE = 0, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY2 = 1, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY3 = 2, - I40E_RX_PTYPE_PAYLOAD_LAYER_PAY4 = 3, -}; - #define I40E_RXD_QW1_LENGTH_PBUF_SHIFT 38 #define I40E_RXD_QW1_LENGTH_PBUF_MASK (0x3FFFULL << \ I40E_RXD_QW1_LENGTH_PBUF_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 232b65b9c8..662622f01e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -795,13 +795,13 @@ error_param: static int i40e_alloc_vsi_res(struct i40e_vf *vf, u8 idx) { struct i40e_mac_filter *f = NULL; + struct i40e_vsi *main_vsi, *vsi; struct i40e_pf *pf = vf->pf; - struct i40e_vsi *vsi; u64 max_tx_rate = 0; int ret = 0; - vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, pf->vsi[pf->lan_vsi]->seid, - vf->vf_id); + main_vsi = i40e_pf_get_main_vsi(pf); + vsi = i40e_vsi_setup(pf, I40E_VSI_SRIOV, main_vsi->seid, vf->vf_id); if (!vsi) { dev_err(&pf->pdev->dev, @@ -3322,8 +3322,9 @@ error_param: static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) { struct i40e_pf *pf = vf->pf; - int abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; + struct i40e_vsi *main_vsi; int aq_ret = 0; + int abs_vf_id; if 
(!test_bit(I40E_VF_STATE_ACTIVE, &vf->vf_states) || !test_bit(I40E_VF_STATE_RDMAENA, &vf->vf_states)) { @@ -3331,8 +3332,9 @@ static int i40e_vc_rdma_msg(struct i40e_vf *vf, u8 *msg, u16 msglen) goto error_param; } - i40e_notify_client_of_vf_msg(pf->vsi[pf->lan_vsi], abs_vf_id, - msg, msglen); + main_vsi = i40e_pf_get_main_vsi(pf); + abs_vf_id = vf->vf_id + pf->hw.func_caps.vf_base_id; + i40e_notify_client_of_vf_msg(main_vsi, abs_vf_id, msg, msglen); error_param: /* send the response to the VF */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 11500003af..4e885df789 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -301,8 +301,7 @@ static struct sk_buff *i40e_construct_skb_zc(struct i40e_ring *rx_ring, net_prefetch(xdp->data_meta); /* allocate a skb to store the frags */ - skb = __napi_alloc_skb(&rx_ring->q_vector->napi, totalsize, - GFP_ATOMIC | __GFP_NOWARN); + skb = napi_alloc_skb(&rx_ring->q_vector->napi, totalsize); if (unlikely(!skb)) goto out; @@ -483,7 +482,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget) bi = *i40e_rx_bi(rx_ring, next_to_process); xsk_buff_set_size(bi, size); - xsk_buff_dma_sync_for_cpu(bi, rx_ring->xsk_pool); + xsk_buff_dma_sync_for_cpu(bi); if (!first) first = bi; |