Diffstat (limited to 'drivers/net/ethernet/aquantia/atlantic/hw_atl2'): 9 files changed, 3115 insertions, 0 deletions
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c new file mode 100644 index 000000000..5dfc75157 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.c @@ -0,0 +1,863 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Atlantic Network Driver + * Copyright (C) 2020 Marvell International Ltd. + */ + +#include "aq_hw.h" +#include "aq_hw_utils.h" +#include "aq_ring.h" +#include "aq_nic.h" +#include "hw_atl/hw_atl_b0.h" +#include "hw_atl/hw_atl_utils.h" +#include "hw_atl/hw_atl_llh.h" +#include "hw_atl/hw_atl_llh_internal.h" +#include "hw_atl2_utils.h" +#include "hw_atl2_llh.h" +#include "hw_atl2_internal.h" +#include "hw_atl2_llh_internal.h" + +static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location, + u32 tag, u32 mask, u32 action); + +#define DEFAULT_BOARD_BASIC_CAPABILITIES \ + .is_64_dma = true, \ + .op64bit = true, \ + .msix_irqs = 8U, \ + .irq_mask = ~0U, \ + .vecs = HW_ATL2_RSS_MAX, \ + .tcs_max = HW_ATL2_TC_MAX, \ + .rxd_alignment = 1U, \ + .rxd_size = HW_ATL2_RXD_SIZE, \ + .rxds_max = HW_ATL2_MAX_RXD, \ + .rxds_min = HW_ATL2_MIN_RXD, \ + .txd_alignment = 1U, \ + .txd_size = HW_ATL2_TXD_SIZE, \ + .txds_max = HW_ATL2_MAX_TXD, \ + .txds_min = HW_ATL2_MIN_TXD, \ + .txhwb_alignment = 4096U, \ + .tx_rings = HW_ATL2_TX_RINGS, \ + .rx_rings = HW_ATL2_RX_RINGS, \ + .hw_features = NETIF_F_HW_CSUM | \ + NETIF_F_RXCSUM | \ + NETIF_F_RXHASH | \ + NETIF_F_SG | \ + NETIF_F_TSO | \ + NETIF_F_TSO6 | \ + NETIF_F_LRO | \ + NETIF_F_NTUPLE | \ + NETIF_F_HW_VLAN_CTAG_FILTER | \ + NETIF_F_HW_VLAN_CTAG_RX | \ + NETIF_F_HW_VLAN_CTAG_TX | \ + NETIF_F_GSO_UDP_L4 | \ + NETIF_F_GSO_PARTIAL | \ + NETIF_F_HW_TC, \ + .hw_priv_flags = IFF_UNICAST_FLT, \ + .flow_control = true, \ + .mtu = HW_ATL2_MTU_JUMBO, \ + .mac_regs_count = 72, \ + .hw_alive_check_addr = 0x10U, \ + .priv_data_len = sizeof(struct hw_atl2_priv) + +const struct aq_hw_caps_s hw_atl2_caps_aqc113 = { + DEFAULT_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_10G | + AQ_NIC_RATE_5G | + AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M | + AQ_NIC_RATE_10M, +}; + +const struct aq_hw_caps_s hw_atl2_caps_aqc115c = { + DEFAULT_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_2G5 | + AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M | + AQ_NIC_RATE_10M, +}; + +const struct aq_hw_caps_s hw_atl2_caps_aqc116c = { + DEFAULT_BOARD_BASIC_CAPABILITIES, + .media_type = AQ_HW_MEDIA_TYPE_TP, + .link_speed_msk = AQ_NIC_RATE_1G | + AQ_NIC_RATE_100M | + AQ_NIC_RATE_10M, +}; + +static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self) +{ + return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL2_FW_SM_ACT_RSLVR); +} + +static int hw_atl2_hw_reset(struct aq_hw_s *self) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + int err; + + err = hw_atl2_utils_soft_reset(self); + if (err) + return err; + + memset(priv, 0, sizeof(*priv)); + + self->aq_fw_ops->set_state(self, MPI_RESET); + + err = aq_hw_err_from_flags(self); + + return err; +} + +static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + unsigned int tcs, q_per_tc; + unsigned int tc, q; + u32 rx_map = 0; + u32 tx_map = 0; + + hw_atl2_tpb_tx_tc_q_rand_map_en_set(self, 1U); + + switch (cfg->tc_mode) { + case AQ_TC_MODE_8TCS: + tcs = 8; + q_per_tc = 4; + break; + case AQ_TC_MODE_4TCS: + tcs = 4; + q_per_tc = 8; + break; + default: + return -EINVAL; + } + + 
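	/* The loop below packs one TC index per queue into shared 32-bit map
	 * registers: Rx packs eight queues per register (4-bit stride, base
	 * HW_ATL2_RX_Q_TC_MAP_ADR), Tx packs four queues per register (8-bit
	 * stride, base HW_ATL2_TX_Q_TC_MAP_ADR); the TC index itself is
	 * 3 bits wide. The accumulated map word is flushed whenever the next
	 * queue would land in a different register. For example, in 4-TC mode
	 * (8 queues per TC) queue 11 belongs to TC 1: per the ADR/SHIFT macros
	 * in hw_atl2_llh_internal.h its Rx field sits at 0x5904, bits [14:12],
	 * and its Tx field at 0x79A4, bits [26:24].
	 */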
for (tc = 0; tc != tcs; tc++) { + unsigned int tc_q_offset = tc * q_per_tc; + + for (q = tc_q_offset; q != tc_q_offset + q_per_tc; q++) { + rx_map |= tc << HW_ATL2_RX_Q_TC_MAP_SHIFT(q); + if (HW_ATL2_RX_Q_TC_MAP_ADR(q) != + HW_ATL2_RX_Q_TC_MAP_ADR(q + 1)) { + aq_hw_write_reg(self, + HW_ATL2_RX_Q_TC_MAP_ADR(q), + rx_map); + rx_map = 0; + } + + tx_map |= tc << HW_ATL2_TX_Q_TC_MAP_SHIFT(q); + if (HW_ATL2_TX_Q_TC_MAP_ADR(q) != + HW_ATL2_TX_Q_TC_MAP_ADR(q + 1)) { + aq_hw_write_reg(self, + HW_ATL2_TX_Q_TC_MAP_ADR(q), + tx_map); + tx_map = 0; + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl2_hw_qos_set(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + u32 tx_buff_size = HW_ATL2_TXBUF_MAX; + u32 rx_buff_size = HW_ATL2_RXBUF_MAX; + unsigned int prio = 0U; + u32 tc = 0U; + + /* TPS Descriptor rate init */ + hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); + hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); + + /* TPS VM init */ + hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); + + tx_buff_size /= cfg->tcs; + rx_buff_size /= cfg->tcs; + for (tc = 0; tc < cfg->tcs; tc++) { + u32 threshold = 0U; + + /* Tx buf size TC0 */ + hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc); + + threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U; + hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc); + + threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U; + hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc); + + /* QoS Rx buf size per TC */ + hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc); + + threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U; + hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc); + + threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U; + hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc); + + hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc); + } + + /* QoS 802.1p priority -> TC mapping */ + for (prio = 0; prio < 8; ++prio) + hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio, + cfg->prio_tc_map[prio]); + + /* ATL2 Apply ring to TC mapping */ + hw_atl2_hw_queue_to_tc_map_set(self); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl2_hw_rss_set(struct aq_hw_s *self, + struct aq_rss_parameters *rss_params) +{ + u8 *indirection_table = rss_params->indirection_table; + const u32 num_tcs = aq_hw_num_tcs(self); + u32 rpf_redir2_enable; + int tc; + int i; + + rpf_redir2_enable = num_tcs > 4 ? 
1 : 0; + + hw_atl2_rpf_redirection_table2_select_set(self, rpf_redir2_enable); + + for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;) { + for (tc = 0; tc != num_tcs; tc++) { + hw_atl2_new_rpf_rss_redir_set(self, tc, i, + tc * + aq_hw_q_per_tc(self) + + indirection_table[i]); + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl2_hw_init_tx_tc_rate_limit(struct aq_hw_s *self) +{ + static const u32 max_weight = BIT(HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH) - 1; + /* Scale factor is based on the number of bits in fractional portion */ + static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH); + static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >> + HW_ATL_TPS_DESC_RATE_Y_SHIFT; + const u32 link_speed = self->aq_link_status.mbps; + struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; + unsigned long num_min_rated_tcs = 0; + u32 tc_weight[AQ_CFG_TCS_MAX]; + u32 fixed_max_credit_4b; + u32 fixed_max_credit; + u8 min_rate_msk = 0; + u32 sum_weight = 0; + int tc; + + /* By default max_credit is based upon MTU (in unit of 64b) */ + fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64; + /* in unit of 4b */ + fixed_max_credit_4b = nic_cfg->aq_hw_caps->mtu / 4; + + if (link_speed) { + min_rate_msk = nic_cfg->tc_min_rate_msk & + (BIT(nic_cfg->tcs) - 1); + num_min_rated_tcs = hweight8(min_rate_msk); + } + + /* First, calculate weights where min_rate is specified */ + if (num_min_rated_tcs) { + for (tc = 0; tc != nic_cfg->tcs; tc++) { + if (!nic_cfg->tc_min_rate[tc]) { + tc_weight[tc] = 0; + continue; + } + + tc_weight[tc] = (-1L + link_speed + + nic_cfg->tc_min_rate[tc] * + max_weight) / + link_speed; + tc_weight[tc] = min(tc_weight[tc], max_weight); + sum_weight += tc_weight[tc]; + } + } + + /* WSP, if min_rate is set for at least one TC. + * RR otherwise. + */ + hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 1U : 0U); + /* Data TC Arbiter takes precedence over Descriptor TC Arbiter, + * leave Descriptor TC Arbiter as RR. + */ + hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); + + hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U); + + for (tc = 0; tc != nic_cfg->tcs; tc++) { + const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U; + const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); + u32 weight, max_credit; + + hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc, + fixed_max_credit); + hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E); + + if (num_min_rated_tcs) { + weight = tc_weight[tc]; + + if (!weight && sum_weight < max_weight) + weight = (max_weight - sum_weight) / + (nic_cfg->tcs - num_min_rated_tcs); + else if (!weight) + weight = 0x640; + + max_credit = max(2 * weight, fixed_max_credit_4b); + } else { + weight = 0x640; + max_credit = 0xFFF0; + } + + hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight); + hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc, + max_credit); + + hw_atl_tps_tx_desc_rate_en_set(self, desc, en); + + if (en) { + /* Nominal rate is always 10G */ + const u32 rate = 10000U * scale / + nic_cfg->tc_max_rate[tc]; + const u32 rate_int = rate >> + HW_ATL_TPS_DESC_RATE_Y_WIDTH; + const u32 rate_frac = rate & frac_msk; + + hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int); + hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac); + } else { + /* A value of 1 indicates the queue is not + * rate controlled. 
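			 * (When a max rate is configured above, the pair forms an
			 * X.Y fixed-point divider derived from the 10G nominal
			 * rate: rate_x carries the integer part and rate_y the
			 * HW_ATL_TPS_DESC_RATE_Y_WIDTH-bit fractional part.)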
+ */ + hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); + hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); + } + } + for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) { + const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); + + hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U); + hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); + hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; + + /* Tx TC/RSS number config */ + hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode); + + hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); + hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); + hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); + + /* Tx interrupts */ + hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + + /* misc */ + hw_atl_tdm_tx_dca_en_set(self, 0U); + hw_atl_tdm_tx_dca_mode_set(self, 0U); + + hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U); + + hw_atl2_tpb_tx_buf_clk_gate_en_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map; + u16 action; + u8 index; + int i; + + /* Action Resolver Table (ART) is used by RPF to decide which action + * to take with a packet based upon input tag and tag mask, where: + * - input tag is a combination of 3-bit VLan Prio (PTP) and + * 29-bit concatenation of all tags from filter block; + * - tag mask is a mask used for matching against input tag. + * The input_tag is compared with the all the Requested_tags in the + * Record table to find a match. Action field of the selected matched + * REC entry is used for further processing. If multiple entries match, + * the lowest REC entry, Action field will be selected. + */ + hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF); + hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC, + HW_ATL2_MAC_UC); + hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC); + + /* FW reserves the beginning of ART, thus all driver entries must + * start from the offset specified in FW caps. + */ + index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX; + hw_atl2_act_rslvr_table_set(self, index, 0, + HW_ATL2_RPF_TAG_UC_MASK | + HW_ATL2_RPF_TAG_ALLMC_MASK, + HW_ATL2_ACTION_DROP); + + index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX; + hw_atl2_act_rslvr_table_set(self, index, 0, + HW_ATL2_RPF_TAG_VLAN_MASK | + HW_ATL2_RPF_TAG_UNTAG_MASK, + HW_ATL2_ACTION_DROP); + + /* Configure ART to map given VLan Prio (PCP) to the TC index for + * RSS redirection table. + */ + for (i = 0; i < 8; i++) { + action = HW_ATL2_ACTION_ASSIGN_TC(prio_tc_map[i]); + + index = priv->art_base_index + HW_ATL2_RPF_PCP_TO_TC_INDEX + i; + hw_atl2_act_rslvr_table_set(self, index, + i << HW_ATL2_RPF_TAG_PCP_OFFSET, + HW_ATL2_RPF_TAG_PCP_MASK, action); + } +} + +static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self, + bool promisc) +{ + u16 off_action = (!promisc && + !hw_atl_rpfl2promiscuous_mode_en_get(self)) ? 
+ HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE; + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + u8 index; + + index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX; + hw_atl2_act_rslvr_table_set(self, index, 0, + HW_ATL2_RPF_TAG_VLAN_MASK | + HW_ATL2_RPF_TAG_UNTAG_MASK, off_action); +} + +static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc) +{ + u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP; + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + bool vlan_promisc_enable; + u8 index; + + index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX; + hw_atl2_act_rslvr_table_set(self, index, 0, + HW_ATL2_RPF_TAG_UC_MASK | + HW_ATL2_RPF_TAG_ALLMC_MASK, + off_action); + + /* turn VLAN promisc mode too */ + vlan_promisc_enable = hw_atl_rpf_vlan_prom_mode_en_get(self); + hw_atl2_hw_new_rx_filter_vlan_promisc(self, promisc | + vlan_promisc_enable); +} + +static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location, + u32 tag, u32 mask, u32 action) +{ + u32 val; + int err; + + err = readx_poll_timeout_atomic(hw_atl2_sem_act_rslvr_get, + self, val, val == 1, + 1, 10000U); + if (err) + return err; + + hw_atl2_rpf_act_rslvr_record_set(self, location, tag, mask, + action); + + hw_atl_reg_glb_cpu_sem_set(self, 1, HW_ATL2_FW_SM_ACT_RSLVR); + + return err; +} + +static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int i; + + /* Rx TC/RSS number config */ + hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode); + + /* Rx flow control */ + hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U); + + hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL); + + /* RSS Ring selection */ + hw_atl_b0_hw_init_rx_rss_ctrl1(self); + + /* Multicast filters */ + for (i = HW_ATL2_MAC_MAX; i--;) { + hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 
1U : 0U, i); + hw_atl_rpfl2unicast_flr_act_set(self, 1U, i); + } + + hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); + hw_atl_reg_rx_flr_mcst_flr_set(self, HW_ATL_MCAST_FLT_ANY_TO_HOST, 0U); + + /* Vlan filters */ + hw_atl_rpf_vlan_outer_etht_set(self, ETH_P_8021AD); + hw_atl_rpf_vlan_inner_etht_set(self, ETH_P_8021Q); + + hw_atl_rpf_vlan_prom_mode_en_set(self, 1); + + /* Always accept untagged packets */ + hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U); + hw_atl_rpf_vlan_untagged_act_set(self, 1U); + + hw_atl2_hw_init_new_rx_filters(self); + + /* Rx Interrupts */ + hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + + hw_atl_rpfl2broadcast_flr_act_set(self, 1U); + hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); + + hw_atl_rdm_rx_dca_en_set(self, 0U); + hw_atl_rdm_rx_dca_mode_set(self, 0U); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr) +{ + static u32 aq_hw_atl2_igcr_table_[4][2] = { + [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, + [AQ_HW_IRQ_LEGACY] = { 0x20000080U, 0x20000080U }, + [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U }, + [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U }, + }; + + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; + u8 base_index, count; + int err; + + err = hw_atl2_utils_get_action_resolve_table_caps(self, &base_index, + &count); + if (err) + return err; + + priv->art_base_index = 8 * base_index; + + hw_atl2_init_launchtime(self); + + hw_atl2_hw_init_tx_path(self); + hw_atl2_hw_init_rx_path(self); + + hw_atl_b0_hw_mac_addr_set(self, mac_addr); + + self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk); + self->aq_fw_ops->set_state(self, MPI_INIT); + + hw_atl2_hw_qos_set(self); + hw_atl2_hw_rss_set(self, &aq_nic_cfg->aq_rss); + hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); + + hw_atl2_rpf_new_enable_set(self, 1); + + /* Reset link status and read out initial hardware counters */ + self->aq_link_status.mbps = 0; + self->aq_fw_ops->update_stats(self); + + err = aq_hw_err_from_flags(self); + if (err < 0) + goto err_exit; + + /* Interrupts */ + hw_atl_reg_irq_glb_ctl_set(self, + aq_hw_atl2_igcr_table_[aq_nic_cfg->irq_type] + [(aq_nic_cfg->vecs > 1U) ? + 1 : 0]); + + hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); + + /* Interrupts */ + hw_atl_reg_gen_irq_map_set(self, + ((HW_ATL2_ERR_INT << 0x18) | + (1U << 0x1F)) | + ((HW_ATL2_ERR_INT << 0x10) | + (1U << 0x17)), 0U); + + hw_atl_b0_hw_offload_set(self, aq_nic_cfg); + +err_exit: + return err; +} + +static int hw_atl2_hw_ring_rx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + return hw_atl_b0_hw_ring_rx_init(self, aq_ring, aq_ring_param); +} + +static int hw_atl2_hw_ring_tx_init(struct aq_hw_s *self, + struct aq_ring_s *aq_ring, + struct aq_ring_param_s *aq_ring_param) +{ + return hw_atl_b0_hw_ring_tx_init(self, aq_ring, aq_ring_param); +} + +#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 
1U : 0U) + +static int hw_atl2_hw_packet_filter_set(struct aq_hw_s *self, + unsigned int packet_filter) +{ + hw_atl2_hw_new_rx_filter_promisc(self, IS_FILTER_ENABLED(IFF_PROMISC)); + + return hw_atl_b0_hw_packet_filter_set(self, packet_filter); +} + +#undef IS_FILTER_ENABLED + +static int hw_atl2_hw_multicast_list_set(struct aq_hw_s *self, + u8 ar_mac + [AQ_HW_MULTICAST_ADDRESS_MAX] + [ETH_ALEN], + u32 count) +{ + struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; + int err = 0; + + if (count > (HW_ATL2_MAC_MAX - HW_ATL2_MAC_MIN)) { + err = -EBADRQC; + goto err_exit; + } + for (cfg->mc_list_count = 0U; + cfg->mc_list_count < count; + ++cfg->mc_list_count) { + u32 i = cfg->mc_list_count; + u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); + u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | + (ar_mac[i][4] << 8) | ar_mac[i][5]; + + hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL2_MAC_MIN + i); + + hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, + HW_ATL2_MAC_MIN + i); + + hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, + HW_ATL2_MAC_MIN + i); + + hw_atl2_rpfl2_uc_flr_tag_set(self, 1, HW_ATL2_MAC_MIN + i); + + hw_atl_rpfl2_uc_flr_en_set(self, (cfg->is_mc_list_enabled), + HW_ATL2_MAC_MIN + i); + } + + err = aq_hw_err_from_flags(self); + +err_exit: + return err; +} + +static int hw_atl2_hw_interrupt_moderation_set(struct aq_hw_s *self) +{ + unsigned int i = 0U; + u32 itr_tx = 2U; + u32 itr_rx = 2U; + + switch (self->aq_nic_cfg->itr) { + case AQ_CFG_INTERRUPT_MODERATION_ON: + case AQ_CFG_INTERRUPT_MODERATION_AUTO: + hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U); + hw_atl_tdm_tdm_intr_moder_en_set(self, 1U); + hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U); + hw_atl_rdm_rdm_intr_moder_en_set(self, 1U); + + if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) { + /* HW timers are in 2us units */ + int tx_max_timer = self->aq_nic_cfg->tx_itr / 2; + int tx_min_timer = tx_max_timer / 2; + + int rx_max_timer = self->aq_nic_cfg->rx_itr / 2; + int rx_min_timer = rx_max_timer / 2; + + tx_max_timer = min(HW_ATL2_INTR_MODER_MAX, + tx_max_timer); + tx_min_timer = min(HW_ATL2_INTR_MODER_MIN, + tx_min_timer); + rx_max_timer = min(HW_ATL2_INTR_MODER_MAX, + rx_max_timer); + rx_min_timer = min(HW_ATL2_INTR_MODER_MIN, + rx_min_timer); + + itr_tx |= tx_min_timer << 0x8U; + itr_tx |= tx_max_timer << 0x10U; + itr_rx |= rx_min_timer << 0x8U; + itr_rx |= rx_max_timer << 0x10U; + } else { + static unsigned int hw_atl2_timers_table_tx_[][2] = { + {0xfU, 0xffU}, /* 10Gbit */ + {0xfU, 0x1ffU}, /* 5Gbit */ + {0xfU, 0x1ffU}, /* 5Gbit 5GS */ + {0xfU, 0x1ffU}, /* 2.5Gbit */ + {0xfU, 0x1ffU}, /* 1Gbit */ + {0xfU, 0x1ffU}, /* 100Mbit */ + }; + static unsigned int hw_atl2_timers_table_rx_[][2] = { + {0x6U, 0x38U},/* 10Gbit */ + {0xCU, 0x70U},/* 5Gbit */ + {0xCU, 0x70U},/* 5Gbit 5GS */ + {0x18U, 0xE0U},/* 2.5Gbit */ + {0x30U, 0x80U},/* 1Gbit */ + {0x4U, 0x50U},/* 100Mbit */ + }; + unsigned int mbps = self->aq_link_status.mbps; + unsigned int speed_index; + + speed_index = hw_atl_utils_mbps_2_speed_index(mbps); + + /* Update user visible ITR settings */ + self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_ + [speed_index][1] * 2; + self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_ + [speed_index][1] * 2; + + itr_tx |= hw_atl2_timers_table_tx_ + [speed_index][0] << 0x8U; + itr_tx |= hw_atl2_timers_table_tx_ + [speed_index][1] << 0x10U; + + itr_rx |= hw_atl2_timers_table_rx_ + [speed_index][0] << 0x8U; + itr_rx |= hw_atl2_timers_table_rx_ + [speed_index][1] << 0x10U; + } + break; + case AQ_CFG_INTERRUPT_MODERATION_OFF: + 
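		/* Moderation disabled: re-enable per-descriptor write-back
		 * interrupts and clear both ITR timer fields (min in bits
		 * [15:8], max in bits [24:16], in 2 us units) so completions
		 * are signalled without coalescing.
		 */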
hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); + hw_atl_tdm_tdm_intr_moder_en_set(self, 0U); + hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); + hw_atl_rdm_rdm_intr_moder_en_set(self, 0U); + itr_tx = 0U; + itr_rx = 0U; + break; + } + + for (i = HW_ATL2_RINGS_MAX; i--;) { + hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i); + hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i); + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl2_hw_stop(struct aq_hw_s *self) +{ + hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK); + + return 0; +} + +static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self) +{ + return &self->curr_stats; +} + +static int hw_atl2_hw_vlan_set(struct aq_hw_s *self, + struct aq_rx_filter_vlan *aq_vlans) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + u32 queue; + u8 index; + int i; + + hw_atl_rpf_vlan_prom_mode_en_set(self, 1U); + + for (i = 0; i < HW_ATL_VLAN_MAX_FILTERS; i++) { + queue = HW_ATL2_ACTION_ASSIGN_QUEUE(aq_vlans[i].queue); + + hw_atl_rpf_vlan_flr_en_set(self, 0U, i); + hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i); + index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i; + hw_atl2_act_rslvr_table_set(self, index, 0, 0, + HW_ATL2_ACTION_DISABLE); + if (aq_vlans[i].enable) { + hw_atl_rpf_vlan_id_flr_set(self, + aq_vlans[i].vlan_id, i); + hw_atl_rpf_vlan_flr_act_set(self, 1U, i); + hw_atl_rpf_vlan_flr_en_set(self, 1U, i); + + if (aq_vlans[i].queue != 0xFF) { + hw_atl_rpf_vlan_rxq_flr_set(self, + aq_vlans[i].queue, + i); + hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i); + + hw_atl2_rpf_vlan_flr_tag_set(self, i + 2, i); + + index = priv->art_base_index + + HW_ATL2_RPF_VLAN_USER_INDEX + i; + hw_atl2_act_rslvr_table_set(self, index, + (i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET, + HW_ATL2_RPF_TAG_VLAN_MASK, queue); + } else { + hw_atl2_rpf_vlan_flr_tag_set(self, 1, i); + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl2_hw_vlan_ctrl(struct aq_hw_s *self, bool enable) +{ + /* set promisc in case of disabing the vlan filter */ + hw_atl_rpf_vlan_prom_mode_en_set(self, !enable); + hw_atl2_hw_new_rx_filter_vlan_promisc(self, !enable); + + return aq_hw_err_from_flags(self); +} + +const struct aq_hw_ops hw_atl2_ops = { + .hw_soft_reset = hw_atl2_utils_soft_reset, + .hw_prepare = hw_atl2_utils_initfw, + .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, + .hw_init = hw_atl2_hw_init, + .hw_reset = hw_atl2_hw_reset, + .hw_start = hw_atl_b0_hw_start, + .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, + .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop, + .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start, + .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop, + .hw_stop = hw_atl2_hw_stop, + + .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit, + .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update, + + .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive, + .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill, + + .hw_irq_enable = hw_atl_b0_hw_irq_enable, + .hw_irq_disable = hw_atl_b0_hw_irq_disable, + .hw_irq_read = hw_atl_b0_hw_irq_read, + + .hw_ring_rx_init = hw_atl2_hw_ring_rx_init, + .hw_ring_tx_init = hw_atl2_hw_ring_tx_init, + .hw_packet_filter_set = hw_atl2_hw_packet_filter_set, + .hw_filter_vlan_set = hw_atl2_hw_vlan_set, + .hw_filter_vlan_ctrl = hw_atl2_hw_vlan_ctrl, + .hw_multicast_list_set = hw_atl2_hw_multicast_list_set, + .hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set, + .hw_rss_set = hw_atl2_hw_rss_set, + .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, + .hw_tc_rate_limit_set = 
hw_atl2_hw_init_tx_tc_rate_limit, + .hw_get_hw_stats = hw_atl2_utils_get_hw_stats, + .hw_get_fw_version = hw_atl2_utils_get_fw_version, + .hw_set_offload = hw_atl_b0_hw_offload_set, + .hw_set_loopback = hw_atl_b0_set_loopback, + .hw_set_fc = hw_atl_b0_set_fc, +}; diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h new file mode 100644 index 000000000..346f0dc99 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Atlantic Network Driver + * Copyright (C) 2020 Marvell International Ltd. + */ + +#ifndef HW_ATL2_H +#define HW_ATL2_H + +#include "aq_common.h" + +extern const struct aq_hw_caps_s hw_atl2_caps_aqc113; +extern const struct aq_hw_caps_s hw_atl2_caps_aqc115c; +extern const struct aq_hw_caps_s hw_atl2_caps_aqc116c; +extern const struct aq_hw_ops hw_atl2_ops; + +#endif /* HW_ATL2_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h new file mode 100644 index 000000000..5a89bb872 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_internal.h @@ -0,0 +1,127 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Atlantic Network Driver + * Copyright (C) 2020 Marvell International Ltd. + */ + +#ifndef HW_ATL2_INTERNAL_H +#define HW_ATL2_INTERNAL_H + +#include "hw_atl2_utils.h" + +#define HW_ATL2_MTU_JUMBO 16352U +#define HW_ATL2_MTU 1514U + +#define HW_ATL2_TX_RINGS 4U +#define HW_ATL2_RX_RINGS 4U + +#define HW_ATL2_RINGS_MAX 32U +#define HW_ATL2_TXD_SIZE (16U) +#define HW_ATL2_RXD_SIZE (16U) + +#define HW_ATL2_MAC_UC 0U +#define HW_ATL2_MAC_MIN 1U +#define HW_ATL2_MAC_MAX 38U + +/* interrupts */ +#define HW_ATL2_ERR_INT 8U +#define HW_ATL2_INT_MASK (0xFFFFFFFFU) + +#define HW_ATL2_TXBUF_MAX 128U +#define HW_ATL2_RXBUF_MAX 192U + +#define HW_ATL2_RSS_REDIRECTION_MAX 64U + +#define HW_ATL2_TC_MAX 8U +#define HW_ATL2_RSS_MAX 8U + +#define HW_ATL2_INTR_MODER_MAX 0x1FF +#define HW_ATL2_INTR_MODER_MIN 0xFF + +#define HW_ATL2_MIN_RXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_RXD_MULTIPLE)) +#define HW_ATL2_MIN_TXD \ + (ALIGN(AQ_CFG_SKB_FRAGS_MAX + 1U, AQ_HW_TXD_MULTIPLE)) + +#define HW_ATL2_MAX_RXD 8184U +#define HW_ATL2_MAX_TXD 8184U + +#define HW_ATL2_FW_SM_ACT_RSLVR 0x3U + +#define HW_ATL2_RPF_TAG_UC_OFFSET 0x0 +#define HW_ATL2_RPF_TAG_ALLMC_OFFSET 0x6 +#define HW_ATL2_RPF_TAG_ET_OFFSET 0x7 +#define HW_ATL2_RPF_TAG_VLAN_OFFSET 0xA +#define HW_ATL2_RPF_TAG_UNTAG_OFFSET 0xE +#define HW_ATL2_RPF_TAG_L3_V4_OFFSET 0xF +#define HW_ATL2_RPF_TAG_L3_V6_OFFSET 0x12 +#define HW_ATL2_RPF_TAG_L4_OFFSET 0x15 +#define HW_ATL2_RPF_TAG_L4_FLEX_OFFSET 0x18 +#define HW_ATL2_RPF_TAG_FLEX_OFFSET 0x1B +#define HW_ATL2_RPF_TAG_PCP_OFFSET 0x1D + +#define HW_ATL2_RPF_TAG_UC_MASK (0x0000003F << HW_ATL2_RPF_TAG_UC_OFFSET) +#define HW_ATL2_RPF_TAG_ALLMC_MASK (0x00000001 << HW_ATL2_RPF_TAG_ALLMC_OFFSET) +#define HW_ATL2_RPF_TAG_UNTAG_MASK (0x00000001 << HW_ATL2_RPF_TAG_UNTAG_OFFSET) +#define HW_ATL2_RPF_TAG_VLAN_MASK (0x0000000F << HW_ATL2_RPF_TAG_VLAN_OFFSET) +#define HW_ATL2_RPF_TAG_ET_MASK (0x00000007 << HW_ATL2_RPF_TAG_ET_OFFSET) +#define HW_ATL2_RPF_TAG_L3_V4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V4_OFFSET) +#define HW_ATL2_RPF_TAG_L3_V6_MASK (0x00000007 << HW_ATL2_RPF_TAG_L3_V6_OFFSET) +#define HW_ATL2_RPF_TAG_L4_MASK (0x00000007 << HW_ATL2_RPF_TAG_L4_OFFSET) +#define HW_ATL2_RPF_TAG_PCP_MASK (0x00000007 << 
HW_ATL2_RPF_TAG_PCP_OFFSET) + +#define HW_ATL2_RPF_TAG_BASE_UC BIT(HW_ATL2_RPF_TAG_UC_OFFSET) +#define HW_ATL2_RPF_TAG_BASE_ALLMC BIT(HW_ATL2_RPF_TAG_ALLMC_OFFSET) +#define HW_ATL2_RPF_TAG_BASE_UNTAG BIT(HW_ATL2_RPF_TAG_UNTAG_OFFSET) +#define HW_ATL2_RPF_TAG_BASE_VLAN BIT(HW_ATL2_RPF_TAG_VLAN_OFFSET) + +enum HW_ATL2_RPF_ART_INDEX { + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX, + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX, + HW_ATL2_RPF_L3L4_USER_INDEX = 8, + HW_ATL2_RPF_ET_PCP_USER_INDEX = HW_ATL2_RPF_L3L4_USER_INDEX + 16, + HW_ATL2_RPF_VLAN_USER_INDEX = HW_ATL2_RPF_ET_PCP_USER_INDEX + 16, + HW_ATL2_RPF_PCP_TO_TC_INDEX = HW_ATL2_RPF_VLAN_USER_INDEX + + HW_ATL_VLAN_MAX_FILTERS, +}; + +#define HW_ATL2_ACTION(ACTION, RSS, INDEX, VALID) \ + ((((ACTION) & 0x3U) << 8) | \ + (((RSS) & 0x1U) << 7) | \ + (((INDEX) & 0x3FU) << 2) | \ + (((VALID) & 0x1U) << 0)) + +#define HW_ATL2_ACTION_DROP HW_ATL2_ACTION(0, 0, 0, 1) +#define HW_ATL2_ACTION_DISABLE HW_ATL2_ACTION(0, 0, 0, 0) +#define HW_ATL2_ACTION_ASSIGN_QUEUE(QUEUE) HW_ATL2_ACTION(1, 0, (QUEUE), 1) +#define HW_ATL2_ACTION_ASSIGN_TC(TC) HW_ATL2_ACTION(1, 1, (TC), 1) + +enum HW_ATL2_RPF_RSS_HASH_TYPE { + HW_ATL2_RPF_RSS_HASH_TYPE_NONE = 0, + HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 = BIT(0), + HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP = BIT(1), + HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP = BIT(2), + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 = BIT(3), + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP = BIT(4), + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP = BIT(5), + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX = BIT(6), + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP = BIT(7), + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP = BIT(8), + HW_ATL2_RPF_RSS_HASH_TYPE_ALL = HW_ATL2_RPF_RSS_HASH_TYPE_IPV4 | + HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_TCP | + HW_ATL2_RPF_RSS_HASH_TYPE_IPV4_UDP | + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6 | + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_TCP | + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_UDP | + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX | + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_TCP | + HW_ATL2_RPF_RSS_HASH_TYPE_IPV6_EX_UDP, +}; + +#define HW_ATL_MCAST_FLT_ANY_TO_HOST 0x00010FFFU + +struct hw_atl2_priv { + struct statistics_s last_stats; + unsigned int art_base_index; +}; + +#endif /* HW_ATL2_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c new file mode 100644 index 000000000..cd954b11d --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Atlantic Network Driver + * Copyright (C) 2020 Marvell International Ltd. 
+ */ + +#include "hw_atl2_llh.h" +#include "hw_atl2_llh_internal.h" +#include "aq_hw_utils.h" + +void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw, + u32 select) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR, + HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK, + HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT, select); +} + +void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR, + HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK, + HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT, + rss_hash_type); +} + +/* rpf */ + +void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_NEW_EN_ADR, + HW_ATL2_RPF_NEW_EN_MSK, + HW_ATL2_RPF_NEW_EN_SHIFT, + enable); +} + +void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPFL2UC_TAG_ADR(filter), + HW_ATL2_RPFL2UC_TAG_MSK, + HW_ATL2_RPFL2UC_TAG_SHIFT, + tag); +} + +void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_L2_BC_TAG_ADR, + HW_ATL2_RPF_L2_BC_TAG_MSK, + HW_ATL2_RPF_L2_BC_TAG_SHIFT, + tag); +} + +void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index, + u32 queue) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_RSS_REDIR_ADR(tc, index), + HW_ATL2_RPF_RSS_REDIR_MSK(tc), + HW_ATL2_RPF_RSS_REDIR_SHIFT(tc), + queue); +} + +void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_VL_TAG_ADR(filter), + HW_ATL2_RPF_VL_TAG_MSK, + HW_ATL2_RPF_VL_TAG_SHIFT, + tag); +} + +/* TX */ + +void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw, + const u32 tc_q_rand_map_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR, + HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK, + HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT, + tc_q_rand_map_en); +} + +void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR, + HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK, + HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT, + clk_gate_en); +} + +void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue) +{ + aq_hw_write_reg(aq_hw, HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue), + tx_intr_moderation_ctl); +} + +void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + const u32 data_arb_mode) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR, + HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK, + HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT, + data_arb_mode); +} + +void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + const u32 tc, + const u32 max_credit) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc), + HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK, + HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT, + max_credit); +} + +void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + const u32 tc, + const u32 weight) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc), + HW_ATL2_TPS_DATA_TCTWEIGHT_MSK, + HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT, + weight); +} + +u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL2_FPGA_VER_ADR); +} + +void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw) +{ + u32 hw_ver = hw_atl2_get_hw_version(aq_hw); + + aq_hw_write_reg_bit(aq_hw, HW_ATL2_LT_CTRL_ADR, + HW_ATL2_LT_CTRL_CLK_RATIO_MSK, + 
HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT, + hw_ver < HW_ATL2_FPGA_VER_U32(1, 0, 0, 0) ? + HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED : + hw_ver >= HW_ATL2_FPGA_VER_U32(1, 0, 85, 2) ? + HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED : + HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED); +} + +/* set action resolver record */ +void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location, + u32 tag, u32 mask, u32 action) +{ + aq_hw_write_reg(aq_hw, + HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(location), + tag); + aq_hw_write_reg(aq_hw, + HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(location), + mask); + aq_hw_write_reg(aq_hw, + HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(location), + action); +} + +void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_RPF_REC_TAB_EN_ADR, + HW_ATL2_RPF_REC_TAB_EN_MSK, + HW_ATL2_RPF_REC_TAB_EN_SHIFT, + sections); +} + +void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data, + int len) +{ + int j = 0; + int i; + + for (i = offset; i < offset + len; i++, j++) + data[j] = aq_hw_read_reg(aq_hw, + HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i)); +} + +void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data, + int len) +{ + int j = 0; + int i; + + for (i = offset; i < offset + len; i++, j++) + aq_hw_write_reg(aq_hw, HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(i), + data[j]); +} + +void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data, + int len) +{ + int j = 0; + int i; + + for (i = offset; i < offset + len; i++, j++) + data[j] = aq_hw_read_reg(aq_hw, + HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(i)); +} + +void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR, + HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK, + HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT, + finish); +} + +u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg_bit(aq_hw, HW_ATL2_MIF_MCP_FINISHED_READ_ADR, + HW_ATL2_MIF_MCP_FINISHED_READ_MSK, + HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT); +} + +u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR); +} + +void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val) +{ + return aq_hw_write_reg(aq_hw, HW_ATL2_MIF_BOOT_REG_ADR, val); +} + +u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw) +{ + return aq_hw_read_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_ADR); +} + +void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val) +{ + return aq_hw_write_reg(aq_hw, HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR, + val); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h new file mode 100644 index 000000000..98c7a4621 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh.h @@ -0,0 +1,102 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Atlantic Network Driver + * Copyright (C) 2020 Marvell International Ltd. 
+ */ + +#ifndef HW_ATL2_LLH_H +#define HW_ATL2_LLH_H + +#include <linux/types.h> + +struct aq_hw_s; + +/* Set TX Interrupt Moderation Control Register */ +void hw_atl2_reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw, + u32 tx_intr_moderation_ctl, + u32 queue); + +/* Set Redirection Table 2 Select */ +void hw_atl2_rpf_redirection_table2_select_set(struct aq_hw_s *aq_hw, + u32 select); + +/** Set RSS HASH type */ +void hw_atl2_rpf_rss_hash_type_set(struct aq_hw_s *aq_hw, u32 rss_hash_type); + +/* set new RPF enable */ +void hw_atl2_rpf_new_enable_set(struct aq_hw_s *aq_hw, u32 enable); + +/* set l2 unicast filter tag */ +void hw_atl2_rpfl2_uc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter); + +/* set l2 broadcast filter tag */ +void hw_atl2_rpfl2_bc_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag); + +/* set new rss redirection table */ +void hw_atl2_new_rpf_rss_redir_set(struct aq_hw_s *aq_hw, u32 tc, u32 index, + u32 queue); + +/* Set VLAN filter tag */ +void hw_atl2_rpf_vlan_flr_tag_set(struct aq_hw_s *aq_hw, u32 tag, u32 filter); + +/* set tx random TC-queue mapping enable bit */ +void hw_atl2_tpb_tx_tc_q_rand_map_en_set(struct aq_hw_s *aq_hw, + const u32 tc_q_rand_map_en); + +/* set tx buffer clock gate enable */ +void hw_atl2_tpb_tx_buf_clk_gate_en_set(struct aq_hw_s *aq_hw, u32 clk_gate_en); + +void hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw, + const u32 data_arb_mode); + +/* set tx packet scheduler tc data max credit */ +void hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw, + const u32 tc, + const u32 max_credit); + +/* set tx packet scheduler tc data weight */ +void hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw, + const u32 tc, + const u32 weight); + +u32 hw_atl2_get_hw_version(struct aq_hw_s *aq_hw); + +void hw_atl2_init_launchtime(struct aq_hw_s *aq_hw); + +/* set action resolver record */ +void hw_atl2_rpf_act_rslvr_record_set(struct aq_hw_s *aq_hw, u8 location, + u32 tag, u32 mask, u32 action); + +/* set enable action resolver section */ +void hw_atl2_rpf_act_rslvr_section_en_set(struct aq_hw_s *aq_hw, u32 sections); + +/* get data from firmware shared input buffer */ +void hw_atl2_mif_shared_buf_get(struct aq_hw_s *aq_hw, int offset, u32 *data, + int len); + +/* set data into firmware shared input buffer */ +void hw_atl2_mif_shared_buf_write(struct aq_hw_s *aq_hw, int offset, u32 *data, + int len); + +/* get data from firmware shared output buffer */ +void hw_atl2_mif_shared_buf_read(struct aq_hw_s *aq_hw, int offset, u32 *data, + int len); + +/* set host finished write shared buffer indication */ +void hw_atl2_mif_host_finished_write_set(struct aq_hw_s *aq_hw, u32 finish); + +/* get mcp finished read shared buffer indication */ +u32 hw_atl2_mif_mcp_finished_read_get(struct aq_hw_s *aq_hw); + +/* get mcp boot register */ +u32 hw_atl2_mif_mcp_boot_reg_get(struct aq_hw_s *aq_hw); + +/* set mcp boot register */ +void hw_atl2_mif_mcp_boot_reg_set(struct aq_hw_s *aq_hw, u32 val); + +/* get host interrupt request */ +u32 hw_atl2_mif_host_req_int_get(struct aq_hw_s *aq_hw); + +/* clear host interrupt request */ +void hw_atl2_mif_host_req_int_clr(struct aq_hw_s *aq_hw, u32 val); + +#endif /* HW_ATL2_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h new file mode 100644 index 000000000..e34c5cda0 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_llh_internal.h @@ -0,0 +1,391 @@ +/* 
SPDX-License-Identifier: GPL-2.0-only */ +/* Atlantic Network Driver + * Copyright (C) 2020 Marvell International Ltd. + */ + +#ifndef HW_ATL2_LLH_INTERNAL_H +#define HW_ATL2_LLH_INTERNAL_H + +/* RX pif_rpf_redir_2_en_i Bitfield Definitions + * PORT="pif_rpf_redir_2_en_i" + */ +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_ADR 0x000054C8 +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSK 0x00001000 +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_MSKN 0xFFFFEFFF +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_SHIFT 12 +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_WIDTH 1 +#define HW_ATL2_RPF_PIF_RPF_REDIR2_ENI_DEFAULT 0x0 + +/* RX pif_rpf_rss_hash_type_i Bitfield Definitions + */ +#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_ADR 0x000054C8 +#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSK 0x000001FF +#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_MSKN 0xFFFFFE00 +#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_SHIFT 0 +#define HW_ATL2_RPF_PIF_RPF_RSS_HASH_TYPEI_WIDTH 9 + +/* rx rpf_new_rpf_en bitfield definitions + * preprocessor definitions for the bitfield "rpf_new_rpf_en_i". + * port="pif_rpf_new_rpf_en_i + */ + +/* register address for bitfield rpf_new_rpf_en */ +#define HW_ATL2_RPF_NEW_EN_ADR 0x00005104 +/* bitmask for bitfield rpf_new_rpf_en */ +#define HW_ATL2_RPF_NEW_EN_MSK 0x00000800 +/* inverted bitmask for bitfield rpf_new_rpf_en */ +#define HW_ATL2_RPF_NEW_EN_MSKN 0xfffff7ff +/* lower bit position of bitfield rpf_new_rpf_en */ +#define HW_ATL2_RPF_NEW_EN_SHIFT 11 +/* width of bitfield rpf_new_rpf_en */ +#define HW_ATL2_RPF_NEW_EN_WIDTH 1 +/* default value of bitfield rpf_new_rpf_en */ +#define HW_ATL2_RPF_NEW_EN_DEFAULT 0x0 + +/* rx l2_uc_req_tag0{f}[5:0] bitfield definitions + * preprocessor definitions for the bitfield "l2_uc_req_tag0{f}[7:0]". + * parameter: filter {f} | stride size 0x8 | range [0, 37] + * port="pif_rpf_l2_uc_req_tag0[5:0]" + */ + +/* register address for bitfield l2_uc_req_tag0{f}[2:0] */ +#define HW_ATL2_RPFL2UC_TAG_ADR(filter) (0x00005114 + (filter) * 0x8) +/* bitmask for bitfield l2_uc_req_tag0{f}[2:0] */ +#define HW_ATL2_RPFL2UC_TAG_MSK 0x0FC00000 +/* inverted bitmask for bitfield l2_uc_req_tag0{f}[2:0] */ +#define HW_ATL2_RPFL2UC_TAG_MSKN 0xF03FFFFF +/* lower bit position of bitfield l2_uc_req_tag0{f}[2:0] */ +#define HW_ATL2_RPFL2UC_TAG_SHIFT 22 +/* width of bitfield l2_uc_req_tag0{f}[2:0] */ +#define HW_ATL2_RPFL2UC_TAG_WIDTH 6 +/* default value of bitfield l2_uc_req_tag0{f}[2:0] */ +#define HW_ATL2_RPFL2UC_TAG_DEFAULT 0x0 + +/* rpf_l2_bc_req_tag[5:0] bitfield definitions + * preprocessor definitions for the bitfield "rpf_l2_bc_req_tag[5:0]". + * port="pifrpf_l2_bc_req_tag_i[5:0]" + */ + +/* register address for bitfield rpf_l2_bc_req_tag */ +#define HW_ATL2_RPF_L2_BC_TAG_ADR 0x000050F0 +/* bitmask for bitfield rpf_l2_bc_req_tag */ +#define HW_ATL2_RPF_L2_BC_TAG_MSK 0x0000003F +/* inverted bitmask for bitfield rpf_l2_bc_req_tag */ +#define HW_ATL2_RPF_L2_BC_TAG_MSKN 0xffffffc0 +/* lower bit position of bitfield rpf_l2_bc_req_tag */ +#define HW_ATL2_RPF_L2_BC_TAG_SHIFT 0 +/* width of bitfield rpf_l2_bc_req_tag */ +#define HW_ATL2_RPF_L2_BC_TAG_WIDTH 6 +/* default value of bitfield rpf_l2_bc_req_tag */ +#define HW_ATL2_RPF_L2_BC_TAG_DEFAULT 0x0 + +/* rx rpf_rss_red1_data_[4:0] bitfield definitions + * preprocessor definitions for the bitfield "rpf_rss_red1_data[4:0]". 
+ * port="pif_rpf_rss_red1_data_i[4:0]" + */ + +/* register address for bitfield rpf_rss_red1_data[4:0] */ +#define HW_ATL2_RPF_RSS_REDIR_ADR(TC, INDEX) (0x00006200 + \ + (0x100 * !!((TC) > 3)) + (INDEX) * 4) +/* bitmask for bitfield rpf_rss_red1_data[4:0] */ +#define HW_ATL2_RPF_RSS_REDIR_MSK(TC) (0x00000001F << (5 * ((TC) % 4))) +/* lower bit position of bitfield rpf_rss_red1_data[4:0] */ +#define HW_ATL2_RPF_RSS_REDIR_SHIFT(TC) (5 * ((TC) % 4)) +/* width of bitfield rpf_rss_red1_data[4:0] */ +#define HW_ATL2_RPF_RSS_REDIR_WIDTH 5 +/* default value of bitfield rpf_rss_red1_data[4:0] */ +#define HW_ATL2_RPF_RSS_REDIR_DEFAULT 0x0 + +/* rx vlan_req_tag0{f}[3:0] bitfield definitions + * preprocessor definitions for the bitfield "vlan_req_tag0{f}[3:0]". + * parameter: filter {f} | stride size 0x4 | range [0, 15] + * port="pif_rpf_vlan_req_tag0[3:0]" + */ + +/* register address for bitfield vlan_req_tag0{f}[3:0] */ +#define HW_ATL2_RPF_VL_TAG_ADR(filter) (0x00005290 + (filter) * 0x4) +/* bitmask for bitfield vlan_req_tag0{f}[3:0] */ +#define HW_ATL2_RPF_VL_TAG_MSK 0x0000F000 +/* inverted bitmask for bitfield vlan_req_tag0{f}[3:0] */ +#define HW_ATL2_RPF_VL_TAG_MSKN 0xFFFF0FFF +/* lower bit position of bitfield vlan_req_tag0{f}[3:0] */ +#define HW_ATL2_RPF_VL_TAG_SHIFT 12 +/* width of bitfield vlan_req_tag0{f}[3:0] */ +#define HW_ATL2_RPF_VL_TAG_WIDTH 4 +/* default value of bitfield vlan_req_tag0{f}[3:0] */ +#define HW_ATL2_RPF_VL_TAG_DEFAULT 0x0 + +/* RX rx_q{Q}_tc_map[2:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "rx_q{Q}_tc_map[2:0]". + * Parameter: Queue {Q} | bit-level stride | range [0, 31] + * PORT="pif_rx_q0_tc_map_i[2:0]" + */ + +/* Register address for bitfield rx_q{Q}_tc_map[2:0] */ +#define HW_ATL2_RX_Q_TC_MAP_ADR(queue) \ + (((queue) < 32) ? 0x00005900 + ((queue) / 8) * 4 : 0) +/* Lower bit position of bitfield rx_q{Q}_tc_map[2:0] */ +#define HW_ATL2_RX_Q_TC_MAP_SHIFT(queue) \ + (((queue) < 32) ? ((queue) * 4) % 32 : 0) +/* Width of bitfield rx_q{Q}_tc_map[2:0] */ +#define HW_ATL2_RX_Q_TC_MAP_WIDTH 3 +/* Default value of bitfield rx_q{Q}_tc_map[2:0] */ +#define HW_ATL2_RX_Q_TC_MAP_DEFAULT 0x0 + +/* tx tx_tc_q_rand_map_en bitfield definitions + * preprocessor definitions for the bitfield "tx_tc_q_rand_map_en". + * port="pif_tpb_tx_tc_q_rand_map_en_i" + */ + +/* register address for bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_ADR 0x00007900 +/* bitmask for bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSK 0x00000200 +/* inverted bitmask for bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_MSKN 0xFFFFFDFF +/* lower bit position of bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_SHIFT 9 +/* width of bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_WIDTH 1 +/* default value of bitfield tx_tc_q_rand_map_en */ +#define HW_ATL2_TPB_TX_TC_Q_RAND_MAP_EN_DEFAULT 0x0 + +/* tx tx_buffer_clk_gate_en bitfield definitions + * preprocessor definitions for the bitfield "tx_buffer_clk_gate_en". 
+ * port="pif_tpb_tx_buffer_clk_gate_en_i" + */ + +/* register address for bitfield tx_buffer_clk_gate_en */ +#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_ADR 0x00007900 +/* bitmask for bitfield tx_buffer_clk_gate_en */ +#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSK 0x00000020 +/* inverted bitmask for bitfield tx_buffer_clk_gate_en */ +#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_MSKN 0xffffffdf +/* lower bit position of bitfield tx_buffer_clk_gate_en */ +#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_SHIFT 5 +/* width of bitfield tx_buffer_clk_gate_en */ +#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_WIDTH 1 +/* default value of bitfield tx_buffer_clk_gate_en */ +#define HW_ATL2_TPB_TX_BUF_CLK_GATE_EN_DEFAULT 0x0 + +/* tx tx_q_tc_map{q} bitfield definitions + * preprocessor definitions for the bitfield "tx_q_tc_map{q}". + * parameter: queue {q} | bit-level stride | range [0, 31] + * port="pif_tpb_tx_q_tc_map0_i[2:0]" + */ + +/* register address for bitfield tx_q_tc_map{q} */ +#define HW_ATL2_TX_Q_TC_MAP_ADR(queue) \ + (((queue) < 32) ? 0x0000799C + ((queue) / 4) * 4 : 0) +/* lower bit position of bitfield tx_q_tc_map{q} */ +#define HW_ATL2_TX_Q_TC_MAP_SHIFT(queue) \ + (((queue) < 32) ? ((queue) * 8) % 32 : 0) +/* width of bitfield tx_q_tc_map{q} */ +#define HW_ATL2_TX_Q_TC_MAP_WIDTH 3 +/* default value of bitfield tx_q_tc_map{q} */ +#define HW_ATL2_TX_Q_TC_MAP_DEFAULT 0x0 + +/* tx data_tc_arb_mode bitfield definitions + * preprocessor definitions for the bitfield "data_tc_arb_mode". + * port="pif_tps_data_tc_arb_mode_i" + */ + +/* register address for bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_ADR 0x00007100 +/* bitmask for bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSK 0x00000003 +/* inverted bitmask for bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_MSKN 0xfffffffc +/* lower bit position of bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_SHIFT 0 +/* width of bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_WIDTH 2 +/* default value of bitfield data_tc_arb_mode */ +#define HW_ATL2_TPS_DATA_TC_ARB_MODE_DEFAULT 0x0 + +/* tx data_tc{t}_credit_max[f:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_credit_max[f:0]". + * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_credit_max_i[15:0]" + */ + +/* register address for bitfield data_tc{t}_credit_max[f:0] */ +#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_ADR(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_credit_max[f:0] */ +#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSK 0xffff0000 +/* inverted bitmask for bitfield data_tc{t}_credit_max[f:0] */ +#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_MSKN 0x0000ffff +/* lower bit position of bitfield data_tc{t}_credit_max[f:0] */ +#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_SHIFT 16 +/* width of bitfield data_tc{t}_credit_max[f:0] */ +#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_WIDTH 16 +/* default value of bitfield data_tc{t}_credit_max[f:0] */ +#define HW_ATL2_TPS_DATA_TCTCREDIT_MAX_DEFAULT 0x0 + +/* tx data_tc{t}_weight[e:0] bitfield definitions + * preprocessor definitions for the bitfield "data_tc{t}_weight[e:0]". 
+ * parameter: tc {t} | stride size 0x4 | range [0, 7] + * port="pif_tps_data_tc0_weight_i[14:0]" + */ + +/* register address for bitfield data_tc{t}_weight[e:0] */ +#define HW_ATL2_TPS_DATA_TCTWEIGHT_ADR(tc) (0x00007110 + (tc) * 0x4) +/* bitmask for bitfield data_tc{t}_weight[e:0] */ +#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSK 0x00007fff +/* inverted bitmask for bitfield data_tc{t}_weight[e:0] */ +#define HW_ATL2_TPS_DATA_TCTWEIGHT_MSKN 0xffff8000 +/* lower bit position of bitfield data_tc{t}_weight[e:0] */ +#define HW_ATL2_TPS_DATA_TCTWEIGHT_SHIFT 0 +/* width of bitfield data_tc{t}_weight[e:0] */ +#define HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH 15 +/* default value of bitfield data_tc{t}_weight[e:0] */ +#define HW_ATL2_TPS_DATA_TCTWEIGHT_DEFAULT 0x0 + +/* tx interrupt moderation control register definitions + * Preprocessor definitions for TX Interrupt Moderation Control Register + * Base Address: 0x00007c28 + * Parameter: queue {Q} | stride size 0x4 | range [0, 31] + */ + +#define HW_ATL2_TX_INTR_MODERATION_CTL_ADR(queue) (0x00007c28u + (queue) * 0x40) + +/* Launch time control register */ +#define HW_ATL2_LT_CTRL_ADR 0x00007a1c + +#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_MSK 0xFFFF0000 +#define HW_ATL2_LT_CTRL_AVB_LEN_CMP_TRSHLD_SHIFT 16 + +#define HW_ATL2_LT_CTRL_CLK_RATIO_MSK 0x0000FF00 +#define HW_ATL2_LT_CTRL_CLK_RATIO_SHIFT 8 +#define HW_ATL2_LT_CTRL_CLK_RATIO_QUATER_SPEED 4 +#define HW_ATL2_LT_CTRL_CLK_RATIO_HALF_SPEED 2 +#define HW_ATL2_LT_CTRL_CLK_RATIO_FULL_SPEED 1 + +#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_MSK 0x00000008 +#define HW_ATL2_LT_CTRL_25G_MODE_SUPPORT_SHIFT 3 + +#define HW_ATL2_LT_CTRL_LINK_SPEED_MSK 0x00000007 +#define HW_ATL2_LT_CTRL_LINK_SPEED_SHIFT 0 + +/* FPGA VER register */ +#define HW_ATL2_FPGA_VER_ADR 0x000000f4 +#define HW_ATL2_FPGA_VER_U32(mj, mi, bl, rv) \ + ((((mj) & 0xff) << 24) | \ + (((mi) & 0xff) << 16) | \ + (((bl) & 0xff) << 8) | \ + (((rv) & 0xff) << 0)) + +/* ahb_mem_addr{f}[31:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "ahb_mem_addr{f}[31:0]". 
+ * Parameter: filter {f} | stride size 0x10 | range [0, 127] + * PORT="ahb_mem_addr{f}[31:0]" + */ + +/* Register address for bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_ADR(filter) \ + (0x00014000u + (filter) * 0x10) +/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSK 0xFFFFFFFFu +/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_MSKN 0x00000000u +/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_SHIFT 0 +/* Width of bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_WIDTH 31 +/* Default value of bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_REQ_TAG_DEFAULT 0x0 + +/* Register address for bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_ADR(filter) \ + (0x00014004u + (filter) * 0x10) +/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSK 0xFFFFFFFFu +/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_MSKN 0x00000000u +/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_SHIFT 0 +/* Width of bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_WIDTH 31 +/* Default value of bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_TAG_MASK_DEFAULT 0x0 + +/* Register address for bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_ACTN_ADR(filter) \ + (0x00014008u + (filter) * 0x10) +/* Bitmask for bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSK 0x000007FFu +/* Inverted bitmask for bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_ACTN_MSKN 0xFFFFF800u +/* Lower bit position of bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_ACTN_SHIFT 0 +/* Width of bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_ACTN_WIDTH 10 +/* Default value of bitfield ahb_mem_addr{f}[31:0] */ +#define HW_ATL2_RPF_ACT_RSLVR_ACTN_DEFAULT 0x0 + +/* rpf_rec_tab_en[15:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "rpf_rec_tab_en[15:0]". + * PORT="pif_rpf_rec_tab_en[15:0]" + */ +/* Register address for bitfield rpf_rec_tab_en[15:0] */ +#define HW_ATL2_RPF_REC_TAB_EN_ADR 0x00006ff0u +/* Bitmask for bitfield rpf_rec_tab_en[15:0] */ +#define HW_ATL2_RPF_REC_TAB_EN_MSK 0x0000FFFFu +/* Inverted bitmask for bitfield rpf_rec_tab_en[15:0] */ +#define HW_ATL2_RPF_REC_TAB_EN_MSKN 0xFFFF0000u +/* Lower bit position of bitfield rpf_rec_tab_en[15:0] */ +#define HW_ATL2_RPF_REC_TAB_EN_SHIFT 0 +/* Width of bitfield rpf_rec_tab_en[15:0] */ +#define HW_ATL2_RPF_REC_TAB_EN_WIDTH 16 +/* Default value of bitfield rpf_rec_tab_en[15:0] */ +#define HW_ATL2_RPF_REC_TAB_EN_DEFAULT 0x0 + +/* Register address for firmware shared input buffer */ +#define HW_ATL2_MIF_SHARED_BUFFER_IN_ADR(dword) (0x00012000U + (dword) * 0x4U) +/* Register address for firmware shared output buffer */ +#define HW_ATL2_MIF_SHARED_BUFFER_OUT_ADR(dword) (0x00013000U + (dword) * 0x4U) + +/* pif_host_finished_buf_wr_i Bitfield Definitions + * Preprocessor definitions for the bitfield "pif_host_finished_buf_wr_i". 
+ * PORT="pif_host_finished_buf_wr_i" + */ +/* Register address for bitfield rpif_host_finished_buf_wr_i */ +#define HW_ATL2_MIF_HOST_FINISHED_WRITE_ADR 0x00000e00u +/* Bitmask for bitfield pif_host_finished_buf_wr_i */ +#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSK 0x00000001u +/* Inverted bitmask for bitfield pif_host_finished_buf_wr_i */ +#define HW_ATL2_MIF_HOST_FINISHED_WRITE_MSKN 0xFFFFFFFEu +/* Lower bit position of bitfield pif_host_finished_buf_wr_i */ +#define HW_ATL2_MIF_HOST_FINISHED_WRITE_SHIFT 0 +/* Width of bitfield pif_host_finished_buf_wr_i */ +#define HW_ATL2_MIF_HOST_FINISHED_WRITE_WIDTH 1 +/* Default value of bitfield pif_host_finished_buf_wr_i */ +#define HW_ATL2_MIF_HOST_FINISHED_WRITE_DEFAULT 0x0 + +/* pif_mcp_finished_buf_rd_i Bitfield Definitions + * Preprocessor definitions for the bitfield "pif_mcp_finished_buf_rd_i". + * PORT="pif_mcp_finished_buf_rd_i" + */ +/* Register address for bitfield pif_mcp_finished_buf_rd_i */ +#define HW_ATL2_MIF_MCP_FINISHED_READ_ADR 0x00000e04u +/* Bitmask for bitfield pif_mcp_finished_buf_rd_i */ +#define HW_ATL2_MIF_MCP_FINISHED_READ_MSK 0x00000001u +/* Inverted bitmask for bitfield pif_mcp_finished_buf_rd_i */ +#define HW_ATL2_MIF_MCP_FINISHED_READ_MSKN 0xFFFFFFFEu +/* Lower bit position of bitfield pif_mcp_finished_buf_rd_i */ +#define HW_ATL2_MIF_MCP_FINISHED_READ_SHIFT 0 +/* Width of bitfield pif_mcp_finished_buf_rd_i */ +#define HW_ATL2_MIF_MCP_FINISHED_READ_WIDTH 1 +/* Default value of bitfield pif_mcp_finished_buf_rd_i */ +#define HW_ATL2_MIF_MCP_FINISHED_READ_DEFAULT 0x0 + +/* Register address for bitfield pif_mcp_boot_reg */ +#define HW_ATL2_MIF_BOOT_REG_ADR 0x00003040u + +#define HW_ATL2_MCP_HOST_REQ_INT_READY BIT(0) + +#define HW_ATL2_MCP_HOST_REQ_INT_ADR 0x00000F00u +#define HW_ATL2_MCP_HOST_REQ_INT_SET_ADR 0x00000F04u +#define HW_ATL2_MCP_HOST_REQ_INT_CLR_ADR 0x00000F08u + +#endif /* HW_ATL2_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c new file mode 100644 index 000000000..0fe6257d9 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Atlantic Network Driver + * Copyright (C) 2020 Marvell International Ltd. 
+ */
+
+#include <linux/iopoll.h>
+
+#include "aq_hw_utils.h"
+#include "hw_atl/hw_atl_utils.h"
+#include "hw_atl2_utils.h"
+#include "hw_atl2_llh.h"
+#include "hw_atl2_llh_internal.h"
+
+#define HW_ATL2_FW_VER_1X 0x01000000U
+
+#define AQ_A2_BOOT_STARTED BIT(0x18)
+#define AQ_A2_CRASH_INIT BIT(0x1B)
+#define AQ_A2_BOOT_CODE_FAILED BIT(0x1C)
+#define AQ_A2_FW_INIT_FAILED BIT(0x1D)
+#define AQ_A2_FW_INIT_COMP_SUCCESS BIT(0x1F)
+
+#define AQ_A2_FW_BOOT_FAILED_MASK (AQ_A2_CRASH_INIT | \
+ AQ_A2_BOOT_CODE_FAILED | \
+ AQ_A2_FW_INIT_FAILED)
+#define AQ_A2_FW_BOOT_COMPLETE_MASK (AQ_A2_FW_BOOT_FAILED_MASK | \
+ AQ_A2_FW_INIT_COMP_SUCCESS)
+
+#define AQ_A2_FW_BOOT_REQ_REBOOT BIT(0x0)
+#define AQ_A2_FW_BOOT_REQ_HOST_BOOT BIT(0x8)
+#define AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT BIT(0xA)
+#define AQ_A2_FW_BOOT_REQ_PHY_FAST_BOOT BIT(0xB)
+
+int hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
+{
+ int err;
+
+ self->fw_ver_actual = hw_atl2_utils_get_fw_version(self);
+
+ if (hw_atl_utils_ver_match(HW_ATL2_FW_VER_1X, self->fw_ver_actual)) {
+ *fw_ops = &aq_a2_fw_ops;
+ } else {
+ aq_pr_err("Bad FW version detected: %x, continuing anyway\n",
+ self->fw_ver_actual);
+ *fw_ops = &aq_a2_fw_ops;
+ }
+ aq_pr_trace("Detected ATL2FW %x\n", self->fw_ver_actual);
+ self->aq_fw_ops = *fw_ops;
+ err = self->aq_fw_ops->init(self);
+
+ self->chip_features |= ATL_HW_CHIP_ANTIGUA;
+
+ return err;
+}
+
+static bool hw_atl2_mcp_boot_complete(struct aq_hw_s *self)
+{
+ u32 rbl_status;
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+ if (rbl_status & AQ_A2_FW_BOOT_COMPLETE_MASK)
+ return true;
+
+ /* Host boot requested */
+ if (hw_atl2_mif_host_req_int_get(self) & HW_ATL2_MCP_HOST_REQ_INT_READY)
+ return true;
+
+ return false;
+}
+
+int hw_atl2_utils_soft_reset(struct aq_hw_s *self)
+{
+ bool rbl_complete = false;
+ u32 rbl_status = 0;
+ u32 rbl_request;
+ int err;
+
+ hw_atl2_mif_host_req_int_clr(self, 0x01);
+ rbl_request = AQ_A2_FW_BOOT_REQ_REBOOT;
+#ifdef AQ_CFG_FAST_START
+ rbl_request |= AQ_A2_FW_BOOT_REQ_MAC_FAST_BOOT;
+#endif
+ hw_atl2_mif_mcp_boot_reg_set(self, rbl_request);
+
+ /* Wait for RBL boot */
+ err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_boot_reg_get, self,
+ rbl_status,
+ ((rbl_status & AQ_A2_BOOT_STARTED) &&
+ (rbl_status != 0xFFFFFFFFu)),
+ 10, 200000);
+ if (err) {
+ aq_pr_err("Boot code hung");
+ goto err_exit;
+ }
+
+ err = readx_poll_timeout_atomic(hw_atl2_mcp_boot_complete, self,
+ rbl_complete,
+ rbl_complete,
+ 10, 2000000);
+
+ if (err) {
+ aq_pr_err("FW Restart timed out");
+ goto err_exit;
+ }
+
+ rbl_status = hw_atl2_mif_mcp_boot_reg_get(self);
+
+ if (rbl_status & AQ_A2_FW_BOOT_FAILED_MASK) {
+ err = -EIO;
+ aq_pr_err("FW Restart failed");
+ goto err_exit;
+ }
+
+ if (hw_atl2_mif_host_req_int_get(self) &
+ HW_ATL2_MCP_HOST_REQ_INT_READY) {
+ err = -EIO;
+ aq_pr_err("No FW detected. Dynamic FW load not implemented");
+ goto err_exit;
+ }
+
+ if (self->aq_fw_ops) {
+ err = self->aq_fw_ops->init(self);
+ if (err) {
+ aq_pr_err("FW Init failed");
+ goto err_exit;
+ }
+ }
+
+err_exit:
+ return err;
+} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h new file mode 100644 index 000000000..6bad64c77 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils.h @@ -0,0 +1,636 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Atlantic Network Driver + * Copyright (C) 2020 Marvell International Ltd. 
+ */ + +#ifndef HW_ATL2_UTILS_H +#define HW_ATL2_UTILS_H + +#include "aq_hw.h" + +/* F W A P I */ + +struct link_options_s { + u8 link_up:1; + u8 link_renegotiate:1; + u8 minimal_link_speed:1; + u8 internal_loopback:1; + u8 external_loopback:1; + u8 rate_10M_hd:1; + u8 rate_100M_hd:1; + u8 rate_1G_hd:1; + + u8 rate_10M:1; + u8 rate_100M:1; + u8 rate_1G:1; + u8 rate_2P5G:1; + u8 rate_N2P5G:1; + u8 rate_5G:1; + u8 rate_N5G:1; + u8 rate_10G:1; + + u8 eee_100M:1; + u8 eee_1G:1; + u8 eee_2P5G:1; + u8 eee_5G:1; + u8 eee_10G:1; + u8 rsvd3:3; + + u8 pause_rx:1; + u8 pause_tx:1; + u8 rsvd4:1; + u8 downshift:1; + u8 downshift_retry:4; +}; + +struct link_control_s { + u8 mode:4; + u8 disable_crc_corruption:1; + u8 discard_short_frames:1; + u8 flow_control_mode:1; + u8 disable_length_check:1; + + u8 discard_errored_frames:1; + u8 control_frame_enable:1; + u8 enable_tx_padding:1; + u8 enable_crc_forwarding:1; + u8 enable_frame_padding_removal_rx: 1; + u8 promiscuous_mode: 1; + u8 rsvd:2; + + u16 rsvd2; +}; + +struct thermal_shutdown_s { + u8 enable:1; + u8 warning_enable:1; + u8 rsvd:6; + + u8 shutdown_temperature; + u8 cold_temperature; + u8 warning_temperature; +}; + +struct mac_address_s { + u8 mac_address[6]; +}; + +struct mac_address_aligned_s { + struct mac_address_s aligned; + u16 rsvd; +}; + +struct sleep_proxy_s { + struct wake_on_lan_s { + u8 wake_on_magic_packet:1; + u8 wake_on_pattern:1; + u8 wake_on_link_up:1; + u8 wake_on_link_down:1; + u8 wake_on_ping:1; + u8 wake_on_timer:1; + u8 rsvd:2; + + u8 rsvd2; + u16 rsvd3; + + u32 link_up_timeout; + u32 link_down_timeout; + u32 timer; + } wake_on_lan; + + struct { + u32 mask[4]; + u32 crc32; + } wake_up_pattern[8]; + + struct __packed { + u8 arp_responder:1; + u8 echo_responder:1; + u8 igmp_client:1; + u8 echo_truncate:1; + u8 address_guard:1; + u8 ignore_fragmented:1; + u8 rsvd:2; + + u16 echo_max_len; + u8 rsvd2; + } ipv4_offload; + + u32 ipv4_offload_addr[8]; + u32 reserved[8]; + + struct __packed { + u8 ns_responder:1; + u8 echo_responder:1; + u8 mld_client:1; + u8 echo_truncate:1; + u8 address_guard:1; + u8 rsvd:3; + + u16 echo_max_len; + u8 rsvd2; + } ipv6_offload; + + u32 ipv6_offload_addr[16][4]; + + struct { + u16 port[16]; + } tcp_port_offload; + + struct { + u16 port[16]; + } udp_port_offload; + + struct { + u32 retry_count; + u32 retry_interval; + } ka4_offload; + + struct { + u32 timeout; + u16 local_port; + u16 remote_port; + u8 remote_mac_addr[6]; + u16 rsvd; + u32 rsvd2; + u32 rsvd3; + u16 rsvd4; + u16 win_size; + u32 seq_num; + u32 ack_num; + u32 local_ip; + u32 remote_ip; + } ka4_connection[16]; + + struct { + u32 retry_count; + u32 retry_interval; + } ka6_offload; + + struct { + u32 timeout; + u16 local_port; + u16 remote_port; + u8 remote_mac_addr[6]; + u16 rsvd; + u32 rsvd2; + u32 rsvd3; + u16 rsvd4; + u16 win_size; + u32 seq_num; + u32 ack_num; + u32 local_ip[4]; + u32 remote_ip[4]; + } ka6_connection[16]; + + struct { + u32 rr_count; + u32 rr_buf_len; + u32 idx_offset; + u32 rr__offset; + } mdns_offload; +}; + +struct pause_quanta_s { + u16 quanta_10M; + u16 threshold_10M; + u16 quanta_100M; + u16 threshold_100M; + u16 quanta_1G; + u16 threshold_1G; + u16 quanta_2P5G; + u16 threshold_2P5G; + u16 quanta_5G; + u16 threshold_5G; + u16 quanta_10G; + u16 threshold_10G; +}; + +struct data_buffer_status_s { + u32 data_offset; + u32 data_length; +}; + +struct device_caps_s { + u8 finite_flashless:1; + u8 cable_diag:1; + u8 ncsi:1; + u8 avb:1; + u8 rsvd:4; + + u8 rsvd2; + u16 rsvd3; + u32 rsvd4; +}; + +struct version_s { + struct 
bundle_version_t { + u8 major; + u8 minor; + u16 build; + } bundle; + struct mac_version_t { + u8 major; + u8 minor; + u16 build; + } mac; + struct phy_version_t { + u8 major; + u8 minor; + u16 build; + } phy; + u32 drv_iface_ver:4; + u32 rsvd:28; +}; + +struct link_status_s { + u8 link_state:4; + u8 link_rate:4; + + u8 pause_tx:1; + u8 pause_rx:1; + u8 eee:1; + u8 duplex:1; + u8 rsvd:4; + + u16 rsvd2; +}; + +struct wol_status_s { + u8 wake_count; + u8 wake_reason; + + u16 wake_up_packet_length :12; + u16 wake_up_pattern_number :3; + u16 rsvd:1; + + u32 wake_up_packet[379]; +}; + +struct mac_health_monitor_s { + u8 mac_ready:1; + u8 mac_fault:1; + u8 mac_flashless_finished:1; + u8 rsvd:5; + + u8 mac_temperature; + u16 mac_heart_beat; + u16 mac_fault_code; + u16 rsvd2; +}; + +struct phy_health_monitor_s { + u8 phy_ready:1; + u8 phy_fault:1; + u8 phy_hot_warning:1; + u8 rsvd:5; + + u8 phy_temperature; + u16 phy_heart_beat; + u16 phy_fault_code; + u16 rsvd2; +}; + +struct device_link_caps_s { + u8 rsvd:3; + u8 internal_loopback:1; + u8 external_loopback:1; + u8 rate_10M_hd:1; + u8 rate_100M_hd:1; + u8 rate_1G_hd:1; + + u8 rate_10M:1; + u8 rate_100M:1; + u8 rate_1G:1; + u8 rate_2P5G:1; + u8 rate_N2P5G:1; + u8 rate_5G:1; + u8 rate_N5G:1; + u8 rate_10G:1; + + u8 rsvd3:1; + u8 eee_100M:1; + u8 eee_1G:1; + u8 eee_2P5G:1; + u8 rsvd4:1; + u8 eee_5G:1; + u8 rsvd5:1; + u8 eee_10G:1; + + u8 pause_rx:1; + u8 pause_tx:1; + u8 pfc:1; + u8 downshift:1; + u8 downshift_retry:4; +}; + +struct sleep_proxy_caps_s { + u8 ipv4_offload:1; + u8 ipv6_offload:1; + u8 tcp_port_offload:1; + u8 udp_port_offload:1; + u8 ka4_offload:1; + u8 ka6_offload:1; + u8 mdns_offload:1; + u8 wake_on_ping:1; + + u8 wake_on_magic_packet:1; + u8 wake_on_pattern:1; + u8 wake_on_timer:1; + u8 wake_on_link:1; + u8 wake_patterns_count:4; + + u8 ipv4_count; + u8 ipv6_count; + + u8 tcp_port_offload_count; + u8 udp_port_offload_count; + + u8 tcp4_ka_count; + u8 tcp6_ka_count; + + u8 igmp_offload:1; + u8 mld_offload:1; + u8 rsvd:6; + + u8 rsvd2; + u16 rsvd3; +}; + +struct lkp_link_caps_s { + u8 rsvd:5; + u8 rate_10M_hd:1; + u8 rate_100M_hd:1; + u8 rate_1G_hd:1; + + u8 rate_10M:1; + u8 rate_100M:1; + u8 rate_1G:1; + u8 rate_2P5G:1; + u8 rate_N2P5G:1; + u8 rate_5G:1; + u8 rate_N5G:1; + u8 rate_10G:1; + + u8 rsvd2:1; + u8 eee_100M:1; + u8 eee_1G:1; + u8 eee_2P5G:1; + u8 rsvd3:1; + u8 eee_5G:1; + u8 rsvd4:1; + u8 eee_10G:1; + + u8 pause_rx:1; + u8 pause_tx:1; + u8 rsvd5:6; +}; + +struct core_dump_s { + u32 reg0; + u32 reg1; + u32 reg2; + + u32 hi; + u32 lo; + + u32 regs[32]; +}; + +struct trace_s { + u32 sync_counter; + u32 mem_buffer[0x1ff]; +}; + +struct cable_diag_control_s { + u8 toggle :1; + u8 rsvd:7; + + u8 wait_timeout_sec; + u16 rsvd2; +}; + +struct cable_diag_lane_data_s { + u8 result_code; + u8 dist; + u8 far_dist; + u8 rsvd; +}; + +struct cable_diag_status_s { + struct cable_diag_lane_data_s lane_data[4]; + u8 transact_id; + u8 status:4; + u8 rsvd:4; + u16 rsvd2; +}; + +struct statistics_a0_s { + struct { + u32 link_up; + u32 link_down; + } link; + + struct { + u64 tx_unicast_octets; + u64 tx_multicast_octets; + u64 tx_broadcast_octets; + u64 rx_unicast_octets; + u64 rx_multicast_octets; + u64 rx_broadcast_octets; + + u32 tx_unicast_frames; + u32 tx_multicast_frames; + u32 tx_broadcast_frames; + u32 tx_errors; + + u32 rx_unicast_frames; + u32 rx_multicast_frames; + u32 rx_broadcast_frames; + u32 rx_dropped_frames; + u32 rx_error_frames; + + u32 tx_good_frames; + u32 rx_good_frames; + u32 reserve_fw_gap; + } msm; + u32 main_loop_cycles; 
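+ /* reserved gap kept for parity with the firmware's A0 statistics layout */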
+ u32 reserve_fw_gap; +}; + +struct __packed statistics_b0_s { + u64 rx_good_octets; + u64 rx_pause_frames; + u64 rx_good_frames; + u64 rx_errors; + u64 rx_unicast_frames; + u64 rx_multicast_frames; + u64 rx_broadcast_frames; + + u64 tx_good_octets; + u64 tx_pause_frames; + u64 tx_good_frames; + u64 tx_errors; + u64 tx_unicast_frames; + u64 tx_multicast_frames; + u64 tx_broadcast_frames; + + u32 main_loop_cycles; +}; + +struct __packed statistics_s { + union __packed { + struct statistics_a0_s a0; + struct statistics_b0_s b0; + }; +}; + +struct filter_caps_s { + u8 l2_filters_base_index:6; + u8 flexible_filter_mask:2; + u8 l2_filter_count; + u8 ethertype_filter_base_index; + u8 ethertype_filter_count; + + u8 vlan_filter_base_index; + u8 vlan_filter_count; + u8 l3_ip4_filter_base_index:4; + u8 l3_ip4_filter_count:4; + u8 l3_ip6_filter_base_index:4; + u8 l3_ip6_filter_count:4; + + u8 l4_filter_base_index:4; + u8 l4_filter_count:4; + u8 l4_flex_filter_base_index:4; + u8 l4_flex_filter_count:4; + u8 rslv_tbl_base_index; + u8 rslv_tbl_count; +}; + +struct request_policy_s { + struct { + u8 all:1; + u8 mcast:1; + u8 rx_queue_tc_index:5; + u8 queue_or_tc:1; + } promisc; + + struct { + u8 accept:1; + u8 rsvd:1; + u8 rx_queue_tc_index:5; + u8 queue_or_tc:1; + } bcast; + + struct { + u8 accept:1; + u8 rsvd:1; + u8 rx_queue_tc_index:5; + u8 queue_or_tc:1; + } mcast; + + u8 rsvd:8; +}; + +struct fw_interface_in { + u32 mtu; + u32 rsvd1; + struct mac_address_aligned_s mac_address; + struct link_control_s link_control; + u32 rsvd2; + struct link_options_s link_options; + u32 rsvd3; + struct thermal_shutdown_s thermal_shutdown; + u32 rsvd4; + struct sleep_proxy_s sleep_proxy; + u32 rsvd5; + struct pause_quanta_s pause_quanta[8]; + struct cable_diag_control_s cable_diag_control; + u32 rsvd6; + struct data_buffer_status_s data_buffer_status; + u32 rsvd7; + struct request_policy_s request_policy; +}; + +struct transaction_counter_s { + u16 transaction_cnt_a; + u16 transaction_cnt_b; +}; + +struct management_status_s { + struct mac_address_s mac_address; + u16 vlan; + + struct{ + u32 enable : 1; + u32 rsvd:31; + } flags; + + u32 rsvd1; + u32 rsvd2; + u32 rsvd3; + u32 rsvd4; + u32 rsvd5; +}; + +struct __packed fw_interface_out { + struct transaction_counter_s transaction_id; + struct version_s version; + struct link_status_s link_status; + struct wol_status_s wol_status; + u32 rsvd; + u32 rsvd2; + struct mac_health_monitor_s mac_health_monitor; + u32 rsvd3; + u32 rsvd4; + struct phy_health_monitor_s phy_health_monitor; + u32 rsvd5; + u32 rsvd6; + struct cable_diag_status_s cable_diag_status; + u32 rsvd7; + struct device_link_caps_s device_link_caps; + u32 rsvd8; + struct sleep_proxy_caps_s sleep_proxy_caps; + u32 rsvd9; + struct lkp_link_caps_s lkp_link_caps; + u32 rsvd10; + struct core_dump_s core_dump; + u32 rsvd11; + struct statistics_s stats; + struct filter_caps_s filter_caps; + struct device_caps_s device_caps; + u32 rsvd13; + struct management_status_s management_status; + u32 reserve[21]; + struct trace_s trace; +}; + +#define AQ_A2_FW_LINK_RATE_INVALID 0 +#define AQ_A2_FW_LINK_RATE_10M 1 +#define AQ_A2_FW_LINK_RATE_100M 2 +#define AQ_A2_FW_LINK_RATE_1G 3 +#define AQ_A2_FW_LINK_RATE_2G5 4 +#define AQ_A2_FW_LINK_RATE_5G 5 +#define AQ_A2_FW_LINK_RATE_10G 6 + +#define AQ_HOST_MODE_INVALID 0U +#define AQ_HOST_MODE_ACTIVE 1U +#define AQ_HOST_MODE_SLEEP_PROXY 2U +#define AQ_HOST_MODE_LOW_POWER 3U +#define AQ_HOST_MODE_SHUTDOWN 4U + +#define AQ_A2_FW_INTERFACE_A0 0 +#define AQ_A2_FW_INTERFACE_B0 1 + +int 
hw_atl2_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops); + +int hw_atl2_utils_soft_reset(struct aq_hw_s *self); + +u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self); + +int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self, + u8 *base_index, u8 *count); + +extern const struct aq_fw_ops aq_a2_fw_ops; + +#endif /* HW_ATL2_UTILS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c new file mode 100644 index 000000000..58d426dda --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl2/hw_atl2_utils_fw.c @@ -0,0 +1,616 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* Atlantic Network Driver + * Copyright (C) 2020 Marvell International Ltd. + */ + +#include <linux/iopoll.h> + +#include "aq_hw.h" +#include "aq_hw_utils.h" +#include "aq_nic.h" +#include "hw_atl/hw_atl_llh.h" +#include "hw_atl2_utils.h" +#include "hw_atl2_llh.h" +#include "hw_atl2_internal.h" + +#define AQ_A2_FW_READ_TRY_MAX 1000 + +#define hw_atl2_shared_buffer_write(HW, ITEM, VARIABLE) \ +{\ + BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \ + sizeof(u32)) != 0,\ + "Unaligned write " # ITEM);\ + BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\ + "Unaligned write length " # ITEM);\ + hw_atl2_mif_shared_buf_write(HW,\ + (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\ + (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\ +} + +#define hw_atl2_shared_buffer_get(HW, ITEM, VARIABLE) \ +{\ + BUILD_BUG_ON_MSG((offsetof(struct fw_interface_in, ITEM) % \ + sizeof(u32)) != 0,\ + "Unaligned get " # ITEM);\ + BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\ + "Unaligned get length " # ITEM);\ + hw_atl2_mif_shared_buf_get(HW, \ + (offsetof(struct fw_interface_in, ITEM) / sizeof(u32)),\ + (u32 *)&(VARIABLE), \ + sizeof(VARIABLE) / sizeof(u32));\ +} + +/* This should never be used on non atomic fields, + * treat any > u32 read as non atomic. 
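+ * Wider reads take several MMIO accesses and can tear if the firmware
+ * rewrites the shared buffer in between; use
+ * hw_atl2_shared_buffer_read_safe() for multi-dword items instead.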
+ */ +#define hw_atl2_shared_buffer_read(HW, ITEM, VARIABLE) \ +{\ + BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \ + sizeof(u32)) != 0,\ + "Unaligned read " # ITEM);\ + BUILD_BUG_ON_MSG((sizeof(VARIABLE) % sizeof(u32)) != 0,\ + "Unaligned read length " # ITEM);\ + BUILD_BUG_ON_MSG(sizeof(VARIABLE) > sizeof(u32),\ + "Non atomic read " # ITEM);\ + hw_atl2_mif_shared_buf_read(HW, \ + (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\ + (u32 *)&(VARIABLE), sizeof(VARIABLE) / sizeof(u32));\ +} + +#define hw_atl2_shared_buffer_read_safe(HW, ITEM, DATA) \ +({\ + BUILD_BUG_ON_MSG((offsetof(struct fw_interface_out, ITEM) % \ + sizeof(u32)) != 0,\ + "Unaligned read_safe " # ITEM);\ + BUILD_BUG_ON_MSG((sizeof(((struct fw_interface_out *)0)->ITEM) % \ + sizeof(u32)) != 0,\ + "Unaligned read_safe length " # ITEM);\ + hw_atl2_shared_buffer_read_block((HW), \ + (offsetof(struct fw_interface_out, ITEM) / sizeof(u32)),\ + sizeof(((struct fw_interface_out *)0)->ITEM) / sizeof(u32),\ + (DATA));\ +}) + +static int hw_atl2_shared_buffer_read_block(struct aq_hw_s *self, + u32 offset, u32 dwords, void *data) +{ + struct transaction_counter_s tid1, tid2; + int cnt = 0; + + do { + do { + hw_atl2_shared_buffer_read(self, transaction_id, tid1); + cnt++; + if (cnt > AQ_A2_FW_READ_TRY_MAX) + return -ETIME; + if (tid1.transaction_cnt_a != tid1.transaction_cnt_b) + mdelay(1); + } while (tid1.transaction_cnt_a != tid1.transaction_cnt_b); + + hw_atl2_mif_shared_buf_read(self, offset, (u32 *)data, dwords); + + hw_atl2_shared_buffer_read(self, transaction_id, tid2); + + cnt++; + if (cnt > AQ_A2_FW_READ_TRY_MAX) + return -ETIME; + } while (tid2.transaction_cnt_a != tid2.transaction_cnt_b || + tid1.transaction_cnt_a != tid2.transaction_cnt_a); + + return 0; +} + +static inline int hw_atl2_shared_buffer_finish_ack(struct aq_hw_s *self) +{ + u32 val; + int err; + + hw_atl2_mif_host_finished_write_set(self, 1U); + err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get, + self, val, val == 0U, + 100, 100000U); + WARN(err, "hw_atl2_shared_buffer_finish_ack"); + + return err; +} + +static int aq_a2_fw_init(struct aq_hw_s *self) +{ + struct link_control_s link_control; + u32 mtu; + u32 val; + int err; + + hw_atl2_shared_buffer_get(self, link_control, link_control); + link_control.mode = AQ_HOST_MODE_ACTIVE; + hw_atl2_shared_buffer_write(self, link_control, link_control); + + hw_atl2_shared_buffer_get(self, mtu, mtu); + mtu = HW_ATL2_MTU_JUMBO; + hw_atl2_shared_buffer_write(self, mtu, mtu); + + hw_atl2_mif_host_finished_write_set(self, 1U); + err = readx_poll_timeout_atomic(hw_atl2_mif_mcp_finished_read_get, + self, val, val == 0U, + 100, 5000000U); + WARN(err, "hw_atl2_shared_buffer_finish_ack"); + + return err; +} + +static int aq_a2_fw_deinit(struct aq_hw_s *self) +{ + struct link_control_s link_control; + + hw_atl2_shared_buffer_get(self, link_control, link_control); + link_control.mode = AQ_HOST_MODE_SHUTDOWN; + hw_atl2_shared_buffer_write(self, link_control, link_control); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +static void a2_link_speed_mask2fw(u32 speed, + struct link_options_s *link_options) +{ + link_options->rate_10G = !!(speed & AQ_NIC_RATE_10G); + link_options->rate_5G = !!(speed & AQ_NIC_RATE_5G); + link_options->rate_N5G = link_options->rate_5G; + link_options->rate_2P5G = !!(speed & AQ_NIC_RATE_2G5); + link_options->rate_N2P5G = link_options->rate_2P5G; + link_options->rate_1G = !!(speed & AQ_NIC_RATE_1G); + link_options->rate_100M = !!(speed & AQ_NIC_RATE_100M); + 
link_options->rate_10M = !!(speed & AQ_NIC_RATE_10M); + + link_options->rate_1G_hd = !!(speed & AQ_NIC_RATE_1G_HALF); + link_options->rate_100M_hd = !!(speed & AQ_NIC_RATE_100M_HALF); + link_options->rate_10M_hd = !!(speed & AQ_NIC_RATE_10M_HALF); +} + +static u32 a2_fw_dev_to_eee_mask(struct device_link_caps_s *device_link_caps) +{ + u32 rate = 0; + + if (device_link_caps->eee_10G) + rate |= AQ_NIC_RATE_EEE_10G; + if (device_link_caps->eee_5G) + rate |= AQ_NIC_RATE_EEE_5G; + if (device_link_caps->eee_2P5G) + rate |= AQ_NIC_RATE_EEE_2G5; + if (device_link_caps->eee_1G) + rate |= AQ_NIC_RATE_EEE_1G; + if (device_link_caps->eee_100M) + rate |= AQ_NIC_RATE_EEE_100M; + + return rate; +} + +static u32 a2_fw_lkp_to_mask(struct lkp_link_caps_s *lkp_link_caps) +{ + u32 rate = 0; + + if (lkp_link_caps->rate_10G) + rate |= AQ_NIC_RATE_10G; + if (lkp_link_caps->rate_5G) + rate |= AQ_NIC_RATE_5G; + if (lkp_link_caps->rate_2P5G) + rate |= AQ_NIC_RATE_2G5; + if (lkp_link_caps->rate_1G) + rate |= AQ_NIC_RATE_1G; + if (lkp_link_caps->rate_1G_hd) + rate |= AQ_NIC_RATE_1G_HALF; + if (lkp_link_caps->rate_100M) + rate |= AQ_NIC_RATE_100M; + if (lkp_link_caps->rate_100M_hd) + rate |= AQ_NIC_RATE_100M_HALF; + if (lkp_link_caps->rate_10M) + rate |= AQ_NIC_RATE_10M; + if (lkp_link_caps->rate_10M_hd) + rate |= AQ_NIC_RATE_10M_HALF; + + if (lkp_link_caps->eee_10G) + rate |= AQ_NIC_RATE_EEE_10G; + if (lkp_link_caps->eee_5G) + rate |= AQ_NIC_RATE_EEE_5G; + if (lkp_link_caps->eee_2P5G) + rate |= AQ_NIC_RATE_EEE_2G5; + if (lkp_link_caps->eee_1G) + rate |= AQ_NIC_RATE_EEE_1G; + if (lkp_link_caps->eee_100M) + rate |= AQ_NIC_RATE_EEE_100M; + + return rate; +} + +static int aq_a2_fw_set_link_speed(struct aq_hw_s *self, u32 speed) +{ + struct link_options_s link_options; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + link_options.link_up = 1U; + a2_link_speed_mask2fw(speed, &link_options); + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +static void aq_a2_fw_set_mpi_flow_control(struct aq_hw_s *self, + struct link_options_s *link_options) +{ + u32 flow_control = self->aq_nic_cfg->fc.req; + + link_options->pause_rx = !!(flow_control & AQ_NIC_FC_RX); + link_options->pause_tx = !!(flow_control & AQ_NIC_FC_TX); +} + +static void aq_a2_fw_upd_eee_rate_bits(struct aq_hw_s *self, + struct link_options_s *link_options, + u32 eee_speeds) +{ + link_options->eee_10G = !!(eee_speeds & AQ_NIC_RATE_EEE_10G); + link_options->eee_5G = !!(eee_speeds & AQ_NIC_RATE_EEE_5G); + link_options->eee_2P5G = !!(eee_speeds & AQ_NIC_RATE_EEE_2G5); + link_options->eee_1G = !!(eee_speeds & AQ_NIC_RATE_EEE_1G); + link_options->eee_100M = !!(eee_speeds & AQ_NIC_RATE_EEE_100M); +} + +static int aq_a2_fw_set_state(struct aq_hw_s *self, + enum hal_atl_utils_fw_state_e state) +{ + struct link_options_s link_options; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + + switch (state) { + case MPI_INIT: + link_options.link_up = 1U; + aq_a2_fw_upd_eee_rate_bits(self, &link_options, + self->aq_nic_cfg->eee_speeds); + aq_a2_fw_set_mpi_flow_control(self, &link_options); + break; + case MPI_DEINIT: + link_options.link_up = 0U; + break; + case MPI_RESET: + case MPI_POWER: + /* No actions */ + break; + } + + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +static int aq_a2_fw_update_link_status(struct aq_hw_s *self) +{ + struct lkp_link_caps_s lkp_link_caps; + struct link_status_s 
link_status; + + hw_atl2_shared_buffer_read(self, link_status, link_status); + + switch (link_status.link_rate) { + case AQ_A2_FW_LINK_RATE_10G: + self->aq_link_status.mbps = 10000; + break; + case AQ_A2_FW_LINK_RATE_5G: + self->aq_link_status.mbps = 5000; + break; + case AQ_A2_FW_LINK_RATE_2G5: + self->aq_link_status.mbps = 2500; + break; + case AQ_A2_FW_LINK_RATE_1G: + self->aq_link_status.mbps = 1000; + break; + case AQ_A2_FW_LINK_RATE_100M: + self->aq_link_status.mbps = 100; + break; + case AQ_A2_FW_LINK_RATE_10M: + self->aq_link_status.mbps = 10; + break; + default: + self->aq_link_status.mbps = 0; + } + self->aq_link_status.full_duplex = link_status.duplex; + + hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps); + + self->aq_link_status.lp_link_speed_msk = + a2_fw_lkp_to_mask(&lkp_link_caps); + self->aq_link_status.lp_flow_control = + ((lkp_link_caps.pause_rx) ? AQ_NIC_FC_RX : 0) | + ((lkp_link_caps.pause_tx) ? AQ_NIC_FC_TX : 0); + + return 0; +} + +static int aq_a2_fw_get_mac_permanent(struct aq_hw_s *self, u8 *mac) +{ + struct mac_address_aligned_s mac_address; + + hw_atl2_shared_buffer_get(self, mac_address, mac_address); + ether_addr_copy(mac, (u8 *)mac_address.aligned.mac_address); + + return 0; +} + +static void aq_a2_fill_a0_stats(struct aq_hw_s *self, + struct statistics_s *stats) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; + bool corrupted_stats = false; + +#define AQ_SDELTA(_N, _F) \ +do { \ + if (!corrupted_stats && \ + ((s64)(stats->a0.msm._F - priv->last_stats.a0.msm._F)) >= 0) \ + curr_stats._N += stats->a0.msm._F - priv->last_stats.a0.msm._F;\ + else \ + corrupted_stats = true; \ +} while (0) + + if (self->aq_link_status.mbps) { + AQ_SDELTA(uprc, rx_unicast_frames); + AQ_SDELTA(mprc, rx_multicast_frames); + AQ_SDELTA(bprc, rx_broadcast_frames); + AQ_SDELTA(erpr, rx_error_frames); + + AQ_SDELTA(uptc, tx_unicast_frames); + AQ_SDELTA(mptc, tx_multicast_frames); + AQ_SDELTA(bptc, tx_broadcast_frames); + AQ_SDELTA(erpt, tx_errors); + + AQ_SDELTA(ubrc, rx_unicast_octets); + AQ_SDELTA(ubtc, tx_unicast_octets); + AQ_SDELTA(mbrc, rx_multicast_octets); + AQ_SDELTA(mbtc, tx_multicast_octets); + AQ_SDELTA(bbrc, rx_broadcast_octets); + AQ_SDELTA(bbtc, tx_broadcast_octets); + + if (!corrupted_stats) + *cs = curr_stats; + } +#undef AQ_SDELTA + +} + +static void aq_a2_fill_b0_stats(struct aq_hw_s *self, + struct statistics_s *stats) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_stats_s *cs = &self->curr_stats; + struct aq_stats_s curr_stats = *cs; + bool corrupted_stats = false; + +#define AQ_SDELTA(_N, _F) \ +do { \ + if (!corrupted_stats && \ + ((s64)(stats->b0._F - priv->last_stats.b0._F)) >= 0) \ + curr_stats._N += stats->b0._F - priv->last_stats.b0._F; \ + else \ + corrupted_stats = true; \ +} while (0) + + if (self->aq_link_status.mbps) { + AQ_SDELTA(uprc, rx_unicast_frames); + AQ_SDELTA(mprc, rx_multicast_frames); + AQ_SDELTA(bprc, rx_broadcast_frames); + AQ_SDELTA(erpr, rx_errors); + AQ_SDELTA(brc, rx_good_octets); + + AQ_SDELTA(uptc, tx_unicast_frames); + AQ_SDELTA(mptc, tx_multicast_frames); + AQ_SDELTA(bptc, tx_broadcast_frames); + AQ_SDELTA(erpt, tx_errors); + AQ_SDELTA(btc, tx_good_octets); + + if (!corrupted_stats) + *cs = curr_stats; + } +#undef AQ_SDELTA +} + +static int aq_a2_fw_update_stats(struct aq_hw_s *self) +{ + struct hw_atl2_priv *priv = (struct hw_atl2_priv *)self->priv; + struct aq_stats_s *cs = 
&self->curr_stats; + struct statistics_s stats; + struct version_s version; + int err; + + err = hw_atl2_shared_buffer_read_safe(self, version, &version); + if (err) + return err; + + err = hw_atl2_shared_buffer_read_safe(self, stats, &stats); + if (err) + return err; + + if (version.drv_iface_ver == AQ_A2_FW_INTERFACE_A0) + aq_a2_fill_a0_stats(self, &stats); + else + aq_a2_fill_b0_stats(self, &stats); + + cs->dma_pkt_rc = hw_atl_stats_rx_dma_good_pkt_counter_get(self); + cs->dma_pkt_tc = hw_atl_stats_tx_dma_good_pkt_counter_get(self); + cs->dma_oct_rc = hw_atl_stats_rx_dma_good_octet_counter_get(self); + cs->dma_oct_tc = hw_atl_stats_tx_dma_good_octet_counter_get(self); + cs->dpc = hw_atl_rpb_rx_dma_drop_pkt_cnt_get(self); + + memcpy(&priv->last_stats, &stats, sizeof(stats)); + + return 0; +} + +static int aq_a2_fw_get_phy_temp(struct aq_hw_s *self, int *temp) +{ + struct phy_health_monitor_s phy_health_monitor; + + hw_atl2_shared_buffer_read_safe(self, phy_health_monitor, + &phy_health_monitor); + + *temp = (int8_t)phy_health_monitor.phy_temperature * 1000; + return 0; +} + +static int aq_a2_fw_get_mac_temp(struct aq_hw_s *self, int *temp) +{ + /* There's only one temperature sensor on A2, use it for + * both MAC and PHY. + */ + return aq_a2_fw_get_phy_temp(self, temp); +} + +static int aq_a2_fw_set_eee_rate(struct aq_hw_s *self, u32 speed) +{ + struct link_options_s link_options; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + + aq_a2_fw_upd_eee_rate_bits(self, &link_options, speed); + + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +static int aq_a2_fw_get_eee_rate(struct aq_hw_s *self, u32 *rate, + u32 *supported_rates) +{ + struct device_link_caps_s device_link_caps; + struct lkp_link_caps_s lkp_link_caps; + + hw_atl2_shared_buffer_read(self, device_link_caps, device_link_caps); + hw_atl2_shared_buffer_read(self, lkp_link_caps, lkp_link_caps); + + *supported_rates = a2_fw_dev_to_eee_mask(&device_link_caps); + *rate = a2_fw_lkp_to_mask(&lkp_link_caps); + + return 0; +} + +static int aq_a2_fw_renegotiate(struct aq_hw_s *self) +{ + struct link_options_s link_options; + int err; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + link_options.link_renegotiate = 1U; + hw_atl2_shared_buffer_write(self, link_options, link_options); + + err = hw_atl2_shared_buffer_finish_ack(self); + + /* We should put renegotiate status back to zero + * after command completes + */ + link_options.link_renegotiate = 0U; + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return err; +} + +static int aq_a2_fw_set_flow_control(struct aq_hw_s *self) +{ + struct link_options_s link_options; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + + aq_a2_fw_set_mpi_flow_control(self, &link_options); + + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +static u32 aq_a2_fw_get_flow_control(struct aq_hw_s *self, u32 *fcmode) +{ + struct link_status_s link_status; + + hw_atl2_shared_buffer_read(self, link_status, link_status); + + *fcmode = ((link_status.pause_rx) ? AQ_NIC_FC_RX : 0) | + ((link_status.pause_tx) ? 
AQ_NIC_FC_TX : 0); + return 0; +} + +static int aq_a2_fw_set_phyloopback(struct aq_hw_s *self, u32 mode, bool enable) +{ + struct link_options_s link_options; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + + switch (mode) { + case AQ_HW_LOOPBACK_PHYINT_SYS: + link_options.internal_loopback = enable; + break; + case AQ_HW_LOOPBACK_PHYEXT_SYS: + link_options.external_loopback = enable; + break; + default: + return -EINVAL; + } + + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +u32 hw_atl2_utils_get_fw_version(struct aq_hw_s *self) +{ + struct version_s version; + + hw_atl2_shared_buffer_read_safe(self, version, &version); + + /* A2 FW version is stored in reverse order */ + return version.bundle.major << 24 | + version.bundle.minor << 16 | + version.bundle.build; +} + +int hw_atl2_utils_get_action_resolve_table_caps(struct aq_hw_s *self, + u8 *base_index, u8 *count) +{ + struct filter_caps_s filter_caps; + int err; + + err = hw_atl2_shared_buffer_read_safe(self, filter_caps, &filter_caps); + if (err) + return err; + + *base_index = filter_caps.rslv_tbl_base_index; + *count = filter_caps.rslv_tbl_count; + return 0; +} + +static int aq_a2_fw_set_downshift(struct aq_hw_s *self, u32 counter) +{ + struct link_options_s link_options; + + hw_atl2_shared_buffer_get(self, link_options, link_options); + link_options.downshift = !!counter; + link_options.downshift_retry = counter; + hw_atl2_shared_buffer_write(self, link_options, link_options); + + return hw_atl2_shared_buffer_finish_ack(self); +} + +const struct aq_fw_ops aq_a2_fw_ops = { + .init = aq_a2_fw_init, + .deinit = aq_a2_fw_deinit, + .reset = NULL, + .renegotiate = aq_a2_fw_renegotiate, + .get_mac_permanent = aq_a2_fw_get_mac_permanent, + .set_link_speed = aq_a2_fw_set_link_speed, + .set_state = aq_a2_fw_set_state, + .update_link_status = aq_a2_fw_update_link_status, + .update_stats = aq_a2_fw_update_stats, + .get_mac_temp = aq_a2_fw_get_mac_temp, + .get_phy_temp = aq_a2_fw_get_phy_temp, + .set_eee_rate = aq_a2_fw_set_eee_rate, + .get_eee_rate = aq_a2_fw_get_eee_rate, + .set_flow_control = aq_a2_fw_set_flow_control, + .get_flow_control = aq_a2_fw_get_flow_control, + .set_phyloopback = aq_a2_fw_set_phyloopback, + .set_downshift = aq_a2_fw_set_downshift, +}; |
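A note on the shared-buffer read path above: hw_atl2_shared_buffer_read_block() only accepts a snapshot when the firmware's pair of transaction counters are equal and unchanged across the copy, retrying up to AQ_A2_FW_READ_TRY_MAX times. The sketch below is a standalone, user-space model of just that retry protocol; it is not driver code, and every name in it (shared_buffer_model, fw_update, host_read) is illustrative.

/*
 * Minimal model of the consistency check used by
 * hw_atl2_shared_buffer_read_block(): the writer bumps one counter before
 * rewriting the buffer and the other one afterwards, so a reader only
 * accepts data when both counters match before and after the copy.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct transaction_counter_model {
	uint16_t cnt_a;
	uint16_t cnt_b;
};

struct shared_buffer_model {
	struct transaction_counter_model tid;
	uint32_t payload[4];
};

/* "Firmware" side: the buffer is inconsistent while it is being rewritten. */
static void fw_update(struct shared_buffer_model *buf, uint32_t seed)
{
	buf->tid.cnt_a++;			/* mark update in progress */
	for (int i = 0; i < 4; i++)
		buf->payload[i] = seed + i;
	buf->tid.cnt_b = buf->tid.cnt_a;	/* mark update complete */
}

/* "Host" side: retry until the counters match before and after the copy. */
static int host_read(const struct shared_buffer_model *buf,
		     uint32_t out[4], int max_tries)
{
	struct transaction_counter_model t1, t2;

	for (int attempt = 0; attempt < max_tries; attempt++) {
		t1 = buf->tid;
		if (t1.cnt_a != t1.cnt_b)
			continue;		/* writer is mid-update, retry */
		memcpy(out, buf->payload, sizeof(buf->payload));
		t2 = buf->tid;
		if (t2.cnt_a == t2.cnt_b && t1.cnt_a == t2.cnt_a)
			return 0;		/* stable snapshot */
	}
	return -1;				/* analogue of the -ETIME above */
}

int main(void)
{
	struct shared_buffer_model buf = { 0 };
	uint32_t snapshot[4];

	fw_update(&buf, 100);
	if (host_read(&buf, snapshot, 1000) == 0)
		printf("payload[0] = %u\n", (unsigned int)snapshot[0]);
	return 0;
}

The double-read scheme is the same idea as a Linux seqcount, only implemented over a firmware-owned MMIO window, which is why the driver bounds the retries with AQ_A2_FW_READ_TRY_MAX instead of spinning indefinitely.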