Diffstat (limited to 'drivers/net/ethernet/mellanox/mlxsw')
 16 files changed, 477 insertions(+), 286 deletions(-)
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c index 025e0db983..b032d5a4b3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_linecards.c @@ -1484,6 +1484,7 @@ err_type_file_file_validate: vfree(types_info->data); err_data_alloc: kfree(types_info); + linecards->types_info = NULL; return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index f42a1b1c93..d92f640bae 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -8,7 +8,6 @@ #include <linux/device.h> #include <linux/pci.h> #include <linux/interrupt.h> -#include <linux/wait.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/if_vlan.h> @@ -36,6 +35,11 @@ enum mlxsw_pci_queue_type { #define MLXSW_PCI_QUEUE_TYPE_COUNT 4 +enum mlxsw_pci_cq_type { + MLXSW_PCI_CQ_SDQ, + MLXSW_PCI_CQ_RDQ, +}; + static const u16 mlxsw_pci_doorbell_type_offset[] = { MLXSW_PCI_DOORBELL_SDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_SDQ */ MLXSW_PCI_DOORBELL_RDQ_OFFSET, /* for type MLXSW_PCI_QUEUE_TYPE_RDQ */ @@ -78,18 +82,15 @@ struct mlxsw_pci_queue { u8 num; /* queue number */ u8 elem_size; /* size of one element */ enum mlxsw_pci_queue_type type; - struct tasklet_struct tasklet; /* queue processing tasklet */ struct mlxsw_pci *pci; union { struct { - u32 comp_sdq_count; - u32 comp_rdq_count; enum mlxsw_pci_cqe_v v; + struct mlxsw_pci_queue *dq; + struct napi_struct napi; } cq; struct { - u32 ev_cmd_count; - u32 ev_comp_count; - u32 ev_other_count; + struct tasklet_struct tasklet; } eq; } u; }; @@ -120,9 +121,6 @@ struct mlxsw_pci { struct mlxsw_pci_mem_item out_mbox; struct mlxsw_pci_mem_item in_mbox; struct mutex lock; /* Lock access to command registers */ - bool nopoll; - wait_queue_head_t wait; - bool wait_done; struct { u8 status; u64 out_param; @@ -131,13 +129,43 @@ struct mlxsw_pci { struct mlxsw_bus_info bus_info; const struct pci_device_id *id; enum mlxsw_pci_cqe_v max_cqe_ver; /* Maximal supported CQE version */ - u8 num_sdq_cqs; /* Number of CQs used for SDQs */ + u8 num_cqs; /* Number of CQs */ + u8 num_sdqs; /* Number of SDQs */ bool skip_reset; + struct net_device *napi_dev_tx; + struct net_device *napi_dev_rx; }; -static void mlxsw_pci_queue_tasklet_schedule(struct mlxsw_pci_queue *q) +static int mlxsw_pci_napi_devs_init(struct mlxsw_pci *mlxsw_pci) +{ + int err; + + mlxsw_pci->napi_dev_tx = alloc_netdev_dummy(0); + if (!mlxsw_pci->napi_dev_tx) + return -ENOMEM; + strscpy(mlxsw_pci->napi_dev_tx->name, "mlxsw_tx", + sizeof(mlxsw_pci->napi_dev_tx->name)); + + mlxsw_pci->napi_dev_rx = alloc_netdev_dummy(0); + if (!mlxsw_pci->napi_dev_rx) { + err = -ENOMEM; + goto err_alloc_rx; + } + strscpy(mlxsw_pci->napi_dev_rx->name, "mlxsw_rx", + sizeof(mlxsw_pci->napi_dev_rx->name)); + dev_set_threaded(mlxsw_pci->napi_dev_rx, true); + + return 0; + +err_alloc_rx: + free_netdev(mlxsw_pci->napi_dev_tx); + return err; +} + +static void mlxsw_pci_napi_devs_fini(struct mlxsw_pci *mlxsw_pci) { - tasklet_schedule(&q->tasklet); + free_netdev(mlxsw_pci->napi_dev_rx); + free_netdev(mlxsw_pci->napi_dev_tx); } static char *__mlxsw_pci_queue_elem_get(struct mlxsw_pci_queue *q, @@ -187,25 +215,6 @@ mlxsw_pci_queue_type_group_get(struct mlxsw_pci *mlxsw_pci, return &mlxsw_pci->queues[q_type]; } -static u8 __mlxsw_pci_queue_count(struct mlxsw_pci *mlxsw_pci, - enum mlxsw_pci_queue_type q_type) -{ - struct 
mlxsw_pci_queue_type_group *queue_group; - - queue_group = mlxsw_pci_queue_type_group_get(mlxsw_pci, q_type); - return queue_group->count; -} - -static u8 mlxsw_pci_sdq_count(struct mlxsw_pci *mlxsw_pci) -{ - return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ); -} - -static u8 mlxsw_pci_cq_count(struct mlxsw_pci *mlxsw_pci) -{ - return __mlxsw_pci_queue_count(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ); -} - static struct mlxsw_pci_queue * __mlxsw_pci_queue_get(struct mlxsw_pci *mlxsw_pci, enum mlxsw_pci_queue_type q_type, u8 q_num) @@ -220,23 +229,16 @@ static struct mlxsw_pci_queue *mlxsw_pci_sdq_get(struct mlxsw_pci *mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_SDQ, q_num); } -static struct mlxsw_pci_queue *mlxsw_pci_rdq_get(struct mlxsw_pci *mlxsw_pci, - u8 q_num) -{ - return __mlxsw_pci_queue_get(mlxsw_pci, - MLXSW_PCI_QUEUE_TYPE_RDQ, q_num); -} - static struct mlxsw_pci_queue *mlxsw_pci_cq_get(struct mlxsw_pci *mlxsw_pci, u8 q_num) { return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_CQ, q_num); } -static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci, - u8 q_num) +static struct mlxsw_pci_queue *mlxsw_pci_eq_get(struct mlxsw_pci *mlxsw_pci) { - return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, q_num); + /* There is only one EQ at index 0. */ + return __mlxsw_pci_queue_get(mlxsw_pci, MLXSW_PCI_QUEUE_TYPE_EQ, 0); } static void __mlxsw_pci_queue_doorbell_set(struct mlxsw_pci *mlxsw_pci, @@ -291,7 +293,9 @@ static dma_addr_t __mlxsw_pci_queue_page_get(struct mlxsw_pci_queue *q, static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { + struct mlxsw_pci_queue *cq; int tclass; + u8 cq_num; int lp; int i; int err; @@ -304,7 +308,8 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, MLXSW_CMD_MBOX_SW2HW_DQ_SDQ_LP_WQE; /* Set CQ of same number of this SDQ. */ - mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, q->num); + cq_num = q->num; + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num); mlxsw_cmd_mbox_sw2hw_dq_sdq_lp_set(mbox, lp); mlxsw_cmd_mbox_sw2hw_dq_sdq_tclass_set(mbox, tclass); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ @@ -317,6 +322,9 @@ static int mlxsw_pci_sdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, err = mlxsw_cmd_sw2hw_sdq(mlxsw_pci->core, mbox, q->num); if (err) return err; + + cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num); + cq->u.cq.dq = q; mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); return 0; } @@ -399,7 +407,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { struct mlxsw_pci_queue_elem_info *elem_info; - u8 sdq_count = mlxsw_pci_sdq_count(mlxsw_pci); + u8 sdq_count = mlxsw_pci->num_sdqs; + struct mlxsw_pci_queue *cq; + u8 cq_num; int i; int err; @@ -409,7 +419,8 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, /* Set CQ of same number of this RDQ with base * above SDQ count as the lower ones are assigned to SDQs. 
*/ - mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, sdq_count + q->num); + cq_num = sdq_count + q->num; + mlxsw_cmd_mbox_sw2hw_dq_cq_set(mbox, cq_num); mlxsw_cmd_mbox_sw2hw_dq_log2_dq_sz_set(mbox, 3); /* 8 pages */ for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); @@ -421,6 +432,9 @@ static int mlxsw_pci_rdq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, if (err) return err; + cq = mlxsw_pci_cq_get(mlxsw_pci, cq_num); + cq->u.cq.dq = q; + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); for (i = 0; i < q->count; i++) { @@ -441,6 +455,7 @@ rollback: elem_info = mlxsw_pci_queue_elem_info_get(q, i); mlxsw_pci_rdq_skb_free(mlxsw_pci, elem_info); } + cq->u.cq.dq = NULL; mlxsw_cmd_hw2sw_rdq(mlxsw_pci->core, q->num); return err; @@ -465,54 +480,11 @@ static void mlxsw_pci_cq_pre_init(struct mlxsw_pci *mlxsw_pci, q->u.cq.v = mlxsw_pci->max_cqe_ver; if (q->u.cq.v == MLXSW_PCI_CQE_V2 && - q->num < mlxsw_pci->num_sdq_cqs && + q->num < mlxsw_pci->num_sdqs && !mlxsw_core_sdq_supports_cqe_v2(mlxsw_pci->core)) q->u.cq.v = MLXSW_PCI_CQE_V1; } -static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, - struct mlxsw_pci_queue *q) -{ - int i; - int err; - - q->consumer_counter = 0; - - for (i = 0; i < q->count; i++) { - char *elem = mlxsw_pci_queue_elem_get(q, i); - - mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1); - } - - if (q->u.cq.v == MLXSW_PCI_CQE_V1) - mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, - MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1); - else if (q->u.cq.v == MLXSW_PCI_CQE_V2) - mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, - MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2); - - mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); - mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); - mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); - for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { - dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); - - mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr); - } - err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num); - if (err) - return err; - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); - mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); - return 0; -} - -static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, - struct mlxsw_pci_queue *q) -{ - mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); -} - static unsigned int mlxsw_pci_read32_off(struct mlxsw_pci *mlxsw_pci, ptrdiff_t off) { @@ -692,9 +664,7 @@ static void mlxsw_pci_cqe_rdq_handle(struct mlxsw_pci *mlxsw_pci, mlxsw_core_skb_receive(mlxsw_pci->core, skb, &rx_info); out: - /* Everything is set up, ring doorbell to pass elem to HW */ q->producer_counter++; - mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, q); return; } @@ -714,58 +684,165 @@ static char *mlxsw_pci_cq_sw_cqe_get(struct mlxsw_pci_queue *q) return elem; } -static void mlxsw_pci_cq_tasklet(struct tasklet_struct *t) +static bool mlxsw_pci_cq_cqe_to_handle(struct mlxsw_pci_queue *q) +{ + struct mlxsw_pci_queue_elem_info *elem_info; + bool owner_bit; + + elem_info = mlxsw_pci_queue_elem_info_consumer_get(q); + owner_bit = mlxsw_pci_cqe_owner_get(q->u.cq.v, elem_info->elem); + return !mlxsw_pci_elem_hw_owned(q, owner_bit); +} + +static int mlxsw_pci_napi_poll_cq_rx(struct napi_struct *napi, int budget) { - struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet); + struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue, + u.cq.napi); + struct mlxsw_pci_queue *rdq = q->u.cq.dq; struct mlxsw_pci *mlxsw_pci = q->pci; + int work_done = 0; char *cqe; - int items = 0; - int credits = 
q->count >> 1; + + /* If the budget is 0, Rx processing should be skipped. */ + if (unlikely(!budget)) + return 0; while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); - char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; - memcpy(ncqe, cqe, q->elem_size); - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + if (unlikely(sendq)) { + WARN_ON_ONCE(1); + continue; + } - if (sendq) { - struct mlxsw_pci_queue *sdq; + if (unlikely(dqn != rdq->num)) { + WARN_ON_ONCE(1); + continue; + } - sdq = mlxsw_pci_sdq_get(mlxsw_pci, dqn); - mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, - wqe_counter, q->u.cq.v, ncqe); - q->u.cq.comp_sdq_count++; - } else { - struct mlxsw_pci_queue *rdq; + mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, + wqe_counter, q->u.cq.v, cqe); - rdq = mlxsw_pci_rdq_get(mlxsw_pci, dqn); - mlxsw_pci_cqe_rdq_handle(mlxsw_pci, rdq, - wqe_counter, q->u.cq.v, ncqe); - q->u.cq.comp_rdq_count++; - } - if (++items == credits) + if (++work_done == budget) break; } - if (items) + + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_producer_ring(mlxsw_pci, rdq); + + if (work_done < budget) + goto processing_completed; + + /* The driver still has outstanding work to do, budget was exhausted. + * Return exactly budget. In that case, the NAPI instance will be polled + * again. + */ + if (mlxsw_pci_cq_cqe_to_handle(q)) + goto out; + + /* The driver processed all the completions and handled exactly + * 'budget'. Return 'budget - 1' to distinguish from the case that + * driver still has completions to handle. + */ + if (work_done == budget) + work_done--; + +processing_completed: + if (napi_complete_done(napi, work_done)) mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); +out: + return work_done; } -static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) +static int mlxsw_pci_napi_poll_cq_tx(struct napi_struct *napi, int budget) { - return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT : - MLXSW_PCI_CQE01_COUNT; + struct mlxsw_pci_queue *q = container_of(napi, struct mlxsw_pci_queue, + u.cq.napi); + struct mlxsw_pci_queue *sdq = q->u.cq.dq; + struct mlxsw_pci *mlxsw_pci = q->pci; + int work_done = 0; + char *cqe; + + while ((cqe = mlxsw_pci_cq_sw_cqe_get(q))) { + u16 wqe_counter = mlxsw_pci_cqe_wqe_counter_get(cqe); + u8 sendq = mlxsw_pci_cqe_sr_get(q->u.cq.v, cqe); + u8 dqn = mlxsw_pci_cqe_dqn_get(q->u.cq.v, cqe); + char ncqe[MLXSW_PCI_CQE_SIZE_MAX]; + + if (unlikely(!sendq)) { + WARN_ON_ONCE(1); + continue; + } + + if (unlikely(dqn != sdq->num)) { + WARN_ON_ONCE(1); + continue; + } + + memcpy(ncqe, cqe, q->elem_size); + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + + mlxsw_pci_cqe_sdq_handle(mlxsw_pci, sdq, + wqe_counter, q->u.cq.v, ncqe); + + work_done++; + } + + /* If the budget is 0 napi_complete_done() should never be called. */ + if (unlikely(!budget)) + goto processing_completed; + + work_done = min(work_done, budget - 1); + if (unlikely(!napi_complete_done(napi, work_done))) + goto out; + +processing_completed: + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); +out: + return work_done; } -static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q) +static enum mlxsw_pci_cq_type +mlxsw_pci_cq_type(const struct mlxsw_pci *mlxsw_pci, + const struct mlxsw_pci_queue *q) { - return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_SIZE : - MLXSW_PCI_CQE01_SIZE; + /* Each CQ is mapped to one DQ. 
The first 'num_sdqs' queues are used + * for SDQs and the rest are used for RDQs. + */ + if (q->num < mlxsw_pci->num_sdqs) + return MLXSW_PCI_CQ_SDQ; + + return MLXSW_PCI_CQ_RDQ; } -static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, +static void mlxsw_pci_cq_napi_setup(struct mlxsw_pci_queue *q, + enum mlxsw_pci_cq_type cq_type) +{ + struct mlxsw_pci *mlxsw_pci = q->pci; + + switch (cq_type) { + case MLXSW_PCI_CQ_SDQ: + netif_napi_add(mlxsw_pci->napi_dev_tx, &q->u.cq.napi, + mlxsw_pci_napi_poll_cq_tx); + break; + case MLXSW_PCI_CQ_RDQ: + netif_napi_add(mlxsw_pci->napi_dev_rx, &q->u.cq.napi, + mlxsw_pci_napi_poll_cq_rx); + break; + } + + napi_enable(&q->u.cq.napi); +} + +static void mlxsw_pci_cq_napi_teardown(struct mlxsw_pci_queue *q) +{ + napi_disable(&q->u.cq.napi); + netif_napi_del(&q->u.cq.napi); +} + +static int mlxsw_pci_cq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, struct mlxsw_pci_queue *q) { int i; @@ -776,39 +853,50 @@ static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, for (i = 0; i < q->count; i++) { char *elem = mlxsw_pci_queue_elem_get(q, i); - mlxsw_pci_eqe_owner_set(elem, 1); + mlxsw_pci_cqe_owner_set(q->u.cq.v, elem, 1); } - mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */ - mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ - mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); + if (q->u.cq.v == MLXSW_PCI_CQE_V1) + mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_1); + else if (q->u.cq.v == MLXSW_PCI_CQE_V2) + mlxsw_cmd_mbox_sw2hw_cq_cqe_ver_set(mbox, + MLXSW_CMD_MBOX_SW2HW_CQ_CQE_VER_2); + + mlxsw_cmd_mbox_sw2hw_cq_c_eqn_set(mbox, MLXSW_PCI_EQ_COMP_NUM); + mlxsw_cmd_mbox_sw2hw_cq_st_set(mbox, 0); + mlxsw_cmd_mbox_sw2hw_cq_log_cq_size_set(mbox, ilog2(q->count)); for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); - mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr); + mlxsw_cmd_mbox_sw2hw_cq_pa_set(mbox, i, mapaddr); } - err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num); + err = mlxsw_cmd_sw2hw_cq(mlxsw_pci->core, mbox, q->num); if (err) return err; + mlxsw_pci_cq_napi_setup(q, mlxsw_pci_cq_type(mlxsw_pci, q)); mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); return 0; } -static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci, +static void mlxsw_pci_cq_fini(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q) { - mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num); + mlxsw_pci_cq_napi_teardown(q); + mlxsw_cmd_hw2sw_cq(mlxsw_pci->core, q->num); } -static void mlxsw_pci_eq_cmd_event(struct mlxsw_pci *mlxsw_pci, char *eqe) +static u16 mlxsw_pci_cq_elem_count(const struct mlxsw_pci_queue *q) { - mlxsw_pci->cmd.comp.status = mlxsw_pci_eqe_cmd_status_get(eqe); - mlxsw_pci->cmd.comp.out_param = - ((u64) mlxsw_pci_eqe_cmd_out_param_h_get(eqe)) << 32 | - mlxsw_pci_eqe_cmd_out_param_l_get(eqe); - mlxsw_pci->cmd.wait_done = true; - wake_up(&mlxsw_pci->cmd.wait); + return q->u.cq.v == MLXSW_PCI_CQE_V2 ? MLXSW_PCI_CQE2_COUNT : + MLXSW_PCI_CQE01_COUNT; +} + +static u8 mlxsw_pci_cq_elem_size(const struct mlxsw_pci_queue *q) +{ + return q->u.cq.v == MLXSW_PCI_CQE_V2 ? 
MLXSW_PCI_CQE2_SIZE : + MLXSW_PCI_CQE01_SIZE; } static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q) @@ -829,54 +917,81 @@ static char *mlxsw_pci_eq_sw_eqe_get(struct mlxsw_pci_queue *q) static void mlxsw_pci_eq_tasklet(struct tasklet_struct *t) { - struct mlxsw_pci_queue *q = from_tasklet(q, t, tasklet); - struct mlxsw_pci *mlxsw_pci = q->pci; - u8 cq_count = mlxsw_pci_cq_count(mlxsw_pci); unsigned long active_cqns[BITS_TO_LONGS(MLXSW_PCI_CQS_MAX)]; - char *eqe; - u8 cqn; - bool cq_handle = false; - int items = 0; + struct mlxsw_pci_queue *q = from_tasklet(q, t, u.eq.tasklet); + struct mlxsw_pci *mlxsw_pci = q->pci; int credits = q->count >> 1; + u8 cqn, cq_count; + int items = 0; + char *eqe; memset(&active_cqns, 0, sizeof(active_cqns)); while ((eqe = mlxsw_pci_eq_sw_eqe_get(q))) { + cqn = mlxsw_pci_eqe_cqn_get(eqe); + set_bit(cqn, active_cqns); - /* Command interface completion events are always received on - * queue MLXSW_PCI_EQ_ASYNC_NUM (EQ0) and completion events - * are mapped to queue MLXSW_PCI_EQ_COMP_NUM (EQ1). - */ - switch (q->num) { - case MLXSW_PCI_EQ_ASYNC_NUM: - mlxsw_pci_eq_cmd_event(mlxsw_pci, eqe); - q->u.eq.ev_cmd_count++; - break; - case MLXSW_PCI_EQ_COMP_NUM: - cqn = mlxsw_pci_eqe_cqn_get(eqe); - set_bit(cqn, active_cqns); - cq_handle = true; - q->u.eq.ev_comp_count++; - break; - default: - q->u.eq.ev_other_count++; - } if (++items == credits) break; } - if (items) { - mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); - mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); - } - if (!cq_handle) + if (!items) return; + + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + + cq_count = mlxsw_pci->num_cqs; for_each_set_bit(cqn, active_cqns, cq_count) { q = mlxsw_pci_cq_get(mlxsw_pci, cqn); - mlxsw_pci_queue_tasklet_schedule(q); + napi_schedule(&q->u.cq.napi); } } +static int mlxsw_pci_eq_init(struct mlxsw_pci *mlxsw_pci, char *mbox, + struct mlxsw_pci_queue *q) +{ + int i; + int err; + + /* We expect to initialize only one EQ, which gets num=0 as it is + * located at index zero. We use the EQ as EQ1, so set the number for + * future use. 
+ */ + WARN_ON_ONCE(q->num); + q->num = MLXSW_PCI_EQ_COMP_NUM; + + q->consumer_counter = 0; + + for (i = 0; i < q->count; i++) { + char *elem = mlxsw_pci_queue_elem_get(q, i); + + mlxsw_pci_eqe_owner_set(elem, 1); + } + + mlxsw_cmd_mbox_sw2hw_eq_int_msix_set(mbox, 1); /* MSI-X used */ + mlxsw_cmd_mbox_sw2hw_eq_st_set(mbox, 1); /* armed */ + mlxsw_cmd_mbox_sw2hw_eq_log_eq_size_set(mbox, ilog2(q->count)); + for (i = 0; i < MLXSW_PCI_AQ_PAGES; i++) { + dma_addr_t mapaddr = __mlxsw_pci_queue_page_get(q, i); + + mlxsw_cmd_mbox_sw2hw_eq_pa_set(mbox, i, mapaddr); + } + err = mlxsw_cmd_sw2hw_eq(mlxsw_pci->core, mbox, q->num); + if (err) + return err; + tasklet_setup(&q->u.eq.tasklet, mlxsw_pci_eq_tasklet); + mlxsw_pci_queue_doorbell_consumer_ring(mlxsw_pci, q); + mlxsw_pci_queue_doorbell_arm_consumer_ring(mlxsw_pci, q); + return 0; +} + +static void mlxsw_pci_eq_fini(struct mlxsw_pci *mlxsw_pci, + struct mlxsw_pci_queue *q) +{ + mlxsw_cmd_hw2sw_eq(mlxsw_pci->core, q->num); +} + struct mlxsw_pci_queue_ops { const char *name; enum mlxsw_pci_queue_type type; @@ -886,7 +1001,6 @@ struct mlxsw_pci_queue_ops { struct mlxsw_pci_queue *q); void (*fini)(struct mlxsw_pci *mlxsw_pci, struct mlxsw_pci_queue *q); - void (*tasklet)(struct tasklet_struct *t); u16 (*elem_count_f)(const struct mlxsw_pci_queue *q); u8 (*elem_size_f)(const struct mlxsw_pci_queue *q); u16 elem_count; @@ -914,7 +1028,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_cq_ops = { .pre_init = mlxsw_pci_cq_pre_init, .init = mlxsw_pci_cq_init, .fini = mlxsw_pci_cq_fini, - .tasklet = mlxsw_pci_cq_tasklet, .elem_count_f = mlxsw_pci_cq_elem_count, .elem_size_f = mlxsw_pci_cq_elem_size }; @@ -923,7 +1036,6 @@ static const struct mlxsw_pci_queue_ops mlxsw_pci_eq_ops = { .type = MLXSW_PCI_QUEUE_TYPE_EQ, .init = mlxsw_pci_eq_init, .fini = mlxsw_pci_eq_fini, - .tasklet = mlxsw_pci_eq_tasklet, .elem_count = MLXSW_PCI_EQE_COUNT, .elem_size = MLXSW_PCI_EQE_SIZE }; @@ -948,9 +1060,6 @@ static int mlxsw_pci_queue_init(struct mlxsw_pci *mlxsw_pci, char *mbox, q->type = q_ops->type; q->pci = mlxsw_pci; - if (q_ops->tasklet) - tasklet_setup(&q->tasklet, q_ops->tasklet); - mem_item->size = MLXSW_PCI_AQ_SIZE; mem_item->buf = dma_alloc_coherent(&mlxsw_pci->pdev->dev, mem_item->size, &mem_item->mapaddr, @@ -1074,7 +1183,7 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) if (num_sdqs + num_rdqs > num_cqs || num_sdqs < MLXSW_PCI_SDQS_MIN || - num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_COUNT) { + num_cqs > MLXSW_PCI_CQS_MAX || num_eqs != MLXSW_PCI_EQS_MAX) { dev_err(&pdev->dev, "Unsupported number of queues\n"); return -EINVAL; } @@ -1089,10 +1198,11 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) return -EINVAL; } - mlxsw_pci->num_sdq_cqs = num_sdqs; + mlxsw_pci->num_cqs = num_cqs; + mlxsw_pci->num_sdqs = num_sdqs; err = mlxsw_pci_queue_group_init(mlxsw_pci, mbox, &mlxsw_pci_eq_ops, - num_eqs); + MLXSW_PCI_EQS_COUNT); if (err) { dev_err(&pdev->dev, "Failed to initialize event queues\n"); return err; @@ -1119,8 +1229,6 @@ static int mlxsw_pci_aqs_init(struct mlxsw_pci *mlxsw_pci, char *mbox) goto err_rdqs_init; } - /* We have to poll in command interface until queues are initialized */ - mlxsw_pci->cmd.nopoll = true; return 0; err_rdqs_init: @@ -1134,7 +1242,6 @@ err_cqs_init: static void mlxsw_pci_aqs_fini(struct mlxsw_pci *mlxsw_pci) { - mlxsw_pci->cmd.nopoll = false; mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_rdq_ops); mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_sdq_ops); 
mlxsw_pci_queue_group_fini(mlxsw_pci, &mlxsw_pci_cq_ops); @@ -1432,12 +1539,9 @@ static irqreturn_t mlxsw_pci_eq_irq_handler(int irq, void *dev_id) { struct mlxsw_pci *mlxsw_pci = dev_id; struct mlxsw_pci_queue *q; - int i; - for (i = 0; i < MLXSW_PCI_EQS_COUNT; i++) { - q = mlxsw_pci_eq_get(mlxsw_pci, i); - mlxsw_pci_queue_tasklet_schedule(q); - } + q = mlxsw_pci_eq_get(mlxsw_pci); + tasklet_schedule(&q->u.eq.tasklet); return IRQ_HANDLED; } @@ -1490,20 +1594,31 @@ static int mlxsw_pci_sys_ready_wait(struct mlxsw_pci *mlxsw_pci, return -EBUSY; } -static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci) +static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci, + bool pci_reset_sbr_supported) { struct pci_dev *pdev = mlxsw_pci->pdev; char mrsr_pl[MLXSW_REG_MRSR_LEN]; + struct pci_dev *bridge; int err; + if (!pci_reset_sbr_supported) { + pci_dbg(pdev, "Performing PCI hot reset instead of \"all reset\"\n"); + goto sbr; + } + mlxsw_reg_mrsr_pack(mrsr_pl, MLXSW_REG_MRSR_COMMAND_RESET_AT_PCI_DISABLE); err = mlxsw_reg_write(mlxsw_pci->core, MLXSW_REG(mrsr), mrsr_pl); if (err) return err; +sbr: device_lock_assert(&pdev->dev); + bridge = pci_upstream_bridge(pdev); + if (bridge) + pci_cfg_access_lock(bridge); pci_cfg_access_lock(pdev); pci_save_state(pdev); @@ -1513,6 +1628,8 @@ static int mlxsw_pci_reset_at_pci_disable(struct mlxsw_pci *mlxsw_pci) pci_restore_state(pdev); pci_cfg_access_unlock(pdev); + if (bridge) + pci_cfg_access_unlock(bridge); return err; } @@ -1529,6 +1646,7 @@ static int mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id) { struct pci_dev *pdev = mlxsw_pci->pdev; + bool pci_reset_sbr_supported = false; char mcam_pl[MLXSW_REG_MCAM_LEN]; bool pci_reset_supported = false; u32 sys_status; @@ -1548,13 +1666,17 @@ mlxsw_pci_reset(struct mlxsw_pci *mlxsw_pci, const struct pci_device_id *id) mlxsw_reg_mcam_pack(mcam_pl, MLXSW_REG_MCAM_FEATURE_GROUP_ENHANCED_FEATURES); err = mlxsw_reg_query(mlxsw_pci->core, MLXSW_REG(mcam), mcam_pl); - if (!err) + if (!err) { mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET, &pci_reset_supported); + mlxsw_reg_mcam_unpack(mcam_pl, MLXSW_REG_MCAM_PCI_RESET_SBR, + &pci_reset_sbr_supported); + } if (pci_reset_supported) { pci_dbg(pdev, "Starting PCI reset flow\n"); - err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci); + err = mlxsw_pci_reset_at_pci_disable(mlxsw_pci, + pci_reset_sbr_supported); } else { pci_dbg(pdev, "Starting software reset flow\n"); err = mlxsw_pci_reset_sw(mlxsw_pci); @@ -1709,6 +1831,10 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, if (err) goto err_requery_resources; + err = mlxsw_pci_napi_devs_init(mlxsw_pci); + if (err) + goto err_napi_devs_init; + err = mlxsw_pci_aqs_init(mlxsw_pci, mbox); if (err) goto err_aqs_init; @@ -1726,6 +1852,8 @@ static int mlxsw_pci_init(void *bus_priv, struct mlxsw_core *mlxsw_core, err_request_eq_irq: mlxsw_pci_aqs_fini(mlxsw_pci); err_aqs_init: + mlxsw_pci_napi_devs_fini(mlxsw_pci); +err_napi_devs_init: err_requery_resources: err_config_profile: err_cqe_v_check: @@ -1753,6 +1881,7 @@ static void mlxsw_pci_fini(void *bus_priv) free_irq(pci_irq_vector(mlxsw_pci->pdev, 0), mlxsw_pci); mlxsw_pci_aqs_fini(mlxsw_pci); + mlxsw_pci_napi_devs_fini(mlxsw_pci); mlxsw_pci_fw_area_fini(mlxsw_pci); mlxsw_pci_free_irq_vectors(mlxsw_pci); } @@ -1761,7 +1890,7 @@ static struct mlxsw_pci_queue * mlxsw_pci_sdq_pick(struct mlxsw_pci *mlxsw_pci, const struct mlxsw_tx_info *tx_info) { - u8 ctl_sdq_count = mlxsw_pci_sdq_count(mlxsw_pci) - 
1; + u8 ctl_sdq_count = mlxsw_pci->num_sdqs - 1; u8 sdqn; if (tx_info->is_emad) { @@ -1860,9 +1989,9 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, { struct mlxsw_pci *mlxsw_pci = bus_priv; dma_addr_t in_mapaddr = 0, out_mapaddr = 0; - bool evreq = mlxsw_pci->cmd.nopoll; unsigned long timeout = msecs_to_jiffies(MLXSW_PCI_CIR_TIMEOUT_MSECS); - bool *p_wait_done = &mlxsw_pci->cmd.wait_done; + unsigned long end; + bool wait_done; int err; *p_status = MLXSW_CMD_STATUS_OK; @@ -1886,36 +2015,28 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, mlxsw_pci_write32(mlxsw_pci, CIR_IN_MODIFIER, in_mod); mlxsw_pci_write32(mlxsw_pci, CIR_TOKEN, 0); - *p_wait_done = false; + wait_done = false; wmb(); /* all needs to be written before we write control register */ mlxsw_pci_write32(mlxsw_pci, CIR_CTRL, MLXSW_PCI_CIR_CTRL_GO_BIT | - (evreq ? MLXSW_PCI_CIR_CTRL_EVREQ_BIT : 0) | (opcode_mod << MLXSW_PCI_CIR_CTRL_OPCODE_MOD_SHIFT) | opcode); - if (!evreq) { - unsigned long end; - - end = jiffies + timeout; - do { - u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL); + end = jiffies + timeout; + do { + u32 ctrl = mlxsw_pci_read32(mlxsw_pci, CIR_CTRL); - if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) { - *p_wait_done = true; - *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT; - break; - } - cond_resched(); - } while (time_before(jiffies, end)); - } else { - wait_event_timeout(mlxsw_pci->cmd.wait, *p_wait_done, timeout); - *p_status = mlxsw_pci->cmd.comp.status; - } + if (!(ctrl & MLXSW_PCI_CIR_CTRL_GO_BIT)) { + wait_done = true; + *p_status = ctrl >> MLXSW_PCI_CIR_CTRL_STATUS_SHIFT; + break; + } + cond_resched(); + } while (time_before(jiffies, end)); err = 0; - if (*p_wait_done) { + if (wait_done) { if (*p_status) err = -EIO; } else { @@ -1929,14 +2050,12 @@ static int mlxsw_pci_cmd_exec(void *bus_priv, u16 opcode, u8 opcode_mod, */ __be32 tmp; - if (!evreq) { - tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, - CIR_OUT_PARAM_HI)); - memcpy(out_mbox, &tmp, sizeof(tmp)); - tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, - CIR_OUT_PARAM_LO)); - memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp)); - } + tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, + CIR_OUT_PARAM_HI)); + memcpy(out_mbox, &tmp, sizeof(tmp)); + tmp = cpu_to_be32(mlxsw_pci_read32(mlxsw_pci, + CIR_OUT_PARAM_LO)); + memcpy(out_mbox + sizeof(tmp), &tmp, sizeof(tmp)); } else if (!err && out_mbox) { memcpy(out_mbox, mlxsw_pci->cmd.out_mbox.buf, out_mbox_size); } @@ -2015,7 +2134,6 @@ static int mlxsw_pci_cmd_init(struct mlxsw_pci *mlxsw_pci) int err; mutex_init(&mlxsw_pci->cmd.lock); - init_waitqueue_head(&mlxsw_pci->cmd.wait); err = mlxsw_pci_mbox_alloc(mlxsw_pci, &mlxsw_pci->cmd.in_mbox); if (err) diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h index 7cdf0ce24f..6bed495dcf 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h +++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h @@ -42,8 +42,8 @@ ((offset) + (type_offset) + (num) * 4) #define MLXSW_PCI_CQS_MAX 96 -#define MLXSW_PCI_EQS_COUNT 2 -#define MLXSW_PCI_EQ_ASYNC_NUM 0 +#define MLXSW_PCI_EQS_MAX 2 +#define MLXSW_PCI_EQS_COUNT 1 #define MLXSW_PCI_EQ_COMP_NUM 1 #define MLXSW_PCI_SDQS_MIN 2 /* EMAD and control traffic */ diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index 8892654c68..3bb89045ea 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -4786,8 +4786,11 @@ MLXSW_ITEM32(reg, ptys, 
an_status, 0x04, 28, 4); #define MLXSW_REG_PTYS_EXT_ETH_SPEED_50GAUI_1_LAUI_1_50GBASE_CR_KR BIT(8) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_CAUI_4_100GBASE_CR4_KR4 BIT(9) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_2_100GBASE_CR2_KR2 BIT(10) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR BIT(11) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4 BIT(12) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2 BIT(13) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8 BIT(15) +#define MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4 BIT(16) #define MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8 BIT(19) /* reg_ptys_ext_eth_proto_cap @@ -10668,6 +10671,8 @@ enum mlxsw_reg_mcam_mng_feature_cap_mask_bits { MLXSW_REG_MCAM_MCIA_128B = 34, /* If set, MRSR.command=6 is supported. */ MLXSW_REG_MCAM_PCI_RESET = 48, + /* If set, MRSR.command=6 is supported with Secondary Bus Reset. */ + MLXSW_REG_MCAM_PCI_RESET_SBR = 67, }; #define MLXSW_REG_BYTES_PER_DWORD 0x4 diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index bb642e9bb6..030ed71f94 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -825,7 +825,7 @@ static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu) err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu); if (err) goto err_port_mtu_set; - dev->mtu = mtu; + WRITE_ONCE(dev->mtu, mtu); return 0; err_port_mtu_set: diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index 4b713832fd..f5c0a4214c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -391,7 +391,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, if (err) return err; - lkey_id = aregion->ops->lkey_id_get(aregion, aentry->enc_key, erp_id); + lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key, + erp_id); if (IS_ERR(lkey_id)) return PTR_ERR(lkey_id); aentry->lkey_id = lkey_id; @@ -399,7 +400,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE, priority, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -428,7 +429,7 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -457,7 +458,7 @@ mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp, kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block); mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE, priority, region->tcam_region_info, - aentry->enc_key, erp_id, + aentry->ht_key.enc_key, erp_id, aentry->delta_info.start, aentry->delta_info.mask, aentry->delta_info.value, @@ -480,15 +481,13 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, int err; mlxsw_afk_encode(afk, region->key_info, &rulei->values, - aentry->ht_key.full_enc_key, mask); + aentry->ht_key.enc_key, mask); erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false); if (IS_ERR(erp_mask)) return 
PTR_ERR(erp_mask); aentry->erp_mask = erp_mask; aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask); - memcpy(aentry->enc_key, aentry->ht_key.full_enc_key, - sizeof(aentry->enc_key)); /* Compute all needed delta information and clear the delta bits * from the encrypted key. @@ -497,9 +496,8 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp, aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta); aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta); aentry->delta_info.value = - mlxsw_sp_acl_erp_delta_value(delta, - aentry->ht_key.full_enc_key); - mlxsw_sp_acl_erp_delta_clear(delta, aentry->enc_key); + mlxsw_sp_acl_erp_delta_value(delta, aentry->ht_key.enc_key); + mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key); /* Add rule to the list of A-TCAM rules, assuming this * rule is intended to A-TCAM. In case this rule does diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c index 95f63fcf4b..a54eedb69a 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c @@ -249,7 +249,7 @@ __mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, memcpy(chunk + pad_bytes, &erp_region_id, sizeof(erp_region_id)); memcpy(chunk + key_offset, - &aentry->enc_key[chunk_key_offsets[chunk_index]], + &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]], chunk_key_len); chunk += chunk_len; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index d231f4d288..9eee229303 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -1217,18 +1217,6 @@ static bool mlxsw_sp_acl_erp_delta_check(void *priv, const void *parent_obj, return err ? false : true; } -static int mlxsw_sp_acl_erp_hints_obj_cmp(const void *obj1, const void *obj2) -{ - const struct mlxsw_sp_acl_erp_key *key1 = obj1; - const struct mlxsw_sp_acl_erp_key *key2 = obj2; - - /* For hints purposes, two objects are considered equal - * in case the masks are the same. Does not matter what - * the "ctcam" value is. 
- */ - return memcmp(key1->mask, key2->mask, sizeof(key1->mask)); -} - static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj, void *obj) { @@ -1308,7 +1296,6 @@ static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv) static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = { .obj_size = sizeof(struct mlxsw_sp_acl_erp_key), .delta_check = mlxsw_sp_acl_erp_delta_check, - .hints_obj_cmp = mlxsw_sp_acl_erp_hints_obj_cmp, .delta_create = mlxsw_sp_acl_erp_delta_create, .delta_destroy = mlxsw_sp_acl_erp_delta_destroy, .root_create = mlxsw_sp_acl_erp_root_create, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index 92a406f02e..b1d08e958b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -1504,7 +1504,8 @@ mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id, static int mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id, - struct devlink_param_gset_ctx *ctx) + struct devlink_param_gset_ctx *ctx, + struct netlink_ext_ack *extack) { struct mlxsw_core *mlxsw_core = devlink_priv(devlink); struct mlxsw_sp_acl_tcam_vregion *vregion; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 79a1d86065..010204f73e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -167,9 +167,9 @@ struct mlxsw_sp_acl_atcam_region { }; struct mlxsw_sp_acl_atcam_entry_ht_key { - char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded - * key. - */ + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, minus + * delta bits. + */ u8 erp_id; }; @@ -181,9 +181,6 @@ struct mlxsw_sp_acl_atcam_entry { struct rhash_head ht_node; struct list_head list; /* Member in entries_list */ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key; - char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, - * minus delta bits. 
- */ struct { u16 start; u8 mask; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c index c9f1c79f3f..ba090262e2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c @@ -1607,8 +1607,8 @@ static void mlxsw_sp_sb_sr_occ_query_cb(struct mlxsw_core *mlxsw_core, int mlxsw_sp_sb_occ_snapshot(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { + u16 local_port, local_port_1, first_local_port, last_local_port; struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - u16 local_port, local_port_1, last_local_port; struct mlxsw_sp_sb_sr_occ_query_cb_ctx cb_ctx; u8 masked_count, current_page = 0; unsigned long cb_priv = 0; @@ -1628,6 +1628,7 @@ next_batch: masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, false); mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE; last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; @@ -1645,9 +1646,12 @@ next_batch: if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, - local_port, 1); + local_port - first_local_port, + 1); } - mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, + local_port - first_local_port, + 1); for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_query(mlxsw_sp, local_port, i, &bulk_list); @@ -1684,7 +1688,7 @@ int mlxsw_sp_sb_occ_max_clear(struct mlxsw_core *mlxsw_core, unsigned int sb_index) { struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core); - u16 local_port, last_local_port; + u16 local_port, first_local_port, last_local_port; LIST_HEAD(bulk_list); unsigned int masked_count; u8 current_page = 0; @@ -1702,6 +1706,7 @@ next_batch: masked_count = 0; mlxsw_reg_sbsr_pack(sbsr_pl, true); mlxsw_reg_sbsr_port_page_set(sbsr_pl, current_page); + first_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE; last_local_port = current_page * MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE + MLXSW_REG_SBSR_NUM_PORTS_IN_PAGE - 1; @@ -1719,9 +1724,12 @@ next_batch: if (local_port != MLXSW_PORT_CPU_PORT) { /* Ingress quotas are not supported for the CPU port */ mlxsw_reg_sbsr_ingress_port_mask_set(sbsr_pl, - local_port, 1); + local_port - first_local_port, + 1); } - mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, local_port, 1); + mlxsw_reg_sbsr_egress_port_mask_set(sbsr_pl, + local_port - first_local_port, + 1); for (i = 0; i < mlxsw_sp->sb_vals->pool_count; i++) { err = mlxsw_sp_sb_pm_occ_clear(mlxsw_sp, local_port, i, &bulk_list); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c index 0f29e9c194..a755b0a901 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ethtool.c @@ -1649,6 +1649,18 @@ mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2[] = { ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_2_100gbase_cr2_kr2) static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr[] = { + ETHTOOL_LINK_MODE_100000baseKR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseSR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseCR_Full_BIT, + ETHTOOL_LINK_MODE_100000baseDR_Full_BIT, +}; + +#define 
MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr) + +static const enum ethtool_link_mode_bit_indices mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT, ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT, @@ -1661,6 +1673,18 @@ mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4[] = { ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4) static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2[] = { + ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseDR2_Full_BIT, + ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2) + +static const enum ethtool_link_mode_bit_indices mlxsw_sp2_mask_ethtool_400gaui_8[] = { ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT, ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT, @@ -1673,6 +1697,18 @@ mlxsw_sp2_mask_ethtool_400gaui_8[] = { ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_8) static const enum ethtool_link_mode_bit_indices +mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4[] = { + ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseDR4_Full_BIT, + ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT, +}; + +#define MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN \ + ARRAY_SIZE(mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4) + +static const enum ethtool_link_mode_bit_indices mlxsw_sp2_mask_ethtool_800gaui_8[] = { ETHTOOL_LINK_MODE_800000baseCR8_Full_BIT, ETHTOOL_LINK_MODE_800000baseKR8_Full_BIT, @@ -1817,6 +1853,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { .width = 2, }, { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_100GAUI_1_100GBASE_CR_KR, + .mask_ethtool = mlxsw_sp2_mask_ethtool_100gaui_1_100gbase_cr_kr, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_100GAUI_1_100GBASE_CR_KR_LEN, + .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_1X, + .speed = SPEED_100000, + .width = 1, + }, + { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_4_200GBASE_CR4_KR4, .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_4_200gbase_cr4_kr4, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_4_200GBASE_CR4_KR4_LEN, @@ -1826,6 +1870,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { .width = 4, }, { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_200GAUI_2_200GBASE_CR2_KR2, + .mask_ethtool = mlxsw_sp2_mask_ethtool_200gaui_2_200gbase_cr2_kr2, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_200GAUI_2_200GBASE_CR2_KR2_LEN, + .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_2X, + .speed = SPEED_200000, + .width = 2, + }, + { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_8, .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_8, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_8_LEN, @@ -1834,6 +1886,14 @@ static const struct mlxsw_sp2_port_link_mode mlxsw_sp2_port_link_mode[] = { .width = 8, }, { + .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_400GAUI_4_400GBASE_CR4_KR4, + .mask_ethtool = mlxsw_sp2_mask_ethtool_400gaui_4_400gbase_cr4_kr4, + .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_400GAUI_4_400GBASE_CR4_KR4_LEN, + .mask_sup_width = MLXSW_SP_PORT_MASK_WIDTH_4X, + .speed = SPEED_400000, + .width = 4, + }, + { .mask = MLXSW_REG_PTYS_EXT_ETH_SPEED_800GAUI_8, .mask_ethtool = 
mlxsw_sp2_mask_ethtool_800gaui_8, .m_ethtool_len = MLXSW_SP2_MASK_ETHTOOL_800GAUI_8_LEN, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 9fd1ca0792..f07955b543 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -595,6 +595,10 @@ static int mlxsw_sp_flower_parse(struct mlxsw_sp *mlxsw_sp, flow_rule_match_control(rule, &match); addr_type = match.key->addr_type; + + if (flow_rule_has_control_flags(match.mask->flags, + f->common.extack)) + return -EOPNOTSUPP; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c index 3340b4a694..d761a12359 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.c @@ -8,7 +8,7 @@ #include "spectrum_ipip.h" #include "reg.h" -struct ip_tunnel_parm +struct ip_tunnel_parm_kern mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev) { struct ip_tunnel *tun = netdev_priv(ol_dev); @@ -24,27 +24,29 @@ mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev) return tun->parms; } -static bool mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm *parms) +static bool +mlxsw_sp_ipip_parms4_has_ikey(const struct ip_tunnel_parm_kern *parms) { - return !!(parms->i_flags & TUNNEL_KEY); + return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags); } static bool mlxsw_sp_ipip_parms6_has_ikey(const struct __ip6_tnl_parm *parms) { - return !!(parms->i_flags & TUNNEL_KEY); + return test_bit(IP_TUNNEL_KEY_BIT, parms->i_flags); } -static bool mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm *parms) +static bool +mlxsw_sp_ipip_parms4_has_okey(const struct ip_tunnel_parm_kern *parms) { - return !!(parms->o_flags & TUNNEL_KEY); + return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags); } static bool mlxsw_sp_ipip_parms6_has_okey(const struct __ip6_tnl_parm *parms) { - return !!(parms->o_flags & TUNNEL_KEY); + return test_bit(IP_TUNNEL_KEY_BIT, parms->o_flags); } -static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm *parms) +static u32 mlxsw_sp_ipip_parms4_ikey(const struct ip_tunnel_parm_kern *parms) { return mlxsw_sp_ipip_parms4_has_ikey(parms) ? be32_to_cpu(parms->i_key) : 0; @@ -56,7 +58,7 @@ static u32 mlxsw_sp_ipip_parms6_ikey(const struct __ip6_tnl_parm *parms) be32_to_cpu(parms->i_key) : 0; } -static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm *parms) +static u32 mlxsw_sp_ipip_parms4_okey(const struct ip_tunnel_parm_kern *parms) { return mlxsw_sp_ipip_parms4_has_okey(parms) ? 
be32_to_cpu(parms->o_key) : 0; @@ -69,7 +71,7 @@ static u32 mlxsw_sp_ipip_parms6_okey(const struct __ip6_tnl_parm *parms) } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm *parms) +mlxsw_sp_ipip_parms4_saddr(const struct ip_tunnel_parm_kern *parms) { return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.saddr }; } @@ -81,7 +83,7 @@ mlxsw_sp_ipip_parms6_saddr(const struct __ip6_tnl_parm *parms) } static union mlxsw_sp_l3addr -mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm *parms) +mlxsw_sp_ipip_parms4_daddr(const struct ip_tunnel_parm_kern *parms) { return (union mlxsw_sp_l3addr) { .addr4 = parms->iph.daddr }; } @@ -96,7 +98,7 @@ union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev) { - struct ip_tunnel_parm parms4; + struct ip_tunnel_parm_kern parms4; struct __ip6_tnl_parm parms6; switch (proto) { @@ -115,7 +117,9 @@ mlxsw_sp_ipip_netdev_saddr(enum mlxsw_sp_l3proto proto, static __be32 mlxsw_sp_ipip_netdev_daddr4(const struct net_device *ol_dev) { - struct ip_tunnel_parm parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); + struct ip_tunnel_parm_kern parms4; + + parms4 = mlxsw_sp_ipip_netdev_parms4(ol_dev); return mlxsw_sp_ipip_parms4_daddr(&parms4).addr4; } @@ -124,7 +128,7 @@ static union mlxsw_sp_l3addr mlxsw_sp_ipip_netdev_daddr(enum mlxsw_sp_l3proto proto, const struct net_device *ol_dev) { - struct ip_tunnel_parm parms4; + struct ip_tunnel_parm_kern parms4; struct __ip6_tnl_parm parms6; switch (proto) { @@ -150,7 +154,7 @@ bool mlxsw_sp_l3addr_is_zero(union mlxsw_sp_l3addr addr) static struct mlxsw_sp_ipip_parms mlxsw_sp_ipip_netdev_parms_init_gre4(const struct net_device *ol_dev) { - struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); + struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); return (struct mlxsw_sp_ipip_parms) { .proto = MLXSW_SP_L3_PROTO_IPV4, @@ -187,8 +191,8 @@ mlxsw_sp_ipip_decap_config_gre4(struct mlxsw_sp *mlxsw_sp, { u16 rif_index = mlxsw_sp_ipip_lb_rif_index(ipip_entry->ol_lb); u16 ul_rif_id = mlxsw_sp_ipip_lb_ul_rif_id(ipip_entry->ol_lb); + struct ip_tunnel_parm_kern parms; char rtdp_pl[MLXSW_REG_RTDP_LEN]; - struct ip_tunnel_parm parms; unsigned int type_check; bool has_ikey; u32 daddr4; @@ -238,12 +242,15 @@ static bool mlxsw_sp_ipip_can_offload_gre4(const struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev) { struct ip_tunnel *tunnel = netdev_priv(ol_dev); - __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ bool inherit_ttl = tunnel->parms.iph.ttl == 0; bool inherit_tos = tunnel->parms.iph.tos & 0x1; + IP_TUNNEL_DECLARE_FLAGS(okflags) = { }; + + /* We can't offload any other features. */ + __set_bit(IP_TUNNEL_KEY_BIT, okflags); - return (tunnel->parms.i_flags & ~okflags) == 0 && - (tunnel->parms.o_flags & ~okflags) == 0 && + return ip_tunnel_flags_subset(tunnel->parms.i_flags, okflags) && + ip_tunnel_flags_subset(tunnel->parms.o_flags, okflags) && inherit_ttl && inherit_tos && mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV4, ol_dev); } @@ -252,7 +259,7 @@ static struct mlxsw_sp_rif_ipip_lb_config mlxsw_sp_ipip_ol_loopback_config_gre4(struct mlxsw_sp *mlxsw_sp, const struct net_device *ol_dev) { - struct ip_tunnel_parm parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); + struct ip_tunnel_parm_kern parms = mlxsw_sp_ipip_netdev_parms4(ol_dev); enum mlxsw_reg_ritr_loopback_ipip_type lb_ipipt; lb_ipipt = mlxsw_sp_ipip_parms4_has_okey(&parms) ? 
@@ -439,10 +446,13 @@ static bool mlxsw_sp_ipip_can_offload_gre6(const struct mlxsw_sp *mlxsw_sp, struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(ol_dev); bool inherit_tos = tparm.flags & IP6_TNL_F_USE_ORIG_TCLASS; bool inherit_ttl = tparm.hop_limit == 0; - __be16 okflags = TUNNEL_KEY; /* We can't offload any other features. */ + IP_TUNNEL_DECLARE_FLAGS(okflags) = { }; + + /* We can't offload any other features. */ + __set_bit(IP_TUNNEL_KEY_BIT, okflags); - return (tparm.i_flags & ~okflags) == 0 && - (tparm.o_flags & ~okflags) == 0 && + return ip_tunnel_flags_subset(tparm.i_flags, okflags) && + ip_tunnel_flags_subset(tparm.o_flags, okflags) && inherit_ttl && inherit_tos && mlxsw_sp_ipip_tunnel_complete(MLXSW_SP_L3_PROTO_IPV6, ol_dev); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h index a35f009da5..a661737796 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ipip.h @@ -9,7 +9,7 @@ #include <linux/if_tunnel.h> #include <net/ip6_tunnel.h> -struct ip_tunnel_parm +struct ip_tunnel_parm_kern mlxsw_sp_ipip_netdev_parms4(const struct net_device *ol_dev); struct __ip6_tnl_parm mlxsw_sp_ipip_netdev_parms6(const struct net_device *ol_dev); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index ce49c9514f..4b5fd71c89 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -413,8 +413,8 @@ mlxsw_sp_span_gretap4_route(const struct net_device *to_dev, __be32 *saddrp, __be32 *daddrp) { struct ip_tunnel *tun = netdev_priv(to_dev); + struct ip_tunnel_parm_kern parms; struct net_device *dev = NULL; - struct ip_tunnel_parm parms; struct rtable *rt = NULL; struct flowi4 fl4; @@ -451,7 +451,7 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp, const struct net_device *to_dev, struct mlxsw_sp_span_parms *sparmsp) { - struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); + struct ip_tunnel_parm_kern tparm = mlxsw_sp_ipip_netdev_parms4(to_dev); union mlxsw_sp_l3addr saddr = { .addr4 = tparm.iph.saddr }; union mlxsw_sp_l3addr daddr = { .addr4 = tparm.iph.daddr }; bool inherit_tos = tparm.iph.tos & 0x1; @@ -461,7 +461,8 @@ mlxsw_sp_span_entry_gretap4_parms(struct mlxsw_sp *mlxsw_sp, if (!(to_dev->flags & IFF_UP) || /* Reject tunnels with GRE keys, checksums, etc. */ - tparm.i_flags || tparm.o_flags || + !ip_tunnel_flags_empty(tparm.i_flags) || + !ip_tunnel_flags_empty(tparm.o_flags) || /* Require a fixed TTL and a TOS copied from the mirrored packet. */ inherit_ttl || !inherit_tos || /* A destination address may not be "any". */ @@ -565,7 +566,8 @@ mlxsw_sp_span_entry_gretap6_parms(struct mlxsw_sp *mlxsw_sp, if (!(to_dev->flags & IFF_UP) || /* Reject tunnels with GRE keys, checksums, etc. */ - tparm.i_flags || tparm.o_flags || + !ip_tunnel_flags_empty(tparm.i_flags) || + !ip_tunnel_flags_empty(tparm.o_flags) || /* Require a fixed TTL and a TOS copied from the mirrored packet. */ inherit_ttl || !inherit_tos || /* A destination address may not be "any". */ |
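
Note on the NAPI conversion above: the new Rx completion-queue poll routine (mlxsw_pci_napi_poll_cq_rx) encodes the usual NAPI return-value contract in its tail. It returns the full budget only while completions are still pending, so the instance stays scheduled and is polled again; otherwise it reports strictly less than the budget and calls napi_complete_done() before re-arming the CQ doorbell. The stand-alone C model below restates only that decision; rx_poll_report() and the more_pending flag (standing in for mlxsw_pci_cq_cqe_to_handle()) are illustrative names introduced here, not part of the driver.

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Model of the Rx CQ poll return value: 'work_done' completions were
     * handled out of 'budget'; 'more_pending' stands in for the driver's
     * "are there still CQEs owned by software?" check.
     */
    static int rx_poll_report(int work_done, int budget, bool more_pending)
    {
            if (budget == 0)
                    return 0;          /* Rx processing skipped entirely */
            if (work_done < budget)
                    return work_done;  /* done early: complete and re-arm */
            if (more_pending)
                    return budget;     /* budget exhausted: keep polling */
            return budget - 1;         /* handled exactly 'budget' with nothing
                                        * left: complete, but report less than
                                        * the full budget */
    }

    int main(void)
    {
            printf("%d\n", rx_poll_report(16, 64, false)); /* 16 */
            printf("%d\n", rx_poll_report(64, 64, true));  /* 64 */
            printf("%d\n", rx_poll_report(64, 64, false)); /* 63 */
            return 0;
    }

Returning budget - 1 rather than budget in the fully-drained case matters because the NAPI core treats a return value equal to the budget as "more work remains", so a poll routine that has completed must report strictly less; the Tx poll routine in the diff applies the same cap with min(work_done, budget - 1).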