From 94ac2ab3fff96814d7460a27a0e9d004abbd4128 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 19 Jun 2024 23:00:37 +0200
Subject: Merging upstream version 6.9.2.

Signed-off-by: Daniel Baumann
---
 drivers/net/ethernet/amazon/ena/ena_com.c | 323 +++++++++++------------------
 1 file changed, 112 insertions(+), 211 deletions(-)

diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 4db6893729..2d8a66ea82 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -90,8 +90,7 @@ static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
 	struct ena_com_admin_sq *sq = &admin_queue->sq;
 	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
 
-	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
-					 &sq->dma_addr, GFP_KERNEL);
+	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &sq->dma_addr, GFP_KERNEL);
 
 	if (!sq->entries) {
 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -113,8 +112,7 @@ static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
 	struct ena_com_admin_cq *cq = &admin_queue->cq;
 	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
 
-	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
-					 &cq->dma_addr, GFP_KERNEL);
+	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size, &cq->dma_addr, GFP_KERNEL);
 
 	if (!cq->entries) {
 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -136,8 +134,7 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
 	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
 	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
 
-	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
-					   &aenq->dma_addr, GFP_KERNEL);
+	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size, &aenq->dma_addr, GFP_KERNEL);
 
 	if (!aenq->entries) {
 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
@@ -155,14 +152,13 @@ static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
 
 	aenq_caps = 0;
 	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
-	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
-		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
-		      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
+	aenq_caps |=
+		(sizeof(struct ena_admin_aenq_entry) << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
+		ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
 	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
 
 	if (unlikely(!aenq_handlers)) {
-		netdev_err(ena_dev->net_device,
-			   "AENQ handlers pointer is NULL\n");
+		netdev_err(ena_dev->net_device, "AENQ handlers pointer is NULL\n");
 		return -EINVAL;
 	}
@@ -189,14 +185,12 @@ static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queu
 	}
 
 	if (unlikely(!admin_queue->comp_ctx)) {
-		netdev_err(admin_queue->ena_dev->net_device,
-			   "Completion context is NULL\n");
+		netdev_err(admin_queue->ena_dev->net_device, "Completion context is NULL\n");
 		return NULL;
 	}
 
 	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
-		netdev_err(admin_queue->ena_dev->net_device,
-			   "Completion context is occupied\n");
+		netdev_err(admin_queue->ena_dev->net_device, "Completion context is occupied\n");
 		return NULL;
 	}
@@ -226,8 +220,7 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
 	/* In case of queue FULL */
 	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
 	if (cnt >= admin_queue->q_depth) {
-		netdev_dbg(admin_queue->ena_dev->net_device,
-			   "Admin queue is full.\n");
+		netdev_dbg(admin_queue->ena_dev->net_device, "Admin queue is full.\n");
 		admin_queue->stats.out_of_space++;
 		return ERR_PTR(-ENOSPC);
 	}
@@ -274,8 +267,7 @@ static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
 	struct ena_comp_ctx *comp_ctx;
 	u16 i;
 
-	admin_queue->comp_ctx =
-		devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
+	admin_queue->comp_ctx = devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
 	if (unlikely(!admin_queue->comp_ctx)) {
 		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 		return -ENOMEM;
@@ -336,20 +328,17 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 		dev_node = dev_to_node(ena_dev->dmadev);
 		set_dev_node(ena_dev->dmadev, ctx->numa_node);
 		io_sq->desc_addr.virt_addr =
-			dma_alloc_coherent(ena_dev->dmadev, size,
-					   &io_sq->desc_addr.phys_addr,
+			dma_alloc_coherent(ena_dev->dmadev, size, &io_sq->desc_addr.phys_addr,
 					   GFP_KERNEL);
 		set_dev_node(ena_dev->dmadev, dev_node);
 		if (!io_sq->desc_addr.virt_addr) {
 			io_sq->desc_addr.virt_addr =
 				dma_alloc_coherent(ena_dev->dmadev, size,
-						   &io_sq->desc_addr.phys_addr,
-						   GFP_KERNEL);
+						   &io_sq->desc_addr.phys_addr, GFP_KERNEL);
 		}
 
 		if (!io_sq->desc_addr.virt_addr) {
-			netdev_err(ena_dev->net_device,
-				   "Memory allocation failed\n");
+			netdev_err(ena_dev->net_device, "Memory allocation failed\n");
 			return -ENOMEM;
 		}
 	}
@@ -367,16 +356,14 @@ static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
 
 		dev_node = dev_to_node(ena_dev->dmadev);
 		set_dev_node(ena_dev->dmadev, ctx->numa_node);
-		io_sq->bounce_buf_ctrl.base_buffer =
-			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
+		io_sq->bounce_buf_ctrl.base_buffer = devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
 		set_dev_node(ena_dev->dmadev, dev_node);
 		if (!io_sq->bounce_buf_ctrl.base_buffer)
 			io_sq->bounce_buf_ctrl.base_buffer =
 				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
 
 		if (!io_sq->bounce_buf_ctrl.base_buffer) {
-			netdev_err(ena_dev->net_device,
-				   "Bounce buffer memory allocation failed\n");
+			netdev_err(ena_dev->net_device, "Bounce buffer memory allocation failed\n");
 			return -ENOMEM;
 		}
@@ -425,13 +412,11 @@ static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
 	prev_node = dev_to_node(ena_dev->dmadev);
 	set_dev_node(ena_dev->dmadev, ctx->numa_node);
 	io_cq->cdesc_addr.virt_addr =
-		dma_alloc_coherent(ena_dev->dmadev, size,
-				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
+		dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
 	set_dev_node(ena_dev->dmadev, prev_node);
 	if (!io_cq->cdesc_addr.virt_addr) {
 		io_cq->cdesc_addr.virt_addr =
-			dma_alloc_coherent(ena_dev->dmadev, size,
-					   &io_cq->cdesc_addr.phys_addr,
+			dma_alloc_coherent(ena_dev->dmadev, size, &io_cq->cdesc_addr.phys_addr,
 					   GFP_KERNEL);
 	}
@@ -514,8 +499,8 @@ static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
 					u8 comp_status)
 {
 	if (unlikely(comp_status != 0))
-		netdev_err(admin_queue->ena_dev->net_device,
-			   "Admin command failed[%u]\n", comp_status);
+		netdev_err(admin_queue->ena_dev->net_device, "Admin command failed[%u]\n",
+			   comp_status);
 
 	switch (comp_status) {
 	case ENA_ADMIN_SUCCESS:
@@ -580,8 +565,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
 	}
 
 	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
-		netdev_err(admin_queue->ena_dev->net_device,
-			   "Command was aborted\n");
+		netdev_err(admin_queue->ena_dev->net_device, "Command was aborted\n");
 		spin_lock_irqsave(&admin_queue->q_lock, flags);
 		admin_queue->stats.aborted_cmd++;
 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -589,8 +573,7 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
 		goto err;
 	}
 
-	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
-	     comp_ctx->status);
+	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n", comp_ctx->status);
 
 	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
 err:
@@ -634,8 +617,7 @@ static int ena_com_set_llq(struct ena_com_dev *ena_dev)
 					    sizeof(resp));
 
 	if (unlikely(ret))
-		netdev_err(ena_dev->net_device,
-			   "Failed to set LLQ configurations: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to set LLQ configurations: %d\n", ret);
 
 	return ret;
 }
@@ -658,8 +640,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 			llq_default_cfg->llq_header_location;
 	} else {
 		netdev_err(ena_dev->net_device,
-			   "Invalid header location control, supported: 0x%x\n",
-			   supported_feat);
+			   "Invalid header location control, supported: 0x%x\n", supported_feat);
 		return -EINVAL;
 	}
@@ -681,8 +662,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 			netdev_err(ena_dev->net_device,
 				   "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
-				   llq_default_cfg->llq_stride_ctrl,
-				   supported_feat, llq_info->desc_stride_ctrl);
+				   llq_default_cfg->llq_stride_ctrl, supported_feat,
+				   llq_info->desc_stride_ctrl);
 		}
 	} else {
 		llq_info->desc_stride_ctrl = 0;
@@ -704,8 +685,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 		llq_info->desc_list_entry_size = 256;
 	} else {
 		netdev_err(ena_dev->net_device,
-			   "Invalid entry_size_ctrl, supported: 0x%x\n",
-			   supported_feat);
+			   "Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
 		return -EINVAL;
 	}
@@ -750,8 +730,8 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 		netdev_err(ena_dev->net_device,
 			   "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
-			   llq_default_cfg->llq_num_decs_before_header,
-			   supported_feat, llq_info->descs_num_before_header);
+			   llq_default_cfg->llq_num_decs_before_header, supported_feat,
+			   llq_info->descs_num_before_header);
 	}
 	/* Check for accelerated queue supported */
 	llq_accel_mode_get = llq_features->accel_mode.u.get;
@@ -767,8 +747,7 @@ static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
 
 	rc = ena_com_set_llq(ena_dev);
 	if (rc)
-		netdev_err(ena_dev->net_device,
-			   "Cannot set LLQ configuration: %d\n", rc);
+		netdev_err(ena_dev->net_device, "Cannot set LLQ configuration: %d\n", rc);
 
 	return rc;
 }
@@ -780,8 +759,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
 	int ret;
 
 	wait_for_completion_timeout(&comp_ctx->wait_event,
-				    usecs_to_jiffies(
-					    admin_queue->completion_timeout));
+				    usecs_to_jiffies(admin_queue->completion_timeout));
 
 	/* In case the command wasn't completed find out the root cause.
 	 * There might be 2 kinds of errors
@@ -797,8 +775,7 @@ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *com
 	if (comp_ctx->status == ENA_CMD_COMPLETED) {
 		netdev_err(admin_queue->ena_dev->net_device,
 			   "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
-			   comp_ctx->cmd_opcode,
-			   admin_queue->auto_polling ? "ON" : "OFF");
+			   comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");
 		/* Check if fallback to polling is enabled */
 		if (admin_queue->auto_polling)
 			admin_queue->polling = true;
@@ -867,15 +844,13 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 	if (unlikely(i == timeout)) {
 		netdev_err(ena_dev->net_device,
 			   "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
-			   mmio_read->seq_num, offset, read_resp->req_id,
-			   read_resp->reg_off);
+			   mmio_read->seq_num, offset, read_resp->req_id, read_resp->reg_off);
 		ret = ENA_MMIO_READ_TIMEOUT;
 		goto err;
 	}
 
 	if (read_resp->reg_off != offset) {
-		netdev_err(ena_dev->net_device,
-			   "Read failure: wrong offset provided\n");
+		netdev_err(ena_dev->net_device, "Read failure: wrong offset provided\n");
 		ret = ENA_MMIO_READ_TIMEOUT;
 	} else {
 		ret = read_resp->reg_val;
@@ -934,8 +909,7 @@ static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
 					    sizeof(destroy_resp));
 
 	if (unlikely(ret && (ret != -ENODEV)))
-		netdev_err(ena_dev->net_device,
-			   "Failed to destroy io sq error: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to destroy io sq error: %d\n", ret);
 
 	return ret;
 }
@@ -949,8 +923,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
 	if (io_cq->cdesc_addr.virt_addr) {
 		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
 
-		dma_free_coherent(ena_dev->dmadev, size,
-				  io_cq->cdesc_addr.virt_addr,
+		dma_free_coherent(ena_dev->dmadev, size, io_cq->cdesc_addr.virt_addr,
 				  io_cq->cdesc_addr.phys_addr);
 
 		io_cq->cdesc_addr.virt_addr = NULL;
@@ -959,8 +932,7 @@ static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
 	if (io_sq->desc_addr.virt_addr) {
 		size = io_sq->desc_entry_size * io_sq->q_depth;
 
-		dma_free_coherent(ena_dev->dmadev, size,
-				  io_sq->desc_addr.virt_addr,
+		dma_free_coherent(ena_dev->dmadev, size, io_sq->desc_addr.virt_addr,
 				  io_sq->desc_addr.phys_addr);
 
 		io_sq->desc_addr.virt_addr = NULL;
@@ -985,8 +957,7 @@ static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
 		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 
 		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
-			netdev_err(ena_dev->net_device,
-				   "Reg read timeout occurred\n");
+			netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
 			return -ETIME;
 		}
@@ -1026,8 +997,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 	int ret;
 
 	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
-		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
-			   feature_id);
+		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", feature_id);
 		return -EOPNOTSUPP;
 	}
@@ -1064,8 +1034,7 @@ static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
 
 	if (unlikely(ret))
 		netdev_err(ena_dev->net_device,
-			   "Failed to submit get_feature command %d error: %d\n",
-			   feature_id, ret);
+			   "Failed to submit get_feature command %d error: %d\n", feature_id, ret);
 
 	return ret;
 }
@@ -1104,13 +1073,11 @@ static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 
-	if (!ena_com_check_supported_feature_id(ena_dev,
-						ENA_ADMIN_RSS_HASH_FUNCTION))
+	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION))
 		return -EOPNOTSUPP;
 
-	rss->hash_key =
-		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
-				   &rss->hash_key_dma_addr, GFP_KERNEL);
+	rss->hash_key = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
+					   &rss->hash_key_dma_addr, GFP_KERNEL);
 
 	if (unlikely(!rss->hash_key))
 		return -ENOMEM;
@@ -1123,8 +1090,8 @@ static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
 	struct ena_rss *rss = &ena_dev->rss;
 
 	if (rss->hash_key)
-		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
-				  rss->hash_key, rss->hash_key_dma_addr);
+		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key), rss->hash_key,
+				  rss->hash_key_dma_addr);
 	rss->hash_key = NULL;
 }
@@ -1132,9 +1099,8 @@ static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
 {
 	struct ena_rss *rss = &ena_dev->rss;
 
-	rss->hash_ctrl =
-		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
-				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);
+	rss->hash_ctrl = dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
+					    &rss->hash_ctrl_dma_addr, GFP_KERNEL);
 
 	if (unlikely(!rss->hash_ctrl))
 		return -ENOMEM;
@@ -1147,8 +1113,8 @@ static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
 	struct ena_rss *rss = &ena_dev->rss;
 
 	if (rss->hash_ctrl)
-		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
-				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
+		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl), rss->hash_ctrl,
+				  rss->hash_ctrl_dma_addr);
 	rss->hash_ctrl = NULL;
 }
@@ -1177,15 +1143,13 @@ static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
 	tbl_size = (1ULL << log_size) *
 		sizeof(struct ena_admin_rss_ind_table_entry);
 
-	rss->rss_ind_tbl =
-		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
-				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
+	rss->rss_ind_tbl = dma_alloc_coherent(ena_dev->dmadev, tbl_size, &rss->rss_ind_tbl_dma_addr,
+					      GFP_KERNEL);
 	if (unlikely(!rss->rss_ind_tbl))
 		goto mem_err1;
 
 	tbl_size = (1ULL << log_size) * sizeof(u16);
-	rss->host_rss_ind_tbl =
-		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
+	rss->host_rss_ind_tbl = devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
 	if (unlikely(!rss->host_rss_ind_tbl))
 		goto mem_err2;
@@ -1197,8 +1161,7 @@ mem_err2:
 	tbl_size = (1ULL << log_size) *
 		sizeof(struct ena_admin_rss_ind_table_entry);
 
-	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
-			  rss->rss_ind_tbl_dma_addr);
+	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl, rss->rss_ind_tbl_dma_addr);
 	rss->rss_ind_tbl = NULL;
 mem_err1:
 	rss->tbl_log_size = 0;
@@ -1261,8 +1224,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
 					      &create_cmd.sq_ba,
 					      io_sq->desc_addr.phys_addr);
 		if (unlikely(ret)) {
-			netdev_err(ena_dev->net_device,
-				   "Memory address set failed\n");
+			netdev_err(ena_dev->net_device, "Memory address set failed\n");
 			return ret;
 		}
 	}
@@ -1273,8 +1235,7 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
 					    (struct ena_admin_acq_entry *)&cmd_completion,
 					    sizeof(cmd_completion));
 	if (unlikely(ret)) {
-		netdev_err(ena_dev->net_device,
-			   "Failed to create IO SQ. error: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to create IO SQ. error: %d\n", ret);
 		return ret;
 	}
@@ -1284,16 +1245,12 @@ static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
 		(uintptr_t)cmd_completion.sq_doorbell_offset);
 
 	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
-				+ cmd_completion.llq_headers_offset);
-
 		io_sq->desc_addr.pbuf_dev_addr =
 			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
 			cmd_completion.llq_descriptors_offset);
 	}
 
-	netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
-		   io_sq->idx, io_sq->q_depth);
+	netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
 
 	return ret;
 }
@@ -1420,8 +1377,7 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
 					    (struct ena_admin_acq_entry *)&cmd_completion,
 					    sizeof(cmd_completion));
 	if (unlikely(ret)) {
-		netdev_err(ena_dev->net_device,
-			   "Failed to create IO CQ. error: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to create IO CQ. error: %d\n", ret);
 		return ret;
 	}
@@ -1430,18 +1386,12 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
 	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
 		cmd_completion.cq_interrupt_unmask_register_offset);
 
-	if (cmd_completion.cq_head_db_register_offset)
-		io_cq->cq_head_db_reg =
-			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
-			cmd_completion.cq_head_db_register_offset);
-
 	if (cmd_completion.numa_node_register_offset)
 		io_cq->numa_node_cfg_reg =
 			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
 			cmd_completion.numa_node_register_offset);
 
-	netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
-		   io_cq->idx, io_cq->q_depth);
+	netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
 
 	return ret;
 }
@@ -1451,8 +1401,7 @@ int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
 			    struct ena_com_io_cq **io_cq)
 {
 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
-		netdev_err(ena_dev->net_device,
-			   "Invalid queue number %d but the max is %d\n", qid,
+		netdev_err(ena_dev->net_device, "Invalid queue number %d but the max is %d\n", qid,
 			   ENA_TOTAL_NUM_QUEUES);
 		return -EINVAL;
 	}
@@ -1492,8 +1441,7 @@ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
 	spin_lock_irqsave(&admin_queue->q_lock, flags);
 	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
 		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-		ena_delay_exponential_backoff_us(exp++,
-						 ena_dev->ena_min_poll_delay_us);
+		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
 		spin_lock_irqsave(&admin_queue->q_lock, flags);
 	}
 	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
@@ -1519,8 +1467,7 @@ int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
 					    sizeof(destroy_resp));
 
 	if (unlikely(ret && (ret != -ENODEV)))
-		netdev_err(ena_dev->net_device,
-			   "Failed to destroy IO CQ. error: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to destroy IO CQ. error: %d\n", ret);
 
 	return ret;
 }
@@ -1588,8 +1535,7 @@ int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
 					    sizeof(resp));
 
 	if (unlikely(ret))
-		netdev_err(ena_dev->net_device,
-			   "Failed to config AENQ ret: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to config AENQ ret: %d\n", ret);
 
 	return ret;
 }
@@ -1610,8 +1556,7 @@ int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
 	netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);
 
 	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
-		netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
-			   width);
+		netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n", width);
 		return -EINVAL;
 	}
@@ -1633,19 +1578,16 @@ int ena_com_validate_version(struct ena_com_dev *ena_dev)
 	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
 					  ENA_REGS_CONTROLLER_VERSION_OFF);
 
-	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
-		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
+	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) || (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
 		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
 		return -ETIME;
 	}
 
 	dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
-		 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
-		 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
+		 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >> ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
 		 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
 
-	dev_info(ena_dev->dmadev,
-		 "ENA controller version: %d.%d.%d implementation version %d\n",
+	dev_info(ena_dev->dmadev, "ENA controller version: %d.%d.%d implementation version %d\n",
 		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
 		 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
 		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
@@ -1694,20 +1636,17 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
 
 	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
 	if (sq->entries)
-		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
-				  sq->dma_addr);
+		dma_free_coherent(ena_dev->dmadev, size, sq->entries, sq->dma_addr);
 	sq->entries = NULL;
 
 	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
 	if (cq->entries)
-		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
-				  cq->dma_addr);
+		dma_free_coherent(ena_dev->dmadev, size, cq->entries, cq->dma_addr);
 	cq->entries = NULL;
 
 	size = ADMIN_AENQ_SIZE(aenq->q_depth);
 	if (ena_dev->aenq.entries)
-		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
-				  aenq->dma_addr);
+		dma_free_coherent(ena_dev->dmadev, size, aenq->entries, aenq->dma_addr);
 	aenq->entries = NULL;
 }
@@ -1733,10 +1672,8 @@ int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
 	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
 
 	spin_lock_init(&mmio_read->lock);
-	mmio_read->read_resp =
-		dma_alloc_coherent(ena_dev->dmadev,
-				   sizeof(*mmio_read->read_resp),
-				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+	mmio_read->read_resp = dma_alloc_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
+						  &mmio_read->read_resp_dma_addr, GFP_KERNEL);
 	if (unlikely(!mmio_read->read_resp))
 		goto err;
@@ -1767,8 +1704,8 @@ void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
 	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
 
-	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
-			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);
+	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp), mmio_read->read_resp,
+			  mmio_read->read_resp_dma_addr);
 
 	mmio_read->read_resp = NULL;
 }
@@ -1800,8 +1737,7 @@ int ena_com_admin_init(struct ena_com_dev *ena_dev,
 	}
 
 	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
-		netdev_err(ena_dev->net_device,
-			   "Device isn't ready, abort com init\n");
+		netdev_err(ena_dev->net_device, "Device isn't ready, abort com init\n");
 		return -ENODEV;
 	}
@@ -1878,8 +1814,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
 	int ret;
 
 	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
-		netdev_err(ena_dev->net_device,
-			   "Qid (%d) is bigger than max num of queues (%d)\n",
+		netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
 			   ctx->qid, ENA_TOTAL_NUM_QUEUES);
 		return -EINVAL;
 	}
@@ -1905,8 +1840,7 @@ int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
 	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
 		/* header length is limited to 8 bits */
-		io_sq->tx_max_header_size =
-			min_t(u32, ena_dev->tx_max_header_size, SZ_256);
+		io_sq->tx_max_header_size = min_t(u32, ena_dev->tx_max_header_size, SZ_256);
 
 	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
 	if (ret)
@@ -1938,8 +1872,7 @@ void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
 	struct ena_com_io_cq *io_cq;
 
 	if (qid >= ENA_TOTAL_NUM_QUEUES) {
-		netdev_err(ena_dev->net_device,
-			   "Qid (%d) is bigger than max num of queues (%d)\n",
+		netdev_err(ena_dev->net_device, "Qid (%d) is bigger than max num of queues (%d)\n",
 			   qid, ENA_TOTAL_NUM_QUEUES);
 		return;
 	}
@@ -1983,8 +1916,7 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 		if (rc)
 			return rc;
 
-		if (get_resp.u.max_queue_ext.version !=
-		    ENA_FEATURE_MAX_QUEUE_EXT_VER)
+		if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
 			return -EINVAL;
 
 		memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
@@ -2025,18 +1957,15 @@ int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
 
 	if (!rc)
-		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
-		       sizeof(get_resp.u.hw_hints));
+		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints, sizeof(get_resp.u.hw_hints));
 	else if (rc == -EOPNOTSUPP)
-		memset(&get_feat_ctx->hw_hints, 0x0,
-		       sizeof(get_feat_ctx->hw_hints));
+		memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
 	else
 		return rc;
 
 	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
 	if (!rc)
-		memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
-		       sizeof(get_resp.u.llq));
+		memcpy(&get_feat_ctx->llq, &get_resp.u.llq, sizeof(get_resp.u.llq));
 	else if (rc == -EOPNOTSUPP)
 		memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
 	else
@@ -2084,8 +2013,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
 	aenq_common = &aenq_e->aenq_common_desc;
 
 	/* Go over all the events */
-	while ((READ_ONCE(aenq_common->flags) &
-		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+	while ((READ_ONCE(aenq_common->flags) & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
 		/* Make sure the phase bit (ownership) is as expected before
 		 * reading the rest of the descriptor.
 		 */
@@ -2094,8 +2022,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
 		timestamp = (u64)aenq_common->timestamp_low |
 			((u64)aenq_common->timestamp_high << 32);
 
-		netdev_dbg(ena_dev->net_device,
-			   "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
+		netdev_dbg(ena_dev->net_device, "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
 			   aenq_common->group, aenq_common->syndrome, timestamp);
 
 		/* Handle specific event*/
@@ -2124,8 +2051,7 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
 
 	/* write the aenq doorbell after all AENQ descriptors were read */
 	mb();
-	writel_relaxed((u32)aenq->head,
-		       ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
+	writel_relaxed((u32)aenq->head, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
 }
 
 int ena_com_dev_reset(struct ena_com_dev *ena_dev,
@@ -2137,15 +2063,13 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
 	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
 	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
 
-	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
-		     (cap == ENA_MMIO_READ_TIMEOUT))) {
+	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) || (cap == ENA_MMIO_READ_TIMEOUT))) {
 		netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
 		return -ETIME;
 	}
 
 	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
-		netdev_err(ena_dev->net_device,
-			   "Device isn't ready, can't reset device\n");
+		netdev_err(ena_dev->net_device, "Device isn't ready, can't reset device\n");
 		return -EINVAL;
 	}
@@ -2168,8 +2092,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
 	rc = wait_for_reset_state(ena_dev, timeout,
 				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
 	if (rc != 0) {
-		netdev_err(ena_dev->net_device,
-			   "Reset indication didn't turn on\n");
+		netdev_err(ena_dev->net_device, "Reset indication didn't turn on\n");
 		return rc;
 	}
@@ -2177,8 +2100,7 @@ int ena_com_dev_reset(struct ena_com_dev *ena_dev,
 	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
 	rc = wait_for_reset_state(ena_dev, timeout, 0);
 	if (rc != 0) {
-		netdev_err(ena_dev->net_device,
-			   "Reset indication didn't turn off\n");
+		netdev_err(ena_dev->net_device, "Reset indication didn't turn off\n");
 		return rc;
 	}
@@ -2215,8 +2137,7 @@ static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
 					     sizeof(*get_resp));
 
 	if (unlikely(ret))
-		netdev_err(ena_dev->net_device,
-			   "Failed to get stats. error: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to get stats. error: %d\n", ret);
 
 	return ret;
 }
@@ -2228,8 +2149,7 @@ int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
 	int ret;
 
 	if (!ena_com_get_cap(ena_dev, ENA_ADMIN_ENI_STATS)) {
-		netdev_err(ena_dev->net_device,
-			   "Capability %d isn't supported\n",
+		netdev_err(ena_dev->net_device, "Capability %d isn't supported\n",
 			   ENA_ADMIN_ENI_STATS);
 		return -EOPNOTSUPP;
 	}
@@ -2266,8 +2186,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
 	int ret;
 
 	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
-		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
-			   ENA_ADMIN_MTU);
+		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n", ENA_ADMIN_MTU);
 		return -EOPNOTSUPP;
 	}
@@ -2286,8 +2205,7 @@ int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
 				     sizeof(resp));
 
 	if (unlikely(ret))
-		netdev_err(ena_dev->net_device,
-			   "Failed to set mtu %d. error: %d\n", mtu, ret);
+		netdev_err(ena_dev->net_device, "Failed to set mtu %d. error: %d\n", mtu, ret);
 
 	return ret;
 }
@@ -2301,8 +2219,7 @@ int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
 	ret = ena_com_get_feature(ena_dev, &resp,
 				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
 	if (unlikely(ret)) {
-		netdev_err(ena_dev->net_device,
-			   "Failed to get offload capabilities %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to get offload capabilities %d\n", ret);
 		return ret;
 	}
@@ -2320,8 +2237,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 	struct ena_admin_get_feat_resp get_resp;
 	int ret;
 
-	if (!ena_com_check_supported_feature_id(ena_dev,
-						ENA_ADMIN_RSS_HASH_FUNCTION)) {
+	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_FUNCTION)) {
 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
 			   ENA_ADMIN_RSS_HASH_FUNCTION);
 		return -EOPNOTSUPP;
@@ -2334,8 +2250,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 		return ret;
 
 	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
-		netdev_err(ena_dev->net_device,
-			   "Func hash %d isn't supported by device, abort\n",
+		netdev_err(ena_dev->net_device, "Func hash %d isn't supported by device, abort\n",
 			   rss->hash_func);
 		return -EOPNOTSUPP;
 	}
@@ -2365,8 +2280,7 @@ int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
 					    (struct ena_admin_acq_entry *)&resp,
 					    sizeof(resp));
 	if (unlikely(ret)) {
-		netdev_err(ena_dev->net_device,
-			   "Failed to set hash function %d. error: %d\n",
+		netdev_err(ena_dev->net_device, "Failed to set hash function %d. error: %d\n",
 			   rss->hash_func, ret);
 		return -EINVAL;
 	}
@@ -2398,16 +2312,15 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
 		return rc;
 
 	if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
-		netdev_err(ena_dev->net_device,
-			   "Flow hash function %d isn't supported\n", func);
+		netdev_err(ena_dev->net_device, "Flow hash function %d isn't supported\n", func);
 		return -EOPNOTSUPP;
 	}
 
 	if ((func == ENA_ADMIN_TOEPLITZ) && key) {
 		if (key_len != sizeof(hash_key->key)) {
 			netdev_err(ena_dev->net_device,
-				   "key len (%u) doesn't equal the supported size (%zu)\n",
-				   key_len, sizeof(hash_key->key));
+				   "key len (%u) doesn't equal the supported size (%zu)\n", key_len,
+				   sizeof(hash_key->key));
 			return -EINVAL;
 		}
 		memcpy(hash_key->key, key, key_len);
@@ -2495,8 +2408,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
 	struct ena_admin_set_feat_resp resp;
 	int ret;
 
-	if (!ena_com_check_supported_feature_id(ena_dev,
-						ENA_ADMIN_RSS_HASH_INPUT)) {
+	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_HASH_INPUT)) {
 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
 			   ENA_ADMIN_RSS_HASH_INPUT);
 		return -EOPNOTSUPP;
@@ -2527,8 +2439,7 @@ int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
 					    (struct ena_admin_acq_entry *)&resp,
 					    sizeof(resp));
 	if (unlikely(ret))
-		netdev_err(ena_dev->net_device,
-			   "Failed to set hash input. error: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to set hash input. error: %d\n", ret);
 
 	return ret;
 }
@@ -2605,8 +2516,7 @@ int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
 	int rc;
 
 	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
-		netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
-			   proto);
+		netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n", proto);
 		return -EINVAL;
 	}
@@ -2658,8 +2568,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
 	struct ena_admin_set_feat_resp resp;
 	int ret;
 
-	if (!ena_com_check_supported_feature_id(
-		    ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
+	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
 		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
 			   ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
 		return -EOPNOTSUPP;
@@ -2699,8 +2608,7 @@ int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
 					    sizeof(resp));
 
 	if (unlikely(ret))
-		netdev_err(ena_dev->net_device,
-			   "Failed to set indirect table. error: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to set indirect table. error: %d\n", ret);
 
 	return ret;
 }
@@ -2779,9 +2687,8 @@ int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
 {
 	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
 
-	host_attr->host_info =
-		dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
-				   &host_attr->host_info_dma_addr, GFP_KERNEL);
+	host_attr->host_info = dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
+						  &host_attr->host_info_dma_addr, GFP_KERNEL);
 	if (unlikely(!host_attr->host_info))
 		return -ENOMEM;
@@ -2827,8 +2734,7 @@ void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
 
 	if (host_attr->debug_area_virt_addr) {
 		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
-				  host_attr->debug_area_virt_addr,
-				  host_attr->debug_area_dma_addr);
+				  host_attr->debug_area_virt_addr, host_attr->debug_area_dma_addr);
 		host_attr->debug_area_virt_addr = NULL;
 	}
 }
@@ -2877,8 +2783,7 @@ int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
 					    sizeof(resp));
 
 	if (unlikely(ret))
-		netdev_err(ena_dev->net_device,
-			   "Failed to set host attributes: %d\n", ret);
+		netdev_err(ena_dev->net_device, "Failed to set host attributes: %d\n", ret);
 
 	return ret;
 }
@@ -2896,8 +2801,7 @@ static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *en
 							  u32 *intr_moder_interval)
 {
 	if (!intr_delay_resolution) {
-		netdev_err(ena_dev->net_device,
-			   "Illegal interrupt delay granularity value\n");
+		netdev_err(ena_dev->net_device, "Illegal interrupt delay granularity value\n");
 		return -EFAULT;
 	}
@@ -2935,14 +2839,12 @@ int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
 
 	if (rc) {
 		if (rc == -EOPNOTSUPP) {
-			netdev_dbg(ena_dev->net_device,
-				   "Feature %d isn't supported\n",
+			netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
 				   ENA_ADMIN_INTERRUPT_MODERATION);
 			rc = 0;
 		} else {
 			netdev_err(ena_dev->net_device,
-				   "Failed to get interrupt moderation admin cmd. rc: %d\n",
-				   rc);
+				   "Failed to get interrupt moderation admin cmd. rc: %d\n", rc);
 		}
 
 		/* no moderation supported, disable adaptive support */
@@ -2990,8 +2892,7 @@ int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
 		(llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
 
 	if (unlikely(ena_dev->tx_max_header_size == 0)) {
-		netdev_err(ena_dev->net_device,
-			   "The size of the LLQ entry is smaller than needed\n");
+		netdev_err(ena_dev->net_device, "The size of the LLQ entry is smaller than needed\n");
 		return -EINVAL;
 	}
-- 
cgit v1.2.3