Diffstat (limited to 'debian/patches/features/all/ena/0008-net-ena-explicit-casting-and-initialization-and-clea.patch')
-rw-r--r--  debian/patches/features/all/ena/0008-net-ena-explicit-casting-and-initialization-and-clea.patch  223
1 file changed, 223 insertions, 0 deletions
diff --git a/debian/patches/features/all/ena/0008-net-ena-explicit-casting-and-initialization-and-clea.patch b/debian/patches/features/all/ena/0008-net-ena-explicit-casting-and-initialization-and-clea.patch
new file mode 100644
index 000000000..59ea0566a
--- /dev/null
+++ b/debian/patches/features/all/ena/0008-net-ena-explicit-casting-and-initialization-and-clea.patch
@@ -0,0 +1,223 @@
+From: Arthur Kiyanovski <akiyano@amazon.com>
+Date: Thu, 11 Oct 2018 11:26:22 +0300
+Subject: [PATCH 08/19] net: ena: explicit casting and initialization, and
+ clearer error handling
+Origin: https://git.kernel.org/linus/bd791175a6432d24fc5d7b348304276027372545
+
+Signed-off-by: Arthur Kiyanovski <akiyano@amazon.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ drivers/net/ethernet/amazon/ena/ena_com.c | 39 ++++++++++++--------
+ drivers/net/ethernet/amazon/ena/ena_netdev.c | 5 +--
+ drivers/net/ethernet/amazon/ena/ena_netdev.h | 22 +++++------
+ 3 files changed, 36 insertions(+), 30 deletions(-)
+
+Index: linux/drivers/net/ethernet/amazon/ena/ena_com.c
+===================================================================
+--- linux.orig/drivers/net/ethernet/amazon/ena/ena_com.c
++++ linux/drivers/net/ethernet/amazon/ena/ena_com.c
+@@ -235,7 +235,7 @@ static struct ena_comp_ctx *__ena_com_su
+ tail_masked = admin_queue->sq.tail & queue_size_mask;
+
+ /* In case of queue FULL */
+- cnt = atomic_read(&admin_queue->outstanding_cmds);
++ cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
+ if (cnt >= admin_queue->q_depth) {
+ pr_debug("admin queue is full.\n");
+ admin_queue->stats.out_of_space++;
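[Editor's note, not part of the patch: atomic_read() returns int while cnt is a u16, so the narrowing happens with or without the cast; writing (u16) explicitly documents that the truncation is intentional and quiets conversion-checking tools. The later (u8) cast of dma_addr_bits follows the same idea. A minimal userspace sketch of that narrowing, with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int outstanding = 70000;              /* wider than 16 bits */
	uint16_t cnt = (uint16_t)outstanding; /* explicit, intentional narrowing */

	printf("int %d -> u16 %u\n", outstanding, cnt); /* 70000 -> 4464 */
	return 0;
}
]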
+@@ -304,7 +304,7 @@ static struct ena_comp_ctx *ena_com_subm
+ struct ena_admin_acq_entry *comp,
+ size_t comp_size_in_bytes)
+ {
+- unsigned long flags;
++ unsigned long flags = 0;
+ struct ena_comp_ctx *comp_ctx;
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
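[Editor's note: the recurring "flags = 0" initializations throughout this patch all address the same issue. spin_lock_irqsave() is a macro that writes flags as an out-parameter; some compilers and static analyzers cannot see that write, so they flag the later read in spin_unlock_irqrestore() as possibly uninitialized. A userspace sketch of the pattern, using a hypothetical fake_lock_irqsave() to stand in for the kernel macro:

#include <stdio.h>

/* Hypothetical stand-in for spin_lock_irqsave(): it writes its second
 * argument, but the write is hidden inside the macro body. */
#define fake_lock_irqsave(lock, flags) do { (void)(lock); (flags) = 1UL; } while (0)
#define fake_unlock_irqrestore(lock, flags) do { (void)(lock); (void)(flags); } while (0)

int main(void)
{
	int lock = 0;
	unsigned long flags = 0;  /* defensive init, mirroring the patch */

	fake_lock_irqsave(&lock, flags);
	fake_unlock_irqrestore(&lock, flags);
	printf("flags=%lu\n", flags);
	return 0;
}
]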
+@@ -332,7 +332,7 @@ static int ena_com_init_io_sq(struct ena
+
+ memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
+
+- io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
++ io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
+ io_sq->desc_entry_size =
+ (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
+ sizeof(struct ena_eth_io_tx_desc) :
+@@ -486,7 +486,7 @@ static void ena_com_handle_admin_complet
+
+ /* Go over all the completions */
+ while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
+- ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
++ ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
+ /* Do not read the rest of the completion entry before the
+ * phase bit was validated
+ */
+@@ -537,7 +537,8 @@ static int ena_com_comp_status_to_errno(
+ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+ {
+- unsigned long flags, timeout;
++ unsigned long flags = 0;
++ unsigned long timeout;
+ int ret;
+
+ timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
+@@ -736,7 +737,7 @@ static int ena_com_config_llq_info(struc
+ static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
+ struct ena_com_admin_queue *admin_queue)
+ {
+- unsigned long flags;
++ unsigned long flags = 0;
+ int ret;
+
+ wait_for_completion_timeout(&comp_ctx->wait_event,
+@@ -782,7 +783,7 @@ static u32 ena_com_reg_bar_read32(struct
+ volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
+ mmio_read->read_resp;
+ u32 mmio_read_reg, ret, i;
+- unsigned long flags;
++ unsigned long flags = 0;
+ u32 timeout = mmio_read->reg_read_to;
+
+ might_sleep();
+@@ -1426,7 +1427,7 @@ void ena_com_abort_admin_commands(struct
+ void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
+ {
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+- unsigned long flags;
++ unsigned long flags = 0;
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
+@@ -1470,7 +1471,7 @@ bool ena_com_get_admin_running_state(str
+ void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
+ {
+ struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
+- unsigned long flags;
++ unsigned long flags = 0;
+
+ spin_lock_irqsave(&admin_queue->q_lock, flags);
+ ena_dev->admin_queue.running_state = state;
+@@ -1504,7 +1505,7 @@ int ena_com_set_aenq_config(struct ena_c
+ }
+
+ if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
+- pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
++ pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
+ get_resp.u.aenq.supported_groups, groups_flag);
+ return -EOPNOTSUPP;
+ }
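[Editor's note: the log-format change above is about readability. "%x" prints hexadecimal without a prefix, so a flag value of 0x10 appears as a bare "10" and is easily misread as decimal. A tiny demo:

#include <stdio.h>

int main(void)
{
	unsigned int flags = 0x10;

	printf("ambiguous: %x\n", flags);   /* prints "10" */
	printf("explicit:  0x%x\n", flags); /* prints "0x10" */
	return 0;
}
]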
+@@ -1652,7 +1653,7 @@ int ena_com_mmio_reg_read_request_init(s
+ sizeof(*mmio_read->read_resp),
+ &mmio_read->read_resp_dma_addr, GFP_KERNEL);
+ if (unlikely(!mmio_read->read_resp))
+- return -ENOMEM;
++ goto err;
+
+ ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
+
+@@ -1661,6 +1662,10 @@ int ena_com_mmio_reg_read_request_init(s
+ mmio_read->readless_supported = true;
+
+ return 0;
++
++err:
++
++ return -ENOMEM;
+ }
+
+ void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
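[Editor's note: the two hunks above convert a direct "return -ENOMEM" into a goto-based single exit. Even with only one failure site today, funnelling failures through an err: label keeps cleanup in one place as the function grows. A minimal sketch of that shape, with a hypothetical init_resources() and malloc() standing in for the DMA allocation:

#include <stdio.h>
#include <stdlib.h>

static int init_resources(void)
{
	void *resp = malloc(64);  /* stands in for dma_zalloc_coherent() */
	if (!resp)
		goto err;

	/* ... further setup would go here ... */
	free(resp);
	return 0;

err:
	return -1;  /* -ENOMEM in the kernel */
}

int main(void)
{
	printf("init_resources: %d\n", init_resources());
	return 0;
}
]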
+@@ -1961,6 +1966,7 @@ void ena_com_aenq_intr_handler(struct en
+ struct ena_admin_aenq_entry *aenq_e;
+ struct ena_admin_aenq_common_desc *aenq_common;
+ struct ena_com_aenq *aenq = &dev->aenq;
++ unsigned long long timestamp;
+ ena_aenq_handler handler_cb;
+ u16 masked_head, processed = 0;
+ u8 phase;
+@@ -1978,10 +1984,11 @@ void ena_com_aenq_intr_handler(struct en
+ */
+ dma_rmb();
+
++ timestamp =
++ (unsigned long long)aenq_common->timestamp_low |
++ ((unsigned long long)aenq_common->timestamp_high << 32);
+ pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
+- aenq_common->group, aenq_common->syndrom,
+- (u64)aenq_common->timestamp_low +
+- ((u64)aenq_common->timestamp_high << 32));
++ aenq_common->group, aenq_common->syndrom, timestamp);
+
+ /* Handle specific event*/
+ handler_cb = ena_com_get_specific_aenq_cb(dev,
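[Editor's note: the hunk above hoists the 64-bit timestamp assembly out of the pr_debug() argument list into a named variable, and switches from "+" to "|" — equivalent here, since the low and high halves occupy disjoint bits. The same assembly in a standalone program, with made-up input values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t timestamp_low  = 0xDEADBEEF;
	uint32_t timestamp_high = 0x00000001;

	unsigned long long timestamp =
		(unsigned long long)timestamp_low |
		((unsigned long long)timestamp_high << 32);

	printf("timestamp: 0x%llx\n", timestamp); /* 0x1deadbeef */
	return 0;
}
]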
+@@ -2623,8 +2630,8 @@ int ena_com_allocate_host_info(struct en
+ if (unlikely(!host_attr->host_info))
+ return -ENOMEM;
+
+- host_attr->host_info->ena_spec_version =
+- ((ENA_COMMON_SPEC_VERSION_MAJOR << ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
++ host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
++ ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
+ (ENA_COMMON_SPEC_VERSION_MINOR));
+
+ return 0;
+Index: linux/drivers/net/ethernet/amazon/ena/ena_netdev.c
+===================================================================
+--- linux.orig/drivers/net/ethernet/amazon/ena/ena_netdev.c
++++ linux/drivers/net/ethernet/amazon/ena/ena_netdev.c
+@@ -2604,15 +2604,14 @@ static void ena_destroy_device(struct en
+
+ dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
+ adapter->dev_up_before_reset = dev_up;
+-
+ if (!graceful)
+ ena_com_set_admin_running_state(ena_dev, false);
+
+ if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+ ena_down(adapter);
+
+- /* Before releasing the ENA resources, a device reset is required.
+- * (to prevent the device from accessing them).
++ /* Stop the device from sending AENQ events (in case reset flag is set
++ * and device is up, ena_close already reset the device
+ * In case the reset flag is set and the device is up, ena_down()
+ * already perform the reset, so it can be skipped.
+ */
+Index: linux/drivers/net/ethernet/amazon/ena/ena_netdev.h
+===================================================================
+--- linux.orig/drivers/net/ethernet/amazon/ena/ena_netdev.h
++++ linux/drivers/net/ethernet/amazon/ena/ena_netdev.h
+@@ -61,6 +61,17 @@
+ #define ENA_ADMIN_MSIX_VEC 1
+ #define ENA_MAX_MSIX_VEC(io_queues) (ENA_ADMIN_MSIX_VEC + (io_queues))
+
++/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
++ * driver passes 0.
++ * Since the max packet size the ENA handles is ~9kB limit the buffer length to
++ * 16kB.
++ */
++#if PAGE_SIZE > SZ_16K
++#define ENA_PAGE_SIZE SZ_16K
++#else
++#define ENA_PAGE_SIZE PAGE_SIZE
++#endif
++
+ #define ENA_MIN_MSIX_VEC 2
+
+ #define ENA_REG_BAR 0
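[Editor's note: the block added above (and removed from its old location in the following hunk) explains the cap: the ENA buffer length field is 16 bits wide, so a full 64 KiB page size would truncate to 0, while 16 KiB still comfortably covers the ~9 KiB maximum packet size. A standalone illustration of both the truncation and the cap:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_size = 64 * 1024;
	uint16_t len_field = (uint16_t)page_size;  /* 65536 truncates to 0! */

	uint32_t ena_page_size = page_size > 16 * 1024 ? 16 * 1024 : page_size;

	printf("64K as u16 length field: %u\n", len_field);      /* 0 */
	printf("capped ENA_PAGE_SIZE:    %u\n", ena_page_size);  /* 16384 */
	return 0;
}
]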
+@@ -362,15 +373,4 @@ void ena_dump_stats_to_buf(struct ena_ad
+
+ int ena_get_sset_count(struct net_device *netdev, int sset);
+
+-/* The ENA buffer length fields is 16 bit long. So when PAGE_SIZE == 64kB the
+- * driver passas 0.
+- * Since the max packet size the ENA handles is ~9kB limit the buffer length to
+- * 16kB.
+- */
+-#if PAGE_SIZE > SZ_16K
+-#define ENA_PAGE_SIZE SZ_16K
+-#else
+-#define ENA_PAGE_SIZE PAGE_SIZE
+-#endif
+-
+ #endif /* !(ENA_H) */