Diffstat (limited to 'drivers/net/wwan/t7xx/t7xx_hif_cldma.c')
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_cldma.c | 47
1 file changed, 32 insertions(+), 15 deletions(-)
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index 554ba4669c..97163e1e57 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -57,8 +57,6 @@
#define CHECK_Q_STOP_TIMEOUT_US 1000000
#define CHECK_Q_STOP_STEP_US 10000

-#define CLDMA_JUMBO_BUFF_SZ (63 * 1024 + sizeof(struct ccci_header))
-
static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
enum mtk_txrx tx_rx, unsigned int index)
{
@@ -162,7 +160,7 @@ static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool
skb_reset_tail_pointer(skb);
skb_put(skb, le16_to_cpu(gpd->data_buff_len));

- ret = md_ctrl->recv_skb(queue, skb);
+ ret = queue->recv_skb(queue, skb);
/* Break processing, will try again later */
if (ret < 0)
return ret;
@@ -898,13 +896,13 @@ static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,

/**
* t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
- * @md_ctrl: CLDMA context structure.
+ * @queue: CLDMA queue.
* @recv_skb: Receiving skb callback.
*/
-void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
+void t7xx_cldma_set_recv_skb(struct cldma_queue *queue,
int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
- md_ctrl->recv_skb = recv_skb;
+ queue->recv_skb = recv_skb;
}

/**
@@ -994,6 +992,28 @@ allow_sleep:
return ret;
}

+static void t7xx_cldma_adjust_config(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
+{
+ int qno;
+
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++) {
+ md_ctrl->rx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[qno], t7xx_port_proxy_recv_skb);
+ }
+
+ md_ctrl->rx_ring[CLDMA_RXQ_NUM - 1].pkt_size = CLDMA_JUMBO_BUFF_SZ;
+
+ for (qno = 0; qno < CLDMA_TXQ_NUM; qno++)
+ md_ctrl->tx_ring[qno].pkt_size = CLDMA_SHARED_Q_BUFF_SZ;
+
+ if (cfg_id == CLDMA_DEDICATED_Q_CFG) {
+ md_ctrl->tx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ md_ctrl->rx_ring[CLDMA_Q_IDX_DUMP].pkt_size = CLDMA_DEDICATED_Q_BUFF_SZ;
+ t7xx_cldma_set_recv_skb(&md_ctrl->rxq[CLDMA_Q_IDX_DUMP],
+ t7xx_port_proxy_recv_skb_from_dedicated_queue);
+ }
+}
+
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
char dma_pool_name[32];
@@ -1019,16 +1039,9 @@ static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
dev_err(md_ctrl->dev, "control TX ring init fail\n");
goto err_free_tx_ring;
}
-
- md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
}

for (j = 0; j < CLDMA_RXQ_NUM; j++) {
- md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;
-
- if (j == CLDMA_RXQ_NUM - 1)
- md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;
-
ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
if (ret) {
dev_err(md_ctrl->dev, "Control RX ring init fail\n");
@@ -1095,6 +1108,7 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
struct device *dev = &t7xx_dev->pdev->dev;
struct cldma_ctrl *md_ctrl;
+ int qno;

md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
if (!md_ctrl)
@@ -1103,7 +1117,9 @@ int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
md_ctrl->t7xx_dev = t7xx_dev;
md_ctrl->dev = dev;
md_ctrl->hif_id = hif_id;
- md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
+ for (qno = 0; qno < CLDMA_RXQ_NUM; qno++)
+ md_ctrl->rxq[qno].recv_skb = t7xx_cldma_default_recv_skb;
+
t7xx_hw_info_init(md_ctrl);
t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
return 0;
@@ -1333,9 +1349,10 @@ err_workqueue:
return -ENOMEM;
}

-void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
+void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl, enum cldma_cfg cfg_id)
{
t7xx_cldma_late_release(md_ctrl);
+ t7xx_cldma_adjust_config(md_ctrl, cfg_id);
t7xx_cldma_late_init(md_ctrl);
}
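
For context, here is a minimal caller-side sketch of the reworked API, not part of the patch itself: the RX callback now hangs off each struct cldma_queue rather than the controller-wide struct cldma_ctrl, and t7xx_cldma_switch_cfg() takes a config ID so t7xx_cldma_adjust_config() can resize the rings and install per-queue handlers before the rings are rebuilt. my_early_port_recv(), my_reconfigure(), and the choice of rxq[0] are hypothetical and used only to illustrate the registration flow; the other type and function names come from the patch.

#include <linux/skbuff.h>
#include "t7xx_hif_cldma.h"

/* Hypothetical RX handler. Per the "Break processing" comment in
 * t7xx_cldma_gpd_rx_from_q(), returning a negative value makes CLDMA stop
 * and retry the queue later; return 0 once the skb has been consumed. */
static int my_early_port_recv(struct cldma_queue *queue, struct sk_buff *skb)
{
	dev_kfree_skb(skb);
	return 0;
}

static void my_reconfigure(struct cldma_ctrl *md_ctrl)
{
	/* Tear down and rebuild the rings in the dedicated-queue layout:
	 * t7xx_cldma_adjust_config() gives the dump queue
	 * CLDMA_DEDICATED_Q_BUFF_SZ buffers and routes it through
	 * t7xx_port_proxy_recv_skb_from_dedicated_queue(). */
	t7xx_cldma_switch_cfg(md_ctrl, CLDMA_DEDICATED_Q_CFG);

	/* Callbacks are now registered per queue, not per controller. */
	t7xx_cldma_set_recv_skb(&md_ctrl->rxq[0], my_early_port_recv);
}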