author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit     b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree       42f079ff82e701ebcb76829974b4caca3e5b6798 /drivers/dma/ti
parent     Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/dma/ti')
-rw-r--r--  drivers/dma/ti/k3-psil-j721s2.c   73
-rw-r--r--  drivers/dma/ti/k3-udma-glue.c    297
2 files changed, 286 insertions, 84 deletions
diff --git a/drivers/dma/ti/k3-psil-j721s2.c b/drivers/dma/ti/k3-psil-j721s2.c
index 1d5430fc57..ba08bdcdcd 100644
--- a/drivers/dma/ti/k3-psil-j721s2.c
+++ b/drivers/dma/ti/k3-psil-j721s2.c
@@ -57,6 +57,14 @@
}, \
}
+#define PSIL_CSI2RX(x) \
+ { \
+ .thread_id = x, \
+ .ep_config = { \
+ .ep_type = PSIL_EP_NATIVE, \
+ }, \
+ }
+
/* PSI-L source thread IDs, used for RX (DMA_DEV_TO_MEM) */
static struct psil_ep j721s2_src_ep_map[] = {
/* PDMA_MCASP - McASP0-4 */
@@ -114,6 +122,71 @@ static struct psil_ep j721s2_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4707),
PSIL_PDMA_XY_PKT(0x4708),
PSIL_PDMA_XY_PKT(0x4709),
+ /* CSI2RX */
+ PSIL_CSI2RX(0x4940),
+ PSIL_CSI2RX(0x4941),
+ PSIL_CSI2RX(0x4942),
+ PSIL_CSI2RX(0x4943),
+ PSIL_CSI2RX(0x4944),
+ PSIL_CSI2RX(0x4945),
+ PSIL_CSI2RX(0x4946),
+ PSIL_CSI2RX(0x4947),
+ PSIL_CSI2RX(0x4948),
+ PSIL_CSI2RX(0x4949),
+ PSIL_CSI2RX(0x494a),
+ PSIL_CSI2RX(0x494b),
+ PSIL_CSI2RX(0x494c),
+ PSIL_CSI2RX(0x494d),
+ PSIL_CSI2RX(0x494e),
+ PSIL_CSI2RX(0x494f),
+ PSIL_CSI2RX(0x4950),
+ PSIL_CSI2RX(0x4951),
+ PSIL_CSI2RX(0x4952),
+ PSIL_CSI2RX(0x4953),
+ PSIL_CSI2RX(0x4954),
+ PSIL_CSI2RX(0x4955),
+ PSIL_CSI2RX(0x4956),
+ PSIL_CSI2RX(0x4957),
+ PSIL_CSI2RX(0x4958),
+ PSIL_CSI2RX(0x4959),
+ PSIL_CSI2RX(0x495a),
+ PSIL_CSI2RX(0x495b),
+ PSIL_CSI2RX(0x495c),
+ PSIL_CSI2RX(0x495d),
+ PSIL_CSI2RX(0x495e),
+ PSIL_CSI2RX(0x495f),
+ PSIL_CSI2RX(0x4960),
+ PSIL_CSI2RX(0x4961),
+ PSIL_CSI2RX(0x4962),
+ PSIL_CSI2RX(0x4963),
+ PSIL_CSI2RX(0x4964),
+ PSIL_CSI2RX(0x4965),
+ PSIL_CSI2RX(0x4966),
+ PSIL_CSI2RX(0x4967),
+ PSIL_CSI2RX(0x4968),
+ PSIL_CSI2RX(0x4969),
+ PSIL_CSI2RX(0x496a),
+ PSIL_CSI2RX(0x496b),
+ PSIL_CSI2RX(0x496c),
+ PSIL_CSI2RX(0x496d),
+ PSIL_CSI2RX(0x496e),
+ PSIL_CSI2RX(0x496f),
+ PSIL_CSI2RX(0x4970),
+ PSIL_CSI2RX(0x4971),
+ PSIL_CSI2RX(0x4972),
+ PSIL_CSI2RX(0x4973),
+ PSIL_CSI2RX(0x4974),
+ PSIL_CSI2RX(0x4975),
+ PSIL_CSI2RX(0x4976),
+ PSIL_CSI2RX(0x4977),
+ PSIL_CSI2RX(0x4978),
+ PSIL_CSI2RX(0x4979),
+ PSIL_CSI2RX(0x497a),
+ PSIL_CSI2RX(0x497b),
+ PSIL_CSI2RX(0x497c),
+ PSIL_CSI2RX(0x497d),
+ PSIL_CSI2RX(0x497e),
+ PSIL_CSI2RX(0x497f),
/* MAIN SA2UL */
PSIL_SA2UL(0x4a40, 0),
PSIL_SA2UL(0x4a41, 0),
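
Note: each PSIL_CSI2RX(x) entry added above describes a bare native PSI-L endpoint, with no endpoint info block (EPIB) and no protocol-specific data, so only .thread_id and .ep_type are set. As a sketch, PSIL_CSI2RX(0x4940) expands to roughly the following (struct psil_ep comes from the driver-internal k3-psil-priv.h):

	/* Approximate expansion of PSIL_CSI2RX(0x4940); field names follow
	 * the macro definition in the hunk above. All other ep_config
	 * fields stay zero-initialized, i.e. needs_epib = false and
	 * psd_size = 0. */
	static struct psil_ep csi2rx_ep_example = {
		.thread_id = 0x4940,
		.ep_config = {
			.ep_type = PSIL_EP_NATIVE,
		},
	};
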
diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
index c278d5facf..f0a399cf45 100644
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -111,6 +111,35 @@ static int of_k3_udma_glue_parse(struct device_node *udmax_np,
return 0;
}
+static int of_k3_udma_glue_parse_chn_common(struct k3_udma_glue_common *common, u32 thread_id,
+ bool tx_chn)
+{
+ if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+ return -EINVAL;
+
+ if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET))
+ return -EINVAL;
+
+ /* get psil endpoint config */
+ common->ep_config = psil_get_ep_config(thread_id);
+ if (IS_ERR(common->ep_config)) {
+ dev_err(common->dev,
+ "No configuration for psi-l thread 0x%04x\n",
+ thread_id);
+ return PTR_ERR(common->ep_config);
+ }
+
+ common->epib = common->ep_config->needs_epib;
+ common->psdata_size = common->ep_config->psd_size;
+
+ if (tx_chn)
+ common->dst_thread = thread_id;
+ else
+ common->src_thread = thread_id;
+
+ return 0;
+}
+
static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
const char *name, struct k3_udma_glue_common *common,
bool tx_chn)
@@ -153,38 +182,29 @@ static int of_k3_udma_glue_parse_chn(struct device_node *chn_np,
common->atype_asel = dma_spec.args[1];
}
- if (tx_chn && !(thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
- ret = -EINVAL;
- goto out_put_spec;
- }
+ ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
- if (!tx_chn && (thread_id & K3_PSIL_DST_THREAD_ID_OFFSET)) {
- ret = -EINVAL;
- goto out_put_spec;
- }
+out_put_spec:
+ of_node_put(dma_spec.np);
+ return ret;
+}
- /* get psil endpoint config */
- common->ep_config = psil_get_ep_config(thread_id);
- if (IS_ERR(common->ep_config)) {
- dev_err(common->dev,
- "No configuration for psi-l thread 0x%04x\n",
- thread_id);
- ret = PTR_ERR(common->ep_config);
- goto out_put_spec;
- }
+static int
+of_k3_udma_glue_parse_chn_by_id(struct device_node *udmax_np, struct k3_udma_glue_common *common,
+ bool tx_chn, u32 thread_id)
+{
+ int ret = 0;
- common->epib = common->ep_config->needs_epib;
- common->psdata_size = common->ep_config->psd_size;
+ if (unlikely(!udmax_np))
+ return -EINVAL;
- if (tx_chn)
- common->dst_thread = thread_id;
- else
- common->src_thread = thread_id;
+ ret = of_k3_udma_glue_parse(udmax_np, common);
+ if (ret)
+ return ret;
-out_put_spec:
- of_node_put(dma_spec.np);
+ ret = of_k3_udma_glue_parse_chn_common(common, thread_id, tx_chn);
return ret;
-};
+}
static void k3_udma_glue_dump_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
@@ -251,29 +271,13 @@ static int k3_udma_glue_cfg_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
return tisci_rm->tisci_udmap_ops->tx_ch_cfg(tisci_rm->tisci, &req);
}
-struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
- const char *name, struct k3_udma_glue_tx_channel_cfg *cfg)
+static int
+k3_udma_glue_request_tx_chn_common(struct device *dev,
+ struct k3_udma_glue_tx_channel *tx_chn,
+ struct k3_udma_glue_tx_channel_cfg *cfg)
{
- struct k3_udma_glue_tx_channel *tx_chn;
int ret;
- tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
- if (!tx_chn)
- return ERR_PTR(-ENOMEM);
-
- tx_chn->common.dev = dev;
- tx_chn->common.swdata_size = cfg->swdata_size;
- tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
- tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
- tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
- tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
-
- /* parse of udmap channel */
- ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
- &tx_chn->common, true);
- if (ret)
- goto err;
-
tx_chn->common.hdesc_size = cppi5_hdesc_calc_size(tx_chn->common.epib,
tx_chn->common.psdata_size,
tx_chn->common.swdata_size);
@@ -289,7 +293,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
if (IS_ERR(tx_chn->udma_tchanx)) {
ret = PTR_ERR(tx_chn->udma_tchanx);
dev_err(dev, "UDMAX tchanx get err %d\n", ret);
- goto err;
+ return ret;
}
tx_chn->udma_tchan_id = xudma_tchan_get_id(tx_chn->udma_tchanx);
@@ -302,7 +306,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
dev_err(dev, "Channel Device registration failed %d\n", ret);
put_device(&tx_chn->common.chan_dev);
tx_chn->common.chan_dev.parent = NULL;
- goto err;
+ return ret;
}
if (xudma_is_pktdma(tx_chn->common.udmax)) {
@@ -326,7 +330,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
&tx_chn->ringtxcq);
if (ret) {
dev_err(dev, "Failed to get TX/TXCQ rings %d\n", ret);
- goto err;
+ return ret;
}
/* Set the dma_dev for the rings to be configured */
@@ -342,13 +346,13 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_ringacc_ring_cfg(tx_chn->ringtx, &cfg->tx_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
- goto err;
+ return ret;
}
ret = k3_ringacc_ring_cfg(tx_chn->ringtxcq, &cfg->txcq_cfg);
if (ret) {
dev_err(dev, "Failed to cfg ringtx %d\n", ret);
- goto err;
+ return ret;
}
/* request and cfg psi-l */
@@ -359,11 +363,42 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
ret = k3_udma_glue_cfg_tx_chn(tx_chn);
if (ret) {
dev_err(dev, "Failed to cfg tchan %d\n", ret);
- goto err;
+ return ret;
}
k3_udma_glue_dump_tx_chn(tx_chn);
+ return 0;
+}
+
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn(struct device *dev, const char *name,
+ struct k3_udma_glue_tx_channel_cfg *cfg)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ /* parse of udmap channel */
+ ret = of_k3_udma_glue_parse_chn(dev->of_node, name,
+ &tx_chn->common, true);
+ if (ret)
+ goto err;
+
+ ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+ if (ret)
+ goto err;
+
return tx_chn;
err:
@@ -372,6 +407,41 @@ err:
}
EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn);
+struct k3_udma_glue_tx_channel *
+k3_udma_glue_request_tx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_tx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id)
+{
+ struct k3_udma_glue_tx_channel *tx_chn;
+ int ret;
+
+ tx_chn = devm_kzalloc(dev, sizeof(*tx_chn), GFP_KERNEL);
+ if (!tx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ tx_chn->common.dev = dev;
+ tx_chn->common.swdata_size = cfg->swdata_size;
+ tx_chn->tx_pause_on_err = cfg->tx_pause_on_err;
+ tx_chn->tx_filt_einfo = cfg->tx_filt_einfo;
+ tx_chn->tx_filt_pswords = cfg->tx_filt_pswords;
+ tx_chn->tx_supr_tdpkt = cfg->tx_supr_tdpkt;
+
+ ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &tx_chn->common, true, thread_id);
+ if (ret)
+ goto err;
+
+ ret = k3_udma_glue_request_tx_chn_common(dev, tx_chn, cfg);
+ if (ret)
+ goto err;
+
+ return tx_chn;
+
+err:
+ k3_udma_glue_release_tx_chn(tx_chn);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_tx_chn_for_thread_id);
+
void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn)
{
if (tx_chn->psil_paired) {
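
Note: the new k3_udma_glue_request_tx_chn_for_thread_id() export added in the hunk above lets a client driver request a TX channel directly by PSI-L thread ID and UDMA-X device node, instead of resolving a named "dmas" phandle; presumably this serves consumers of the CSI2RX threads added to the J721S2 PSI-L map. A minimal, hypothetical caller could look like the sketch below, where the swdata size and ring sizes are illustrative assumptions, not values from this commit:

	#include <linux/dma/k3-udma-glue.h>

	/* Hypothetical helper: request a TX channel by destination thread
	 * ID. For TX the thread ID must carry K3_PSIL_DST_THREAD_ID_OFFSET,
	 * otherwise of_k3_udma_glue_parse_chn_common() returns -EINVAL. */
	static struct k3_udma_glue_tx_channel *
	request_tx_by_thread(struct device *dev, struct device_node *udmax_np,
			     u32 dst_thread_id)
	{
		struct k3_udma_glue_tx_channel_cfg cfg = { };

		cfg.swdata_size = 16;		/* illustrative */
		cfg.tx_cfg.size = 128;		/* illustrative ring sizes */
		cfg.txcq_cfg.size = 128;

		return k3_udma_glue_request_tx_chn_for_thread_id(dev, &cfg,
								 udmax_np,
								 dst_thread_id);
	}

The returned pointer is devm-allocated against dev, and the error path above already calls k3_udma_glue_release_tx_chn() before returning ERR_PTR(ret), so a caller only needs an IS_ERR() check.
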
@@ -1000,12 +1070,59 @@ err:
return ERR_PTR(ret);
}
+static int
+k3_udma_glue_request_remote_rx_chn_common(struct k3_udma_glue_rx_channel *rx_chn,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device *dev)
+{
+ int ret, i;
+
+ rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
+ rx_chn->common.psdata_size,
+ rx_chn->common.swdata_size);
+
+ rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
+ sizeof(*rx_chn->flows), GFP_KERNEL);
+ if (!rx_chn->flows)
+ return -ENOMEM;
+
+ rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
+ rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
+ dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x-0x%02x",
+ rx_chn->common.src_thread, rx_chn->flow_id_base);
+ ret = device_register(&rx_chn->common.chan_dev);
+ if (ret) {
+ dev_err(dev, "Channel Device registration failed %d\n", ret);
+ put_device(&rx_chn->common.chan_dev);
+ rx_chn->common.chan_dev.parent = NULL;
+ return ret;
+ }
+
+ if (xudma_is_pktdma(rx_chn->common.udmax)) {
+ /* prepare the channel device as coherent */
+ rx_chn->common.chan_dev.dma_coherent = true;
+ dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
+ DMA_BIT_MASK(48));
+ }
+
+ ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < rx_chn->flow_num; i++)
+ rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
+
+ k3_udma_glue_dump_rx_chn(rx_chn);
+
+ return 0;
+}
+
static struct k3_udma_glue_rx_channel *
k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
struct k3_udma_glue_rx_channel_cfg *cfg)
{
struct k3_udma_glue_rx_channel *rx_chn;
- int ret, i;
+ int ret;
if (cfg->flow_id_num <= 0 ||
cfg->flow_id_use_rxchan_id ||
@@ -1036,44 +1153,55 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name,
if (ret)
goto err;
- rx_chn->common.hdesc_size = cppi5_hdesc_calc_size(rx_chn->common.epib,
- rx_chn->common.psdata_size,
- rx_chn->common.swdata_size);
-
- rx_chn->flows = devm_kcalloc(dev, rx_chn->flow_num,
- sizeof(*rx_chn->flows), GFP_KERNEL);
- if (!rx_chn->flows) {
- ret = -ENOMEM;
+ ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+ if (ret)
goto err;
- }
- rx_chn->common.chan_dev.class = &k3_udma_glue_devclass;
- rx_chn->common.chan_dev.parent = xudma_get_device(rx_chn->common.udmax);
- dev_set_name(&rx_chn->common.chan_dev, "rchan_remote-0x%04x",
- rx_chn->common.src_thread);
- ret = device_register(&rx_chn->common.chan_dev);
- if (ret) {
- dev_err(dev, "Channel Device registration failed %d\n", ret);
- put_device(&rx_chn->common.chan_dev);
- rx_chn->common.chan_dev.parent = NULL;
- goto err;
- }
+ return rx_chn;
- if (xudma_is_pktdma(rx_chn->common.udmax)) {
- /* prepare the channel device as coherent */
- rx_chn->common.chan_dev.dma_coherent = true;
- dma_coerce_mask_and_coherent(&rx_chn->common.chan_dev,
- DMA_BIT_MASK(48));
- }
+err:
+ k3_udma_glue_release_rx_chn(rx_chn);
+ return ERR_PTR(ret);
+}
- ret = k3_udma_glue_allocate_rx_flows(rx_chn, cfg);
+struct k3_udma_glue_rx_channel *
+k3_udma_glue_request_remote_rx_chn_for_thread_id(struct device *dev,
+ struct k3_udma_glue_rx_channel_cfg *cfg,
+ struct device_node *udmax_np, u32 thread_id)
+{
+ struct k3_udma_glue_rx_channel *rx_chn;
+ int ret;
+
+ if (cfg->flow_id_num <= 0 ||
+ cfg->flow_id_use_rxchan_id ||
+ cfg->def_flow_cfg ||
+ cfg->flow_id_base < 0)
+ return ERR_PTR(-EINVAL);
+
+ /*
+ * Remote RX channel is under control of Remote CPU core, so
+ * Linux can only request and manipulate by dedicated RX flows
+ */
+
+ rx_chn = devm_kzalloc(dev, sizeof(*rx_chn), GFP_KERNEL);
+ if (!rx_chn)
+ return ERR_PTR(-ENOMEM);
+
+ rx_chn->common.dev = dev;
+ rx_chn->common.swdata_size = cfg->swdata_size;
+ rx_chn->remote = true;
+ rx_chn->udma_rchan_id = -1;
+ rx_chn->flow_num = cfg->flow_id_num;
+ rx_chn->flow_id_base = cfg->flow_id_base;
+ rx_chn->psil_paired = false;
+
+ ret = of_k3_udma_glue_parse_chn_by_id(udmax_np, &rx_chn->common, false, thread_id);
if (ret)
goto err;
- for (i = 0; i < rx_chn->flow_num; i++)
- rx_chn->flows[i].udma_rflow_id = rx_chn->flow_id_base + i;
-
- k3_udma_glue_dump_rx_chn(rx_chn);
+ ret = k3_udma_glue_request_remote_rx_chn_common(rx_chn, cfg, dev);
+ if (ret)
+ goto err;
return rx_chn;
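
Note: the remote-RX counterpart just above follows the same pattern as the TX variant: validate the flow configuration (positive flow_id_num, no rxchan-id reuse, no default flow config), parse by thread ID, then run the shared setup. A hypothetical caller, with illustrative flow and swdata values and the same header assumption as the TX sketch, might be:

	/* Hypothetical helper: request a remote RX channel by source thread
	 * ID (e.g. one of the CSI2RX threads 0x4940..0x497f added above).
	 * The cfg constraints mirror the checks at the top of the
	 * function. */
	static struct k3_udma_glue_rx_channel *
	request_remote_rx_by_thread(struct device *dev,
				    struct device_node *udmax_np,
				    u32 src_thread_id)
	{
		struct k3_udma_glue_rx_channel_cfg cfg = { };

		cfg.swdata_size = 16;	/* illustrative */
		cfg.flow_id_num = 1;	/* at least one dedicated RX flow */
		cfg.flow_id_base = 0;	/* illustrative base flow */

		return k3_udma_glue_request_remote_rx_chn_for_thread_id(dev,
									&cfg,
									udmax_np,
									src_thread_id);
	}
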
@@ -1081,6 +1209,7 @@ err:
k3_udma_glue_release_rx_chn(rx_chn);
return ERR_PTR(ret);
}
+EXPORT_SYMBOL_GPL(k3_udma_glue_request_remote_rx_chn_for_thread_id);
struct k3_udma_glue_rx_channel *
k3_udma_glue_request_rx_chn(struct device *dev, const char *name,