Diffstat (limited to 'drivers/soc/fsl/qe')
-rw-r--r-- drivers/soc/fsl/qe/qmc.c | 637
1 file changed, 490 insertions(+), 147 deletions(-)
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index 2312152a44..f498db9abe 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -166,7 +166,7 @@
struct qmc_xfer_desc {
union {
void (*tx_complete)(void *context);
- void (*rx_complete)(void *context, size_t length);
+ void (*rx_complete)(void *context, size_t length, unsigned int flags);
};
void *context;
};
@@ -177,7 +177,10 @@ struct qmc_chan {
struct qmc *qmc;
void __iomem *s_param;
enum qmc_mode mode;
+ spinlock_t ts_lock; /* Protect timeslots */
+ u64 tx_ts_mask_avail;
u64 tx_ts_mask;
+ u64 rx_ts_mask_avail;
u64 rx_ts_mask;
bool is_reverse_data;
@@ -214,41 +217,47 @@ struct qmc {
u16 __iomem *int_curr;
dma_addr_t int_dma_addr;
size_t int_size;
+ bool is_tsa_64rxtx;
struct list_head chan_head;
struct qmc_chan *chans[64];
};
-static inline void qmc_write16(void __iomem *addr, u16 val)
+static void qmc_write16(void __iomem *addr, u16 val)
{
iowrite16be(val, addr);
}
-static inline u16 qmc_read16(void __iomem *addr)
+static u16 qmc_read16(void __iomem *addr)
{
return ioread16be(addr);
}
-static inline void qmc_setbits16(void __iomem *addr, u16 set)
+static void qmc_setbits16(void __iomem *addr, u16 set)
{
qmc_write16(addr, qmc_read16(addr) | set);
}
-static inline void qmc_clrbits16(void __iomem *addr, u16 clr)
+static void qmc_clrbits16(void __iomem *addr, u16 clr)
{
qmc_write16(addr, qmc_read16(addr) & ~clr);
}
-static inline void qmc_write32(void __iomem *addr, u32 val)
+static void qmc_clrsetbits16(void __iomem *addr, u16 clr, u16 set)
+{
+ qmc_write16(addr, (qmc_read16(addr) & ~clr) | set);
+}
+
+static void qmc_write32(void __iomem *addr, u32 val)
{
iowrite32be(val, addr);
}
-static inline u32 qmc_read32(void __iomem *addr)
+static u32 qmc_read32(void __iomem *addr)
{
return ioread32be(addr);
}
-static inline void qmc_setbits32(void __iomem *addr, u32 set)
+static void qmc_setbits32(void __iomem *addr, u32 set)
{
qmc_write32(addr, qmc_read32(addr) | set);
}
@@ -257,6 +266,7 @@ static inline void qmc_setbits32(void __iomem *addr, u32 set)
int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
{
struct tsa_serial_info tsa_info;
+ unsigned long flags;
int ret;
/* Retrieve info from the TSA related serial */
@@ -264,6 +274,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
if (ret)
return ret;
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
info->mode = chan->mode;
info->rx_fs_rate = tsa_info.rx_fs_rate;
info->rx_bit_rate = tsa_info.rx_bit_rate;
@@ -272,10 +284,63 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
info->tx_bit_rate = tsa_info.tx_bit_rate;
info->nb_rx_ts = hweight64(chan->rx_ts_mask);
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
return 0;
}
EXPORT_SYMBOL(qmc_chan_get_info);
+int qmc_chan_get_ts_info(struct qmc_chan *chan, struct qmc_chan_ts_info *ts_info)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
+ ts_info->rx_ts_mask_avail = chan->rx_ts_mask_avail;
+ ts_info->tx_ts_mask_avail = chan->tx_ts_mask_avail;
+ ts_info->rx_ts_mask = chan->rx_ts_mask;
+ ts_info->tx_ts_mask = chan->tx_ts_mask;
+
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(qmc_chan_get_ts_info);
+
+int qmc_chan_set_ts_info(struct qmc_chan *chan, const struct qmc_chan_ts_info *ts_info)
+{
+ unsigned long flags;
+ int ret;
+
+ /* Only a subset of available timeslots is allowed */
+ if ((ts_info->rx_ts_mask & chan->rx_ts_mask_avail) != ts_info->rx_ts_mask)
+ return -EINVAL;
+ if ((ts_info->tx_ts_mask & chan->tx_ts_mask_avail) != ts_info->tx_ts_mask)
+ return -EINVAL;
+
+ /* In case of common rx/tx table, rx/tx masks must be identical */
+ if (chan->qmc->is_tsa_64rxtx) {
+ if (ts_info->rx_ts_mask != ts_info->tx_ts_mask)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
+
+ if ((chan->tx_ts_mask != ts_info->tx_ts_mask && !chan->is_tx_stopped) ||
+ (chan->rx_ts_mask != ts_info->rx_ts_mask && !chan->is_rx_stopped)) {
+ dev_err(chan->qmc->dev, "Channel rx and/or tx not stopped\n");
+ ret = -EBUSY;
+ } else {
+ chan->tx_ts_mask = ts_info->tx_ts_mask;
+ chan->rx_ts_mask = ts_info->rx_ts_mask;
+ ret = 0;
+ }
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmc_chan_set_ts_info);
+
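The qmc_chan_get_ts_info() / qmc_chan_set_ts_info() pair added above lets a consumer shrink the set of timeslots actually used to any subset of those reserved in the device tree. A minimal sketch of the intended call sequence, not part of this patch (the caller and the chosen masks are hypothetical, and at least one available TS per direction is assumed); note that qmc_chan_set_ts_info() fails with -EBUSY unless both directions are stopped first:

#include <linux/bitops.h>
#include <linux/bits.h>
#include <soc/fsl/qe/qmc.h>

/* Hypothetical consumer: narrow the channel to its first available TS. */
static int example_narrow_to_one_ts(struct qmc_chan *chan)
{
        struct qmc_chan_ts_info ts_info;
        int ret;

        ret = qmc_chan_get_ts_info(chan, &ts_info);
        if (ret)
                return ret;

        /* The new masks must be subsets of the *_avail masks. */
        ts_info.rx_ts_mask = BIT_ULL(__ffs64(ts_info.rx_ts_mask_avail));
        ts_info.tx_ts_mask = BIT_ULL(__ffs64(ts_info.tx_ts_mask_avail));

        /* Both directions must be stopped before the masks may change. */
        ret = qmc_chan_stop(chan, QMC_CHAN_ALL);
        if (ret)
                return ret;

        ret = qmc_chan_set_ts_info(chan, &ts_info);
        if (ret)
                return ret;

        return qmc_chan_start(chan, QMC_CHAN_ALL);
}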
int qmc_chan_set_param(struct qmc_chan *chan, const struct qmc_chan_param *param)
{
if (param->mode != chan->mode)
@@ -421,7 +486,8 @@ end:
}
int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
- void (*complete)(void *context, size_t length), void *context)
+ void (*complete)(void *context, size_t length, unsigned int flags),
+ void *context)
{
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
@@ -454,6 +520,10 @@ int qmc_chan_read_submit(struct qmc_chan *chan, dma_addr_t addr, size_t length,
xfer_desc->rx_complete = complete;
xfer_desc->context = context;
+ /* Clear previous status flags */
+ ctrl &= ~(QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG | QMC_BD_RX_NO |
+ QMC_BD_RX_AB | QMC_BD_RX_CR);
+
/* Activate the descriptor */
ctrl |= (QMC_BD_RX_E | QMC_BD_RX_UB);
wmb(); /* Be sure to flush data before descriptor activation */
@@ -485,7 +555,7 @@ EXPORT_SYMBOL(qmc_chan_read_submit);
static void qmc_chan_read_done(struct qmc_chan *chan)
{
- void (*complete)(void *context, size_t size);
+ void (*complete)(void *context, size_t size, unsigned int flags);
struct qmc_xfer_desc *xfer_desc;
unsigned long flags;
cbd_t __iomem *bd;
@@ -527,7 +597,23 @@ static void qmc_chan_read_done(struct qmc_chan *chan)
if (complete) {
spin_unlock_irqrestore(&chan->rx_lock, flags);
- complete(context, datalen);
+
+ /*
+ * Avoid conversion between internal hardware flags and
+ * the software API flags.
+ * -> Be sure that the software API flags are consistent
+ * with the hardware flags
+ */
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_LAST != QMC_BD_RX_L);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_FIRST != QMC_BD_RX_F);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_OVF != QMC_BD_RX_LG);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_UNA != QMC_BD_RX_NO);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_ABORT != QMC_BD_RX_AB);
+ BUILD_BUG_ON(QMC_RX_FLAG_HDLC_CRC != QMC_BD_RX_CR);
+
+ complete(context, datalen,
+ ctrl & (QMC_BD_RX_L | QMC_BD_RX_F | QMC_BD_RX_LG |
+ QMC_BD_RX_NO | QMC_BD_RX_AB | QMC_BD_RX_CR));
spin_lock_irqsave(&chan->rx_lock, flags);
}
@@ -539,6 +625,155 @@ end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
}
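Since the QMC_RX_FLAG_HDLC_* API values are bit-identical to the QMC_BD_RX_* descriptor bits (enforced by the BUILD_BUG_ON() checks above), the flags are passed through unconverted. A hedged sketch of a consumer pairing qmc_chan_read_submit() with the widened callback (the example_* device structure and frame delivery are hypothetical):

#include <linux/types.h>
#include <soc/fsl/qe/qmc.h>

struct example_dev {                    /* hypothetical private data */
        struct qmc_chan *chan;
        unsigned long rx_errors;
};

static void example_deliver_frame(struct example_dev *edev, size_t length);

static void example_rx_complete(void *context, size_t length, unsigned int flags)
{
        struct example_dev *edev = context;

        if (flags & (QMC_RX_FLAG_HDLC_ABORT | QMC_RX_FLAG_HDLC_CRC |
                     QMC_RX_FLAG_HDLC_OVF | QMC_RX_FLAG_HDLC_UNA)) {
                edev->rx_errors++;      /* discard bad HDLC frames */
                return;
        }

        /* A frame held in a single descriptor carries FIRST and LAST. */
        if ((flags & QMC_RX_FLAG_HDLC_FIRST) && (flags & QMC_RX_FLAG_HDLC_LAST))
                example_deliver_frame(edev, length);
}

static int example_submit_rx(struct example_dev *edev, dma_addr_t addr, size_t size)
{
        return qmc_chan_read_submit(edev->chan, addr, size,
                                    example_rx_complete, edev);
}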
+static int qmc_chan_setup_tsa_64rxtx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /*
+ * Use a common Tx/Rx 64 entries table.
+ * Tx and Rx related stuffs must be identical
+ */
+ if (chan->tx_ts_mask != chan->rx_ts_mask) {
+ dev_err(chan->qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
+ return -EINVAL;
+ }
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u TxRx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_32rx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /* Use a Rx 32 entries table */
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u Rx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on Rx stuff */
+ for (i = 0; i < info->nb_rx_ts; i++) {
+ if (!(chan->rx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATRX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_serial_info *info,
+ bool enable)
+{
+ unsigned int i;
+ u16 curr;
+ u16 val;
+
+ /* Use a Tx 32 entries table */
+
+ val = QMC_TSA_VALID | QMC_TSA_MASK | QMC_TSA_CHANNEL(chan->id);
+
+ /* Check entries based on Tx stuff */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ curr = qmc_read16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2));
+ if (curr & QMC_TSA_VALID && (curr & ~QMC_TSA_WRAP) != val) {
+ dev_err(chan->qmc->dev, "chan %u Tx entry %d already used\n",
+ chan->id, i);
+ return -EBUSY;
+ }
+ }
+
+ /* Set entries based on Tx stuff */
+ for (i = 0; i < info->nb_tx_ts; i++) {
+ if (!(chan->tx_ts_mask & (((u64)1) << i)))
+ continue;
+
+ qmc_clrsetbits16(chan->qmc->scc_pram + QMC_GBL_TSATTX + (i * 2),
+ ~QMC_TSA_WRAP, enable ? val : 0x0000);
+ }
+
+ return 0;
+}
+
+static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32tx(chan, &info, enable);
+}
+
+static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
+{
+ struct tsa_serial_info info;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Setup entries */
+ if (chan->qmc->is_tsa_64rxtx)
+ return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+ return qmc_chan_setup_tsa_32rx(chan, &info, enable);
+}
+
static int qmc_chan_command(struct qmc_chan *chan, u8 qmc_opcode)
{
return cpm_command(chan->id << 2, (qmc_opcode << 4) | 0x0E);
@@ -551,6 +786,12 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (chan->is_rx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP RECEIVE command */
ret = qmc_chan_command(chan, 0x0);
if (ret) {
@@ -561,6 +802,15 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
chan->is_rx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
+ ret = qmc_chan_setup_tsa_rx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
return ret;
@@ -573,6 +823,12 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (chan->is_tx_stopped) {
+ /* The channel is already stopped -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
/* Send STOP TRANSMIT command */
ret = qmc_chan_command(chan, 0x1);
if (ret) {
@@ -583,37 +839,114 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
chan->is_tx_stopped = true;
+ if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
+ ret = qmc_chan_setup_tsa_tx(chan, false);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+ }
+
end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
return ret;
}
+static int qmc_chan_start_rx(struct qmc_chan *chan);
+
int qmc_chan_stop(struct qmc_chan *chan, int direction)
{
- int ret;
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
+
+ spin_lock_irqsave(&chan->ts_lock, flags);
if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = !chan->is_rx_stopped;
ret = qmc_chan_stop_rx(chan);
if (ret)
- return ret;
+ goto end;
}
if (direction & QMC_CHAN_WRITE) {
ret = qmc_chan_stop_tx(chan);
- if (ret)
- return ret;
+ if (ret) {
+ /* Restart rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_start_rx(chan);
+ goto end;
+ }
}
- return 0;
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_stop);
-static void qmc_chan_start_rx(struct qmc_chan *chan)
+static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
+{
+ struct tsa_serial_info info;
+ u16 first_rx, last_tx;
+ u16 trnsync;
+ int ret;
+
+ /* Retrieve info from the TSA related serial */
+ ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+ if (ret)
+ return ret;
+
+ /* Find the first Rx TS allocated to the channel */
+ first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
+
+ /* Find the last Tx TS allocated to the channel */
+ last_tx = fls64(chan->tx_ts_mask);
+
+ trnsync = 0;
+ if (info.nb_rx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
+ if (info.nb_tx_ts)
+ trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
+
+ qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
+
+ dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
+ chan->id, trnsync,
+ first_rx, info.nb_rx_ts, chan->rx_ts_mask,
+ last_tx, info.nb_tx_ts, chan->tx_ts_mask);
+
+ return 0;
+}
+
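As a worked example of the TRNSYNC arithmetic above (values chosen purely for illustration): with rx_ts_mask = 0x000c (TS 2 and TS 3) and info.nb_rx_ts = 4, first_rx = __ffs64(0x000c) + 1 = 3, so the Rx sync offset is (3 % 4) * 2 = 6; with tx_ts_mask = 0x000c and info.nb_tx_ts = 4, last_tx = fls64(0x000c) = 4, so the Tx sync offset is (4 % 4) * 2 = 0.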
+static int qmc_chan_start_rx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->rx_lock, flags);
+ if (!chan->is_rx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_rx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
/* Restart the receiver */
if (chan->mode == QMC_TRANSPARENT)
qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
@@ -624,15 +957,38 @@ static void qmc_chan_start_rx(struct qmc_chan *chan)
chan->is_rx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->rx_lock, flags);
+ return ret;
}
-static void qmc_chan_start_tx(struct qmc_chan *chan)
+static int qmc_chan_start_tx(struct qmc_chan *chan)
{
unsigned long flags;
+ int ret;
spin_lock_irqsave(&chan->tx_lock, flags);
+ if (!chan->is_tx_stopped) {
+ /* The channel is already started -> simply return ok */
+ ret = 0;
+ goto end;
+ }
+
+ ret = qmc_chan_setup_tsa_tx(chan, true);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
+ ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+ if (ret) {
+ dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+ chan->id, ret);
+ goto end;
+ }
+
/*
* Enable channel transmitter as it could be disabled if
* qmc_chan_reset() was called.
@@ -644,18 +1000,39 @@ static void qmc_chan_start_tx(struct qmc_chan *chan)
chan->is_tx_stopped = false;
+end:
spin_unlock_irqrestore(&chan->tx_lock, flags);
+ return ret;
}
int qmc_chan_start(struct qmc_chan *chan, int direction)
{
- if (direction & QMC_CHAN_READ)
- qmc_chan_start_rx(chan);
+ bool is_rx_rollback_needed = false;
+ unsigned long flags;
+ int ret = 0;
- if (direction & QMC_CHAN_WRITE)
- qmc_chan_start_tx(chan);
+ spin_lock_irqsave(&chan->ts_lock, flags);
- return 0;
+ if (direction & QMC_CHAN_READ) {
+ is_rx_rollback_needed = chan->is_rx_stopped;
+ ret = qmc_chan_start_rx(chan);
+ if (ret)
+ goto end;
+ }
+
+ if (direction & QMC_CHAN_WRITE) {
+ ret = qmc_chan_start_tx(chan);
+ if (ret) {
+ /* Restop rx if needed */
+ if (is_rx_rollback_needed)
+ qmc_chan_stop_rx(chan);
+ goto end;
+ }
+ }
+
+end:
+ spin_unlock_irqrestore(&chan->ts_lock, flags);
+ return ret;
}
EXPORT_SYMBOL(qmc_chan_start);
@@ -740,10 +1117,7 @@ EXPORT_SYMBOL(qmc_chan_reset);
static int qmc_check_chans(struct qmc *qmc)
{
struct tsa_serial_info info;
- bool is_one_table = false;
struct qmc_chan *chan;
- u64 tx_ts_mask = 0;
- u64 rx_ts_mask = 0;
u64 tx_ts_assigned_mask;
u64 rx_ts_assigned_mask;
int ret;
@@ -767,38 +1141,21 @@ static int qmc_check_chans(struct qmc *qmc)
dev_err(qmc->dev, "Number of TSA Tx/Rx TS assigned are not equal\n");
return -EINVAL;
}
- is_one_table = true;
}
tx_ts_assigned_mask = info.nb_tx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_tx_ts) - 1;
rx_ts_assigned_mask = info.nb_rx_ts == 64 ? U64_MAX : (((u64)1) << info.nb_rx_ts) - 1;
list_for_each_entry(chan, &qmc->chan_head, list) {
- if (chan->tx_ts_mask > tx_ts_assigned_mask) {
- dev_err(qmc->dev, "chan %u uses TSA unassigned Tx TS\n", chan->id);
- return -EINVAL;
- }
- if (tx_ts_mask & chan->tx_ts_mask) {
- dev_err(qmc->dev, "chan %u uses an already used Tx TS\n", chan->id);
- return -EINVAL;
- }
-
- if (chan->rx_ts_mask > rx_ts_assigned_mask) {
- dev_err(qmc->dev, "chan %u uses TSA unassigned Rx TS\n", chan->id);
- return -EINVAL;
- }
- if (rx_ts_mask & chan->rx_ts_mask) {
- dev_err(qmc->dev, "chan %u uses an already used Rx TS\n", chan->id);
+ if (chan->tx_ts_mask_avail > tx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u can use TSA unassigned Tx TS\n", chan->id);
return -EINVAL;
}
- if (is_one_table && (chan->tx_ts_mask != chan->rx_ts_mask)) {
- dev_err(qmc->dev, "chan %u uses different Rx and Tx TS\n", chan->id);
+ if (chan->rx_ts_mask_avail > rx_ts_assigned_mask) {
+ dev_err(qmc->dev, "chan %u can use TSA unassigned Rx TS\n", chan->id);
return -EINVAL;
}
-
- tx_ts_mask |= chan->tx_ts_mask;
- rx_ts_mask |= chan->rx_ts_mask;
}
return 0;
@@ -844,6 +1201,7 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
}
chan->id = chan_id;
+ spin_lock_init(&chan->ts_lock);
spin_lock_init(&chan->rx_lock);
spin_lock_init(&chan->tx_lock);
@@ -854,7 +1212,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
of_node_put(chan_np);
return ret;
}
- chan->tx_ts_mask = ts_mask;
+ chan->tx_ts_mask_avail = ts_mask;
+ chan->tx_ts_mask = chan->tx_ts_mask_avail;
ret = of_property_read_u64(chan_np, "fsl,rx-ts-mask", &ts_mask);
if (ret) {
@@ -863,7 +1222,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
of_node_put(chan_np);
return ret;
}
- chan->rx_ts_mask = ts_mask;
+ chan->rx_ts_mask_avail = ts_mask;
+ chan->rx_ts_mask = chan->rx_ts_mask_avail;
mode = "transparent";
ret = of_property_read_string(chan_np, "fsl,operational-mode", &mode);
@@ -894,9 +1254,8 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
return qmc_check_chans(qmc);
}
-static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
+static int qmc_init_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *info)
{
- struct qmc_chan *chan;
unsigned int i;
u16 val;
@@ -905,23 +1264,12 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i
* Everything was previously checked; Tx and Rx parameters are
* identical -> use the Rx parameters to build the table
*/
+ qmc->is_tsa_64rxtx = true;
/* Invalidate all entries */
for (i = 0; i < 64; i++)
qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), 0x0000);
- /* Set entries based on Rx stuff*/
- list_for_each_entry(chan, &qmc->chan_head, list) {
- for (i = 0; i < info->nb_rx_ts; i++) {
- if (!(chan->rx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
- }
- }
-
/* Set Wrap bit on last entry */
qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
QMC_TSA_WRAP);
@@ -936,9 +1284,8 @@ static int qmc_setup_tsa_64rxtx(struct qmc *qmc, const struct tsa_serial_info *i
return 0;
}
-static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
+static int qmc_init_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info *info)
{
- struct qmc_chan *chan;
unsigned int i;
u16 val;
@@ -946,6 +1293,7 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
* Use a Tx 32 entries table and a Rx 32 entries table.
* Everything was previously checked.
*/
+ qmc->is_tsa_64rxtx = false;
/* Invalidate all entries */
for (i = 0; i < 32; i++) {
@@ -953,28 +1301,6 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), 0x0000);
}
- /* Set entries based on Rx and Tx stuff*/
- list_for_each_entry(chan, &qmc->chan_head, list) {
- /* Rx part */
- for (i = 0; i < info->nb_rx_ts; i++) {
- if (!(chan->rx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATRX + (i * 2), val);
- }
- /* Tx part */
- for (i = 0; i < info->nb_tx_ts; i++) {
- if (!(chan->tx_ts_mask & (((u64)1) << i)))
- continue;
-
- val = QMC_TSA_VALID | QMC_TSA_MASK |
- QMC_TSA_CHANNEL(chan->id);
- qmc_write16(qmc->scc_pram + QMC_GBL_TSATTX + (i * 2), val);
- }
- }
-
/* Set Wrap bit on last entries */
qmc_setbits16(qmc->scc_pram + QMC_GBL_TSATRX + ((info->nb_rx_ts - 1) * 2),
QMC_TSA_WRAP);
@@ -994,7 +1320,7 @@ static int qmc_setup_tsa_32rx_32tx(struct qmc *qmc, const struct tsa_serial_info
return 0;
}
-static int qmc_setup_tsa(struct qmc *qmc)
+static int qmc_init_tsa(struct qmc *qmc)
{
struct tsa_serial_info info;
int ret;
@@ -1005,46 +1331,12 @@ static int qmc_setup_tsa(struct qmc *qmc)
return ret;
/*
- * Setup one common 64 entries table or two 32 entries (one for Tx and
- * one for Rx) according to assigned TS numbers.
+ * Initialize one common 64 entries table or two 32 entries (one for Tx
+ * and one for Rx) according to assigned TS numbers.
*/
return ((info.nb_tx_ts > 32) || (info.nb_rx_ts > 32)) ?
- qmc_setup_tsa_64rxtx(qmc, &info) :
- qmc_setup_tsa_32rx_32tx(qmc, &info);
-}
-
-static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan)
-{
- struct tsa_serial_info info;
- u16 first_rx, last_tx;
- u16 trnsync;
- int ret;
-
- /* Retrieve info from the TSA related serial */
- ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
- if (ret)
- return ret;
-
- /* Find the first Rx TS allocated to the channel */
- first_rx = chan->rx_ts_mask ? __ffs64(chan->rx_ts_mask) + 1 : 0;
-
- /* Find the last Tx TS allocated to the channel */
- last_tx = fls64(chan->tx_ts_mask);
-
- trnsync = 0;
- if (info.nb_rx_ts)
- trnsync |= QMC_SPE_TRNSYNC_RX((first_rx % info.nb_rx_ts) * 2);
- if (info.nb_tx_ts)
- trnsync |= QMC_SPE_TRNSYNC_TX((last_tx % info.nb_tx_ts) * 2);
-
- qmc_write16(chan->s_param + QMC_SPE_TRNSYNC, trnsync);
-
- dev_dbg(qmc->dev, "chan %u: trnsync=0x%04x, rx %u/%u 0x%llx, tx %u/%u 0x%llx\n",
- chan->id, trnsync,
- first_rx, info.nb_rx_ts, chan->rx_ts_mask,
- last_tx, info.nb_tx_ts, chan->tx_ts_mask);
-
- return 0;
+ qmc_init_tsa_64rxtx(qmc, &info) :
+ qmc_init_tsa_32rx_32tx(qmc, &info);
}
static int qmc_setup_chan(struct qmc *qmc, struct qmc_chan *chan)
@@ -1366,7 +1658,7 @@ static int qmc_probe(struct platform_device *pdev)
qmc_write32(qmc->scc_pram + QMC_GBL_C_MASK32, 0xDEBB20E3);
qmc_write16(qmc->scc_pram + QMC_GBL_C_MASK16, 0xF0B8);
- ret = qmc_setup_tsa(qmc);
+ ret = qmc_init_tsa(qmc);
if (ret)
goto err_tsa_serial_disconnect;
@@ -1404,8 +1696,16 @@ static int qmc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, qmc);
+ /* Populate channel related devices */
+ ret = devm_of_platform_populate(qmc->dev);
+ if (ret)
+ goto err_disable_txrx;
+
return 0;
+err_disable_txrx:
+ qmc_setbits32(qmc->scc_regs + SCC_GSMRL, 0);
+
err_disable_intr:
qmc_write16(qmc->scc_regs + SCC_SCCM, 0);
@@ -1444,26 +1744,16 @@ static struct platform_driver qmc_driver = {
};
module_platform_driver(qmc_driver);
-struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
+static struct qmc_chan *qmc_chan_get_from_qmc(struct device_node *qmc_np, unsigned int chan_index)
{
- struct of_phandle_args out_args;
struct platform_device *pdev;
struct qmc_chan *qmc_chan;
struct qmc *qmc;
- int ret;
- ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
- &out_args);
- if (ret < 0)
- return ERR_PTR(ret);
-
- if (!of_match_node(qmc_driver.driver.of_match_table, out_args.np)) {
- of_node_put(out_args.np);
+ if (!of_match_node(qmc_driver.driver.of_match_table, qmc_np))
return ERR_PTR(-EINVAL);
- }
- pdev = of_find_device_by_node(out_args.np);
- of_node_put(out_args.np);
+ pdev = of_find_device_by_node(qmc_np);
if (!pdev)
return ERR_PTR(-ENODEV);
@@ -1473,17 +1763,12 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan
return ERR_PTR(-EPROBE_DEFER);
}
- if (out_args.args_count != 1) {
- platform_device_put(pdev);
- return ERR_PTR(-EINVAL);
- }
-
- if (out_args.args[0] >= ARRAY_SIZE(qmc->chans)) {
+ if (chan_index >= ARRAY_SIZE(qmc->chans)) {
platform_device_put(pdev);
return ERR_PTR(-EINVAL);
}
- qmc_chan = qmc->chans[out_args.args[0]];
+ qmc_chan = qmc->chans[chan_index];
if (!qmc_chan) {
platform_device_put(pdev);
return ERR_PTR(-ENOENT);
@@ -1491,8 +1776,44 @@ struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phan
return qmc_chan;
}
+
+struct qmc_chan *qmc_chan_get_byphandle(struct device_node *np, const char *phandle_name)
+{
+ struct of_phandle_args out_args;
+ struct qmc_chan *qmc_chan;
+ int ret;
+
+ ret = of_parse_phandle_with_fixed_args(np, phandle_name, 1, 0,
+ &out_args);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ if (out_args.args_count != 1) {
+ of_node_put(out_args.np);
+ return ERR_PTR(-EINVAL);
+ }
+
+ qmc_chan = qmc_chan_get_from_qmc(out_args.np, out_args.args[0]);
+ of_node_put(out_args.np);
+ return qmc_chan;
+}
EXPORT_SYMBOL(qmc_chan_get_byphandle);
+struct qmc_chan *qmc_chan_get_bychild(struct device_node *np)
+{
+ struct device_node *qmc_np;
+ u32 chan_index;
+ int ret;
+
+ qmc_np = np->parent;
+ ret = of_property_read_u32(np, "reg", &chan_index);
+ if (ret)
+ return ERR_PTR(-EINVAL);
+
+ return qmc_chan_get_from_qmc(qmc_np, chan_index);
+}
+EXPORT_SYMBOL(qmc_chan_get_bychild);
+
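qmc_chan_get_bychild() is the channel-node counterpart of qmc_chan_get_byphandle(): a driver bound to a child node (populated by the devm_of_platform_populate() call added to qmc_probe() above) resolves its channel from its own of_node, using the node's "reg" property as the channel index. A minimal sketch of such a consumer probe, not taken from this patch (driver names are hypothetical):

#include <linux/err.h>
#include <linux/platform_device.h>
#include <soc/fsl/qe/qmc.h>

/* Hypothetical child-device probe resolving its QMC channel by node. */
static int example_child_probe(struct platform_device *pdev)
{
        struct qmc_chan *chan;

        chan = qmc_chan_get_bychild(pdev->dev.of_node);
        if (IS_ERR(chan))
                return dev_err_probe(&pdev->dev, PTR_ERR(chan),
                                     "Failed to get QMC channel\n");

        /* ... configure and use the channel; a real driver would keep
         * the reference and call qmc_chan_put() in its remove path ...
         */
        qmc_chan_put(chan);
        return 0;
}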
void qmc_chan_put(struct qmc_chan *chan)
{
put_device(chan->qmc->dev);
@@ -1529,6 +1850,28 @@ struct qmc_chan *devm_qmc_chan_get_byphandle(struct device *dev,
}
EXPORT_SYMBOL(devm_qmc_chan_get_byphandle);
+struct qmc_chan *devm_qmc_chan_get_bychild(struct device *dev,
+ struct device_node *np)
+{
+ struct qmc_chan *qmc_chan;
+ struct qmc_chan **dr;
+
+ dr = devres_alloc(devm_qmc_chan_release, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return ERR_PTR(-ENOMEM);
+
+ qmc_chan = qmc_chan_get_bychild(np);
+ if (!IS_ERR(qmc_chan)) {
+ *dr = qmc_chan;
+ devres_add(dev, dr);
+ } else {
+ devres_free(dr);
+ }
+
+ return qmc_chan;
+}
+EXPORT_SYMBOL(devm_qmc_chan_get_bychild);
+
MODULE_AUTHOR("Herve Codina <herve.codina@bootlin.com>");
MODULE_DESCRIPTION("CPM QMC driver");
MODULE_LICENSE("GPL");
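For managed lifetimes, devm_qmc_chan_get_bychild() ties the channel reference to the consumer device, dropping the explicit qmc_chan_put(); a sketch under the same hypothetical consumer as above:

static int example_child_probe_devm(struct platform_device *pdev)
{
        struct qmc_chan *chan;

        chan = devm_qmc_chan_get_bychild(&pdev->dev, pdev->dev.of_node);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* The reference is released automatically when the device unbinds. */
        return 0;
}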