path: root/drivers/isdn/mISDN/hwchannel.c
author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-06 01:02:30 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-05-06 01:02:30 +0000
commit    76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree      f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /drivers/isdn/mISDN/hwchannel.c
parent    Initial commit. (diff)
Adding upstream version 4.19.249. (tags: upstream/4.19.249, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/isdn/mISDN/hwchannel.c')
-rw-r--r-- drivers/isdn/mISDN/hwchannel.c | 526
1 file changed, 526 insertions, 0 deletions
diff --git a/drivers/isdn/mISDN/hwchannel.c b/drivers/isdn/mISDN/hwchannel.c
new file mode 100644
index 000000000..84b4b0f7e
--- /dev/null
+++ b/drivers/isdn/mISDN/hwchannel.c
@@ -0,0 +1,526 @@
+/*
+ *
+ * Author Karsten Keil <kkeil@novell.com>
+ *
+ * Copyright 2008 by Karsten Keil <kkeil@novell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/gfp.h>
+#include <linux/module.h>
+#include <linux/mISDNhw.h>
+
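+/* dchannel_bh()/bchannel_bh() are the deferred halves of the receive
+ * path: hard-IRQ code only queues skbs and calls schedule_event()
+ * (mISDNhw.h), which sets the given Flags bit and schedules this
+ * per-channel work item to deliver the queued frames upward. */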
+static void
+dchannel_bh(struct work_struct *ws)
+{
+ struct dchannel *dch = container_of(ws, struct dchannel, workq);
+ struct sk_buff *skb;
+ int err;
+
+ if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
+ while ((skb = skb_dequeue(&dch->rqueue))) {
+ if (likely(dch->dev.D.peer)) {
+ err = dch->dev.D.recv(dch->dev.D.peer, skb);
+ if (err)
+ dev_kfree_skb(skb);
+			} else {
+				dev_kfree_skb(skb);
+			}
+ }
+ }
+ if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
+ if (dch->phfunc)
+ dch->phfunc(dch);
+ }
+}
+
+static void
+bchannel_bh(struct work_struct *ws)
+{
+ struct bchannel *bch = container_of(ws, struct bchannel, workq);
+ struct sk_buff *skb;
+ int err;
+
+ if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
+ while ((skb = skb_dequeue(&bch->rqueue))) {
+ bch->rcount--;
+ if (likely(bch->ch.peer)) {
+ err = bch->ch.recv(bch->ch.peer, skb);
+ if (err)
+ dev_kfree_skb(skb);
+			} else {
+				dev_kfree_skb(skb);
+			}
+ }
+ }
+}
+
+int
+mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
+{
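+	/* a D-channel always carries LAPD, which is HDLC-framed */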
+ test_and_set_bit(FLG_HDLC, &ch->Flags);
+ ch->maxlen = maxlen;
+ ch->hw = NULL;
+ ch->rx_skb = NULL;
+ ch->tx_skb = NULL;
+ ch->tx_idx = 0;
+ ch->phfunc = phf;
+ skb_queue_head_init(&ch->squeue);
+ skb_queue_head_init(&ch->rqueue);
+ INIT_LIST_HEAD(&ch->dev.bchannels);
+ INIT_WORK(&ch->workq, dchannel_bh);
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_initdchannel);
+
+int
+mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
+ unsigned short minlen)
+{
+ ch->Flags = 0;
+ ch->minlen = minlen;
+ ch->next_minlen = minlen;
+ ch->init_minlen = minlen;
+ ch->maxlen = maxlen;
+ ch->next_maxlen = maxlen;
+ ch->init_maxlen = maxlen;
+ ch->hw = NULL;
+ ch->rx_skb = NULL;
+ ch->tx_skb = NULL;
+ ch->tx_idx = 0;
+ skb_queue_head_init(&ch->rqueue);
+ ch->rcount = 0;
+ ch->next_skb = NULL;
+ INIT_WORK(&ch->workq, bchannel_bh);
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_initbchannel);
+
+int
+mISDN_freedchannel(struct dchannel *ch)
+{
+ if (ch->tx_skb) {
+ dev_kfree_skb(ch->tx_skb);
+ ch->tx_skb = NULL;
+ }
+ if (ch->rx_skb) {
+ dev_kfree_skb(ch->rx_skb);
+ ch->rx_skb = NULL;
+ }
+ skb_queue_purge(&ch->squeue);
+ skb_queue_purge(&ch->rqueue);
+ flush_work(&ch->workq);
+ return 0;
+}
+EXPORT_SYMBOL(mISDN_freedchannel);
+
+void
+mISDN_clear_bchannel(struct bchannel *ch)
+{
+ if (ch->tx_skb) {
+ dev_kfree_skb(ch->tx_skb);
+ ch->tx_skb = NULL;
+ }
+ ch->tx_idx = 0;
+ if (ch->rx_skb) {
+ dev_kfree_skb(ch->rx_skb);
+ ch->rx_skb = NULL;
+ }
+ if (ch->next_skb) {
+ dev_kfree_skb(ch->next_skb);
+ ch->next_skb = NULL;
+ }
+ test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
+ test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
+ test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
+ test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
+ test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
+ test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
+ ch->dropcnt = 0;
+ ch->minlen = ch->init_minlen;
+ ch->next_minlen = ch->init_minlen;
+ ch->maxlen = ch->init_maxlen;
+ ch->next_maxlen = ch->init_maxlen;
+ skb_queue_purge(&ch->rqueue);
+ ch->rcount = 0;
+}
+EXPORT_SYMBOL(mISDN_clear_bchannel);
+
+void
+mISDN_freebchannel(struct bchannel *ch)
+{
+ cancel_work_sync(&ch->workq);
+ mISDN_clear_bchannel(ch);
+}
+EXPORT_SYMBOL(mISDN_freebchannel);
+
+int
+mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
+{
+ int ret = 0;
+
+ switch (cq->op) {
+ case MISDN_CTRL_GETOP:
+ cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
+ MISDN_CTRL_RX_OFF;
+ break;
+ case MISDN_CTRL_FILL_EMPTY:
+ if (cq->p1) {
+ memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
+ test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
+ } else {
+ test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
+ }
+ break;
+ case MISDN_CTRL_RX_OFF:
+ /* read back dropped byte count */
+ cq->p2 = bch->dropcnt;
+ if (cq->p1)
+ test_and_set_bit(FLG_RX_OFF, &bch->Flags);
+ else
+ test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
+ bch->dropcnt = 0;
+ break;
+ case MISDN_CTRL_RX_BUFFER:
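+		/* the new limits are only latched here; bchannel_get_rxbuf()
+		 * applies them at the next buffer allocation */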
+ if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
+ bch->next_maxlen = cq->p2;
+ if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
+ bch->next_minlen = cq->p1;
+ /* we return the old values */
+ cq->p1 = bch->minlen;
+ cq->p2 = bch->maxlen;
+ break;
+ default:
+ pr_info("mISDN unhandled control %x operation\n", cq->op);
+ ret = -EINVAL;
+ break;
+ }
+ return ret;
+}
+EXPORT_SYMBOL(mISDN_ctrl_bchannel);
+
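+/* Extract the Q.921 (LAPD) address from the first two octets of a
+ * D-channel frame: the 6-bit SAPI lives in bits 2-7 of octet 1, the
+ * 7-bit TEI in bits 1-7 of octet 2; packed as sapi | (tei << 8). */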
+static inline u_int
+get_sapi_tei(u_char *p)
+{
+ u_int sapi, tei;
+
+ sapi = *p >> 2;
+ tei = p[1] >> 1;
+ return sapi | (tei << 8);
+}
+
+void
+recv_Dchannel(struct dchannel *dch)
+{
+ struct mISDNhead *hh;
+
+ if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
+ dev_kfree_skb(dch->rx_skb);
+ dch->rx_skb = NULL;
+ return;
+ }
+ hh = mISDN_HEAD_P(dch->rx_skb);
+ hh->prim = PH_DATA_IND;
+ hh->id = get_sapi_tei(dch->rx_skb->data);
+ skb_queue_tail(&dch->rqueue, dch->rx_skb);
+ dch->rx_skb = NULL;
+ schedule_event(dch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Dchannel);
+
+void
+recv_Echannel(struct dchannel *ech, struct dchannel *dch)
+{
+ struct mISDNhead *hh;
+
+ if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
+ dev_kfree_skb(ech->rx_skb);
+ ech->rx_skb = NULL;
+ return;
+ }
+ hh = mISDN_HEAD_P(ech->rx_skb);
+ hh->prim = PH_DATA_E_IND;
+ hh->id = get_sapi_tei(ech->rx_skb->data);
+ skb_queue_tail(&dch->rqueue, ech->rx_skb);
+ ech->rx_skb = NULL;
+ schedule_event(dch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Echannel);
+
+void
+recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
+{
+ struct mISDNhead *hh;
+
+	/* if allocation failed, upper functions may still call us */
+ if (unlikely(!bch->rx_skb))
+ return;
+ if (unlikely(!bch->rx_skb->len)) {
+		/* the skb holds no data - this may happen after recovery
+		 * from overflow or a too small allocation.
+		 * We need to free the buffer here */
+ dev_kfree_skb(bch->rx_skb);
+ bch->rx_skb = NULL;
+ } else {
+ if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
+ (bch->rx_skb->len < bch->minlen) && !force)
+ return;
+ hh = mISDN_HEAD_P(bch->rx_skb);
+ hh->prim = PH_DATA_IND;
+ hh->id = id;
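+		/* cap the backlog: if the upper layer has not drained 64
+		 * frames, flush them rather than queue without bound */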
+		if (bch->rcount >= 64) {
+			printk(KERN_WARNING
+			       "B%d receive queue overflow - flushing!\n",
+			       bch->nr);
+			skb_queue_purge(&bch->rqueue);
+			bch->rcount = 0; /* match the now-empty queue, as in
+					  * recv_Bchannel_skb() */
+		}
+ bch->rcount++;
+ skb_queue_tail(&bch->rqueue, bch->rx_skb);
+ bch->rx_skb = NULL;
+ schedule_event(bch, FLG_RECVQUEUE);
+ }
+}
+EXPORT_SYMBOL(recv_Bchannel);
+
+void
+recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
+{
+ skb_queue_tail(&dch->rqueue, skb);
+ schedule_event(dch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Dchannel_skb);
+
+void
+recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
+{
+ if (bch->rcount >= 64) {
+ printk(KERN_WARNING "B-channel %p receive queue overflow, "
+ "flushing!\n", bch);
+ skb_queue_purge(&bch->rqueue);
+ bch->rcount = 0;
+ }
+ bch->rcount++;
+ skb_queue_tail(&bch->rqueue, skb);
+ schedule_event(bch, FLG_RECVQUEUE);
+}
+EXPORT_SYMBOL(recv_Bchannel_skb);
+
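+/* Loop a PH_DATA_CNF for the just-sent tx_skb back through the receive
+ * queue; the confirm tells the upper layer it may submit the next
+ * frame. confirm_Bsend() below does the same for B-channels. */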
+static void
+confirm_Dsend(struct dchannel *dch)
+{
+ struct sk_buff *skb;
+
+ skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
+ 0, NULL, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "%s: no skb id %x\n", __func__,
+ mISDN_HEAD_ID(dch->tx_skb));
+ return;
+ }
+ skb_queue_tail(&dch->rqueue, skb);
+ schedule_event(dch, FLG_RECVQUEUE);
+}
+
+int
+get_next_dframe(struct dchannel *dch)
+{
+ dch->tx_idx = 0;
+ dch->tx_skb = skb_dequeue(&dch->squeue);
+ if (dch->tx_skb) {
+ confirm_Dsend(dch);
+ return 1;
+ }
+ dch->tx_skb = NULL;
+ test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
+ return 0;
+}
+EXPORT_SYMBOL(get_next_dframe);
+
+static void
+confirm_Bsend(struct bchannel *bch)
+{
+ struct sk_buff *skb;
+
+ if (bch->rcount >= 64) {
+ printk(KERN_WARNING "B-channel %p receive queue overflow, "
+ "flushing!\n", bch);
+ skb_queue_purge(&bch->rqueue);
+ bch->rcount = 0;
+ }
+ skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
+ 0, NULL, GFP_ATOMIC);
+ if (!skb) {
+ printk(KERN_ERR "%s: no skb id %x\n", __func__,
+ mISDN_HEAD_ID(bch->tx_skb));
+ return;
+ }
+ bch->rcount++;
+ skb_queue_tail(&bch->rqueue, skb);
+ schedule_event(bch, FLG_RECVQUEUE);
+}
+
+int
+get_next_bframe(struct bchannel *bch)
+{
+ bch->tx_idx = 0;
+ if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
+ bch->tx_skb = bch->next_skb;
+ if (bch->tx_skb) {
+ bch->next_skb = NULL;
+ test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
+			/* confirm immediately to allow the next frame */
+ confirm_Bsend(bch);
+ return 1;
+ } else {
+ test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
+ printk(KERN_WARNING "B TX_NEXT without skb\n");
+ }
+ }
+ bch->tx_skb = NULL;
+ test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
+ return 0;
+}
+EXPORT_SYMBOL(get_next_bframe);
+
+void
+queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
+{
+ struct mISDNhead *hh;
+
+ if (!skb) {
+ _queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
+ } else {
+ if (ch->peer) {
+ hh = mISDN_HEAD_P(skb);
+ hh->prim = pr;
+ hh->id = id;
+ if (!ch->recv(ch->peer, skb))
+ return;
+ }
+ dev_kfree_skb(skb);
+ }
+}
+EXPORT_SYMBOL(queue_ch_frame);
+
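+/* Return convention shared by the two senddata helpers below: negative
+ * means error, 0 means the frame was queued (transmitter busy), and 1
+ * means the caller now owns tx_skb and must start the hardware
+ * transfer itself. The caller must hold the hardware lock. */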
+int
+dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
+{
+	/* check frame size limits */
+ if (skb->len <= 0) {
+ printk(KERN_WARNING "%s: skb too small\n", __func__);
+ return -EINVAL;
+ }
+ if (skb->len > ch->maxlen) {
+ printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
+ __func__, skb->len, ch->maxlen);
+ return -EINVAL;
+ }
+ /* HW lock must be obtained */
+ if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
+ skb_queue_tail(&ch->squeue, skb);
+ return 0;
+ } else {
+ /* write to fifo */
+ ch->tx_skb = skb;
+ ch->tx_idx = 0;
+ return 1;
+ }
+}
+EXPORT_SYMBOL(dchannel_senddata);
+
+int
+bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
+{
+	/* check frame size limits */
+ if (skb->len <= 0) {
+ printk(KERN_WARNING "%s: skb too small\n", __func__);
+ return -EINVAL;
+ }
+ if (skb->len > ch->maxlen) {
+ printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
+ __func__, skb->len, ch->maxlen);
+ return -EINVAL;
+ }
+ /* HW lock must be obtained */
+ /* check for pending next_skb */
+ if (ch->next_skb) {
+ printk(KERN_WARNING
+ "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
+ __func__, skb->len, ch->next_skb->len);
+ return -EBUSY;
+ }
+ if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
+ test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
+ ch->next_skb = skb;
+ return 0;
+ } else {
+ /* write to fifo */
+ ch->tx_skb = skb;
+ ch->tx_idx = 0;
+ confirm_Bsend(ch);
+ return 1;
+ }
+}
+EXPORT_SYMBOL(bchannel_senddata);
+
+/* Allocate a new receive skb on demand, sized for the requirements of
+ * the current protocol. Returns the tailroom of the receive skb or a
+ * negative error code.
+ */
+int
+bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
+{
+ int len;
+
+ if (bch->rx_skb) {
+ len = skb_tailroom(bch->rx_skb);
+ if (len < reqlen) {
+ pr_warning("B%d no space for %d (only %d) bytes\n",
+ bch->nr, reqlen, len);
+ if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+ /* send what we have now and try a new buffer */
+ recv_Bchannel(bch, 0, true);
+ } else {
+ /* on HDLC we have to drop too big frames */
+ return -EMSGSIZE;
+ }
+ } else {
+ return len;
+ }
+ }
+ /* update current min/max length first */
+ if (unlikely(bch->maxlen != bch->next_maxlen))
+ bch->maxlen = bch->next_maxlen;
+ if (unlikely(bch->minlen != bch->next_minlen))
+ bch->minlen = bch->next_minlen;
+ if (unlikely(reqlen > bch->maxlen))
+ return -EMSGSIZE;
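+	/* transparent data arrives in arbitrary chunks, so leave room for
+	 * roughly two minimum-size chunks (capped at maxlen); for HDLC the
+	 * frame length is unknown up front, so reserve a full maxlen */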
+ if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
+ if (reqlen >= bch->minlen) {
+ len = reqlen;
+ } else {
+ len = 2 * bch->minlen;
+ if (len > bch->maxlen)
+ len = bch->maxlen;
+ }
+ } else {
+ /* with HDLC we do not know the length yet */
+ len = bch->maxlen;
+ }
+ bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
+ if (!bch->rx_skb) {
+ pr_warning("B%d receive no memory for %d bytes\n",
+ bch->nr, len);
+ len = -ENOMEM;
+ }
+ return len;
+}
+EXPORT_SYMBOL(bchannel_get_rxbuf);
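
For reference, here is a minimal, hypothetical sketch (not part of this
commit) of how a hardware driver's receive path is expected to combine
bchannel_get_rxbuf() and recv_Bchannel(). hw_read_fifo() and the zero
frame id are placeholder assumptions, not mISDN API.

/* Sketch: move 'count' freshly received bytes from a device FIFO into
 * the B-channel receive buffer and hand complete data upward. */
static void example_bch_rx(struct bchannel *bch, int count)
{
	int len = bchannel_get_rxbuf(bch, count);

	if (len < 0) {
		/* -EMSGSIZE (HDLC frame exceeds maxlen) or -ENOMEM;
		 * the hardware data has to be dropped */
		return;
	}
	hw_read_fifo(skb_put(bch->rx_skb, count), count);
	/* in transparent mode this queues the skb only once minlen is
	 * reached; force=true would flush it regardless */
	recv_Bchannel(bch, 0, false);
}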