Diffstat (limited to 'net/phonet')
-rw-r--r--  net/phonet/Kconfig        17
-rw-r--r--  net/phonet/Makefile       12
-rw-r--r--  net/phonet/af_phonet.c   540
-rw-r--r--  net/phonet/datagram.c    196
-rw-r--r--  net/phonet/pep-gprs.c    310
-rw-r--r--  net/phonet/pep.c        1365
-rw-r--r--  net/phonet/pn_dev.c      419
-rw-r--r--  net/phonet/pn_netlink.c  306
-rw-r--r--  net/phonet/socket.c      772
-rw-r--r--  net/phonet/sysctl.c       96
10 files changed, 4033 insertions, 0 deletions
diff --git a/net/phonet/Kconfig b/net/phonet/Kconfig
new file mode 100644
index 0000000000..07f2c21721
--- /dev/null
+++ b/net/phonet/Kconfig
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Phonet protocol
+#
+
+config PHONET
+ tristate "Phonet protocols family"
+ help
+ The Phone Network protocol (PhoNet) is a packet-oriented
+ communication protocol developed by Nokia for use with its modems.
+
+ This is required for Maemo to use cellular data connectivity (if
+ supported). It can also be used to control Nokia phones
+ from a Linux computer, although AT commands may be easier to use.
+
+ To compile this driver as a module, choose M here: the module
+ will be called phonet. If unsure, say N.
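
As a usage illustration (not part of this patch), a minimal user-space sketch of opening a Phonet datagram socket; it relies only on the exported <linux/phonet.h> definitions, and socket creation requires CAP_SYS_ADMIN (see pn_socket_create() in af_phonet.c below):

    #include <sys/socket.h>         /* AF_PHONET */
    #include <linux/phonet.h>       /* PN_PROTO_PHONET, struct sockaddr_pn */
    #include <stdio.h>

    int main(void)
    {
            /* Protocol 0 works too: SOCK_DGRAM defaults to PN_PROTO_PHONET. */
            int fd = socket(AF_PHONET, SOCK_DGRAM, PN_PROTO_PHONET);

            if (fd < 0) {
                    perror("socket(AF_PHONET)"); /* EPERM without CAP_SYS_ADMIN */
                    return 1;
            }
            /* ... bind()/sendto() with struct sockaddr_pn ... */
            return 0;
    }
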
diff --git a/net/phonet/Makefile b/net/phonet/Makefile
new file mode 100644
index 0000000000..444f875932
--- /dev/null
+++ b/net/phonet/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_PHONET) += phonet.o pn_pep.o
+
+phonet-y := \
+ pn_dev.o \
+ pn_netlink.o \
+ socket.o \
+ datagram.o \
+ sysctl.o \
+ af_phonet.o
+
+pn_pep-y := pep.o pep-gprs.o
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
new file mode 100644
index 0000000000..2b582da1e8
--- /dev/null
+++ b/net/phonet/af_phonet.c
@@ -0,0 +1,540 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * File: af_phonet.c
+ *
+ * Phonet protocols family
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ * Rémi Denis-Courmont
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <asm/unaligned.h>
+#include <net/sock.h>
+
+#include <linux/if_phonet.h>
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+#include <net/phonet/pn_dev.h>
+
+/* Transport protocol registration */
+static const struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
+
+static const struct phonet_protocol *phonet_proto_get(unsigned int protocol)
+{
+ const struct phonet_protocol *pp;
+
+ if (protocol >= PHONET_NPROTO)
+ return NULL;
+
+ rcu_read_lock();
+ pp = rcu_dereference(proto_tab[protocol]);
+ if (pp && !try_module_get(pp->prot->owner))
+ pp = NULL;
+ rcu_read_unlock();
+
+ return pp;
+}
+
+static inline void phonet_proto_put(const struct phonet_protocol *pp)
+{
+ module_put(pp->prot->owner);
+}
+
+/* protocol family functions */
+
+static int pn_socket_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk;
+ struct pn_sock *pn;
+ const struct phonet_protocol *pnp;
+ int err;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ if (protocol == 0) {
+ /* Default protocol selection */
+ switch (sock->type) {
+ case SOCK_DGRAM:
+ protocol = PN_PROTO_PHONET;
+ break;
+ case SOCK_SEQPACKET:
+ protocol = PN_PROTO_PIPE;
+ break;
+ default:
+ return -EPROTONOSUPPORT;
+ }
+ }
+
+ pnp = phonet_proto_get(protocol);
+ if (pnp == NULL &&
+ request_module("net-pf-%d-proto-%d", PF_PHONET, protocol) == 0)
+ pnp = phonet_proto_get(protocol);
+
+ if (pnp == NULL)
+ return -EPROTONOSUPPORT;
+ if (sock->type != pnp->sock_type) {
+ err = -EPROTONOSUPPORT;
+ goto out;
+ }
+
+ sk = sk_alloc(net, PF_PHONET, GFP_KERNEL, pnp->prot, kern);
+ if (sk == NULL) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ sock_init_data(sock, sk);
+ sock->state = SS_UNCONNECTED;
+ sock->ops = pnp->ops;
+ sk->sk_backlog_rcv = sk->sk_prot->backlog_rcv;
+ sk->sk_protocol = protocol;
+ pn = pn_sk(sk);
+ pn->sobject = 0;
+ pn->dobject = 0;
+ pn->resource = 0;
+ sk->sk_prot->init(sk);
+ err = 0;
+
+out:
+ phonet_proto_put(pnp);
+ return err;
+}
+
+static const struct net_proto_family phonet_proto_family = {
+ .family = PF_PHONET,
+ .create = pn_socket_create,
+ .owner = THIS_MODULE,
+};
+
+/* Phonet device header operations */
+static int pn_header_create(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type, const void *daddr,
+ const void *saddr, unsigned int len)
+{
+ u8 *media = skb_push(skb, 1);
+
+ if (type != ETH_P_PHONET)
+ return -1;
+
+ if (!saddr)
+ saddr = dev->dev_addr;
+ *media = *(const u8 *)saddr;
+ return 1;
+}
+
+static int pn_header_parse(const struct sk_buff *skb, unsigned char *haddr)
+{
+ const u8 *media = skb_mac_header(skb);
+ *haddr = *media;
+ return 1;
+}
+
+const struct header_ops phonet_header_ops = {
+ .create = pn_header_create,
+ .parse = pn_header_parse,
+};
+EXPORT_SYMBOL(phonet_header_ops);
+
+/*
+ * Prepends an ISI header and sends a datagram.
+ */
+static int pn_send(struct sk_buff *skb, struct net_device *dev,
+ u16 dst, u16 src, u8 res)
+{
+ struct phonethdr *ph;
+ int err;
+
+ if (skb->len + 2 > 0xffff /* Phonet length field limit */ ||
+ skb->len + sizeof(struct phonethdr) > dev->mtu) {
+ err = -EMSGSIZE;
+ goto drop;
+ }
+
+ /* Broadcast sending is not implemented */
+ if (pn_addr(dst) == PNADDR_BROADCAST) {
+ err = -EOPNOTSUPP;
+ goto drop;
+ }
+
+ skb_reset_transport_header(skb);
+ WARN_ON(skb_headroom(skb) & 1); /* HW assumes word alignment */
+ skb_push(skb, sizeof(struct phonethdr));
+ skb_reset_network_header(skb);
+ ph = pn_hdr(skb);
+ ph->pn_rdev = pn_dev(dst);
+ ph->pn_sdev = pn_dev(src);
+ ph->pn_res = res;
+ ph->pn_length = __cpu_to_be16(skb->len + 2 - sizeof(*ph));
+ ph->pn_robj = pn_obj(dst);
+ ph->pn_sobj = pn_obj(src);
+
+ skb->protocol = htons(ETH_P_PHONET);
+ skb->priority = 0;
+ skb->dev = dev;
+
+ if (skb->pkt_type == PACKET_LOOPBACK) {
+ skb_reset_mac_header(skb);
+ skb_orphan(skb);
+ err = netif_rx(skb) ? -ENOBUFS : 0;
+ } else {
+ err = dev_hard_header(skb, dev, ntohs(skb->protocol),
+ NULL, NULL, skb->len);
+ if (err < 0) {
+ err = -EHOSTUNREACH;
+ goto drop;
+ }
+ err = dev_queue_xmit(skb);
+ if (unlikely(err > 0))
+ err = net_xmit_errno(err);
+ }
+
+ return err;
+drop:
+ kfree_skb(skb);
+ return err;
+}
+
+static int pn_raw_send(const void *data, int len, struct net_device *dev,
+ u16 dst, u16 src, u8 res)
+{
+ struct sk_buff *skb = alloc_skb(MAX_PHONET_HEADER + len, GFP_ATOMIC);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ if (phonet_address_lookup(dev_net(dev), pn_addr(dst)) == 0)
+ skb->pkt_type = PACKET_LOOPBACK;
+
+ skb_reserve(skb, MAX_PHONET_HEADER);
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, data, len);
+ return pn_send(skb, dev, dst, src, res);
+}
+
+/*
+ * Create a Phonet header for the skb and send it out. On failure,
+ * returns a non-zero error code and frees the skb.
+ */
+int pn_skb_send(struct sock *sk, struct sk_buff *skb,
+ const struct sockaddr_pn *target)
+{
+ struct net *net = sock_net(sk);
+ struct net_device *dev;
+ struct pn_sock *pn = pn_sk(sk);
+ int err;
+ u16 src, dst;
+ u8 daddr, saddr, res;
+
+ src = pn->sobject;
+ if (target != NULL) {
+ dst = pn_sockaddr_get_object(target);
+ res = pn_sockaddr_get_resource(target);
+ } else {
+ dst = pn->dobject;
+ res = pn->resource;
+ }
+ daddr = pn_addr(dst);
+
+ err = -EHOSTUNREACH;
+ if (sk->sk_bound_dev_if)
+ dev = dev_get_by_index(net, sk->sk_bound_dev_if);
+ else if (phonet_address_lookup(net, daddr) == 0) {
+ dev = phonet_device_get(net);
+ skb->pkt_type = PACKET_LOOPBACK;
+ } else if (dst == 0) {
+ /* Resource routing (small race until phonet_rcv()) */
+ struct sock *sk = pn_find_sock_by_res(net, res);
+ if (sk) {
+ sock_put(sk);
+ dev = phonet_device_get(net);
+ skb->pkt_type = PACKET_LOOPBACK;
+ } else
+ dev = phonet_route_output(net, daddr);
+ } else
+ dev = phonet_route_output(net, daddr);
+
+ if (!dev || !(dev->flags & IFF_UP))
+ goto drop;
+
+ saddr = phonet_address_get(dev, daddr);
+ if (saddr == PN_NO_ADDR)
+ goto drop;
+
+ if (!pn_addr(src))
+ src = pn_object(saddr, pn_obj(src));
+
+ err = pn_send(skb, dev, dst, src, res);
+ dev_put(dev);
+ return err;
+
+drop:
+ kfree_skb(skb);
+ dev_put(dev);
+ return err;
+}
+EXPORT_SYMBOL(pn_skb_send);
+
+/* Do not send an error message in response to an error message */
+static inline int can_respond(struct sk_buff *skb)
+{
+ const struct phonethdr *ph;
+ const struct phonetmsg *pm;
+ u8 submsg_id;
+
+ if (!pskb_may_pull(skb, 3))
+ return 0;
+
+ ph = pn_hdr(skb);
+ if (ph->pn_res == PN_PREFIX && !pskb_may_pull(skb, 5))
+ return 0;
+ if (ph->pn_res == PN_COMMGR) /* indications */
+ return 0;
+
+ ph = pn_hdr(skb); /* re-acquires the pointer */
+ pm = pn_msg(skb);
+ if (pm->pn_msg_id != PN_COMMON_MESSAGE)
+ return 1;
+ submsg_id = (ph->pn_res == PN_PREFIX)
+ ? pm->pn_e_submsg_id : pm->pn_submsg_id;
+ if (submsg_id != PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP &&
+ pm->pn_e_submsg_id != PN_COMM_SERVICE_NOT_IDENTIFIED_RESP)
+ return 1;
+ return 0;
+}
+
+static int send_obj_unreachable(struct sk_buff *rskb)
+{
+ const struct phonethdr *oph = pn_hdr(rskb);
+ const struct phonetmsg *opm = pn_msg(rskb);
+ struct phonetmsg resp;
+
+ memset(&resp, 0, sizeof(resp));
+ resp.pn_trans_id = opm->pn_trans_id;
+ resp.pn_msg_id = PN_COMMON_MESSAGE;
+ if (oph->pn_res == PN_PREFIX) {
+ resp.pn_e_res_id = opm->pn_e_res_id;
+ resp.pn_e_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP;
+ resp.pn_e_orig_msg_id = opm->pn_msg_id;
+ resp.pn_e_status = 0;
+ } else {
+ resp.pn_submsg_id = PN_COMM_ISA_ENTITY_NOT_REACHABLE_RESP;
+ resp.pn_orig_msg_id = opm->pn_msg_id;
+ resp.pn_status = 0;
+ }
+ return pn_raw_send(&resp, sizeof(resp), rskb->dev,
+ pn_object(oph->pn_sdev, oph->pn_sobj),
+ pn_object(oph->pn_rdev, oph->pn_robj),
+ oph->pn_res);
+}
+
+static int send_reset_indications(struct sk_buff *rskb)
+{
+ struct phonethdr *oph = pn_hdr(rskb);
+ static const u8 data[4] = {
+ 0x00 /* trans ID */, 0x10 /* subscribe msg */,
+ 0x00 /* subscription count */, 0x00 /* dummy */
+ };
+
+ return pn_raw_send(data, sizeof(data), rskb->dev,
+ pn_object(oph->pn_sdev, 0x00),
+ pn_object(oph->pn_rdev, oph->pn_robj),
+ PN_COMMGR);
+}
+
+
+/* packet type functions */
+
+/*
+ * Stuff received packets to associated sockets.
+ * On error, returns non-zero and releases the skb.
+ */
+static int phonet_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pkttype,
+ struct net_device *orig_dev)
+{
+ struct net *net = dev_net(dev);
+ struct phonethdr *ph;
+ struct sockaddr_pn sa;
+ u16 len;
+
+ skb = skb_share_check(skb, GFP_ATOMIC);
+ if (!skb)
+ return NET_RX_DROP;
+
+ /* check we have at least a full Phonet header */
+ if (!pskb_pull(skb, sizeof(struct phonethdr)))
+ goto out;
+
+ /* check that the advertised length is correct */
+ ph = pn_hdr(skb);
+ len = get_unaligned_be16(&ph->pn_length);
+ if (len < 2)
+ goto out;
+ len -= 2;
+ if ((len > skb->len) || pskb_trim(skb, len))
+ goto out;
+ skb_reset_transport_header(skb);
+
+ pn_skb_get_dst_sockaddr(skb, &sa);
+
+ /* check if this is broadcasted */
+ if (pn_sockaddr_get_addr(&sa) == PNADDR_BROADCAST) {
+ pn_deliver_sock_broadcast(net, skb);
+ goto out;
+ }
+
+ /* resource routing */
+ if (pn_sockaddr_get_object(&sa) == 0) {
+ struct sock *sk = pn_find_sock_by_res(net, sa.spn_resource);
+ if (sk)
+ return sk_receive_skb(sk, skb, 0);
+ }
+
+ /* check if we are the destination */
+ if (phonet_address_lookup(net, pn_sockaddr_get_addr(&sa)) == 0) {
+ /* Phonet packet input */
+ struct sock *sk = pn_find_sock_by_sa(net, &sa);
+
+ if (sk)
+ return sk_receive_skb(sk, skb, 0);
+
+ if (can_respond(skb)) {
+ send_obj_unreachable(skb);
+ send_reset_indications(skb);
+ }
+ } else if (unlikely(skb->pkt_type == PACKET_LOOPBACK))
+ goto out; /* Race between address deletion and loopback */
+ else {
+ /* Phonet packet routing */
+ struct net_device *out_dev;
+
+ out_dev = phonet_route_output(net, pn_sockaddr_get_addr(&sa));
+ if (!out_dev) {
+ net_dbg_ratelimited("No Phonet route to %02X\n",
+ pn_sockaddr_get_addr(&sa));
+ goto out;
+ }
+
+ __skb_push(skb, sizeof(struct phonethdr));
+ skb->dev = out_dev;
+ if (out_dev == dev) {
+ net_dbg_ratelimited("Phonet loop to %02X on %s\n",
+ pn_sockaddr_get_addr(&sa),
+ dev->name);
+ goto out_dev;
+ }
+ /* Some drivers (e.g. TUN) do not allocate HW header space */
+ if (skb_cow_head(skb, out_dev->hard_header_len))
+ goto out_dev;
+
+ if (dev_hard_header(skb, out_dev, ETH_P_PHONET, NULL, NULL,
+ skb->len) < 0)
+ goto out_dev;
+ dev_queue_xmit(skb);
+ dev_put(out_dev);
+ return NET_RX_SUCCESS;
+out_dev:
+ dev_put(out_dev);
+ }
+
+out:
+ kfree_skb(skb);
+ return NET_RX_DROP;
+}
+
+static struct packet_type phonet_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_PHONET),
+ .func = phonet_rcv,
+};
+
+static DEFINE_MUTEX(proto_tab_lock);
+
+int __init_or_module phonet_proto_register(unsigned int protocol,
+ const struct phonet_protocol *pp)
+{
+ int err = 0;
+
+ if (protocol >= PHONET_NPROTO)
+ return -EINVAL;
+
+ err = proto_register(pp->prot, 1);
+ if (err)
+ return err;
+
+ mutex_lock(&proto_tab_lock);
+ if (proto_tab[protocol])
+ err = -EBUSY;
+ else
+ rcu_assign_pointer(proto_tab[protocol], pp);
+ mutex_unlock(&proto_tab_lock);
+
+ return err;
+}
+EXPORT_SYMBOL(phonet_proto_register);
+
+void phonet_proto_unregister(unsigned int protocol,
+ const struct phonet_protocol *pp)
+{
+ mutex_lock(&proto_tab_lock);
+ BUG_ON(proto_tab[protocol] != pp);
+ RCU_INIT_POINTER(proto_tab[protocol], NULL);
+ mutex_unlock(&proto_tab_lock);
+ synchronize_rcu();
+ proto_unregister(pp->prot);
+}
+EXPORT_SYMBOL(phonet_proto_unregister);
+
+/* Module registration */
+static int __init phonet_init(void)
+{
+ int err;
+
+ err = phonet_device_init();
+ if (err)
+ return err;
+
+ pn_sock_init();
+ err = sock_register(&phonet_proto_family);
+ if (err) {
+ printk(KERN_ALERT
+ "phonet protocol family initialization failed\n");
+ goto err_sock;
+ }
+
+ dev_add_pack(&phonet_packet_type);
+ phonet_sysctl_init();
+
+ err = isi_register();
+ if (err)
+ goto err;
+ return 0;
+
+err:
+ phonet_sysctl_exit();
+ sock_unregister(PF_PHONET);
+ dev_remove_pack(&phonet_packet_type);
+err_sock:
+ phonet_device_exit();
+ return err;
+}
+
+static void __exit phonet_exit(void)
+{
+ isi_unregister();
+ phonet_sysctl_exit();
+ sock_unregister(PF_PHONET);
+ dev_remove_pack(&phonet_packet_type);
+ phonet_device_exit();
+}
+
+module_init(phonet_init);
+module_exit(phonet_exit);
+MODULE_DESCRIPTION("Phonet protocol stack for Linux");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(PF_PHONET);
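
For reference when reading pn_send() and phonet_rcv() above: the 6-byte header they build and parse is the exported struct phonethdr from <linux/phonet.h>, reproduced here; pn_length is big-endian and covers pn_robj, pn_sobj and the payload:

    struct phonethdr {
            __u8    pn_rdev;        /* receiver (destination) device */
            __u8    pn_sdev;        /* sender (source) device        */
            __u8    pn_res;         /* resource / service identifier */
            __be16  pn_length;      /* payload length + 2            */
            __u8    pn_robj;        /* receiver object               */
            __u8    pn_sobj;        /* sender object                 */
    } __attribute__((packed));
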
diff --git a/net/phonet/datagram.c b/net/phonet/datagram.c
new file mode 100644
index 0000000000..3aa50dc753
--- /dev/null
+++ b/net/phonet/datagram.c
@@ -0,0 +1,196 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * File: datagram.c
+ *
+ * Datagram (ISI) Phonet sockets
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ * Rémi Denis-Courmont
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <asm/ioctls.h>
+#include <net/sock.h>
+
+#include <linux/phonet.h>
+#include <linux/export.h>
+#include <net/phonet/phonet.h>
+
+static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb);
+
+/* associated socket ceases to exist */
+static void pn_sock_close(struct sock *sk, long timeout)
+{
+ sk_common_release(sk);
+}
+
+static int pn_ioctl(struct sock *sk, int cmd, int *karg)
+{
+ struct sk_buff *skb;
+
+ switch (cmd) {
+ case SIOCINQ:
+ lock_sock(sk);
+ skb = skb_peek(&sk->sk_receive_queue);
+ *karg = skb ? skb->len : 0;
+ release_sock(sk);
+ return 0;
+
+ case SIOCPNADDRESOURCE:
+ case SIOCPNDELRESOURCE: {
+ u32 res = *karg;
+ if (res >= 256)
+ return -EINVAL;
+ if (cmd == SIOCPNADDRESOURCE)
+ return pn_sock_bind_res(sk, res);
+ else
+ return pn_sock_unbind_res(sk, res);
+ }
+ }
+
+ return -ENOIOCTLCMD;
+}
+
+/* Destroy socket. All references are gone. */
+static void pn_destruct(struct sock *sk)
+{
+ skb_queue_purge(&sk->sk_receive_queue);
+}
+
+static int pn_init(struct sock *sk)
+{
+ sk->sk_destruct = pn_destruct;
+ return 0;
+}
+
+static int pn_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+{
+ DECLARE_SOCKADDR(struct sockaddr_pn *, target, msg->msg_name);
+ struct sk_buff *skb;
+ int err;
+
+ if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT))
+ return -EOPNOTSUPP;
+
+ if (target == NULL)
+ return -EDESTADDRREQ;
+
+ if (msg->msg_namelen < sizeof(struct sockaddr_pn))
+ return -EINVAL;
+
+ if (target->spn_family != AF_PHONET)
+ return -EAFNOSUPPORT;
+
+ skb = sock_alloc_send_skb(sk, MAX_PHONET_HEADER + len,
+ msg->msg_flags & MSG_DONTWAIT, &err);
+ if (skb == NULL)
+ return err;
+ skb_reserve(skb, MAX_PHONET_HEADER);
+
+ err = memcpy_from_msg((void *)skb_put(skb, len), msg, len);
+ if (err < 0) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ /*
+ * Fill in the Phonet header and
+ * finally pass the packet forwards.
+ */
+ err = pn_skb_send(sk, skb, target);
+
+ /* If ok, return len. */
+ return (err >= 0) ? len : err;
+}
+
+static int pn_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len)
+{
+ struct sk_buff *skb = NULL;
+ struct sockaddr_pn sa;
+ int rval = -EOPNOTSUPP;
+ int copylen;
+
+ if (flags & ~(MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT))
+ goto out_nofree;
+
+ skb = skb_recv_datagram(sk, flags, &rval);
+ if (skb == NULL)
+ goto out_nofree;
+
+ pn_skb_get_src_sockaddr(skb, &sa);
+
+ copylen = skb->len;
+ if (len < copylen) {
+ msg->msg_flags |= MSG_TRUNC;
+ copylen = len;
+ }
+
+ rval = skb_copy_datagram_msg(skb, 0, msg, copylen);
+ if (rval) {
+ rval = -EFAULT;
+ goto out;
+ }
+
+ rval = (flags & MSG_TRUNC) ? skb->len : copylen;
+
+ if (msg->msg_name != NULL) {
+ __sockaddr_check_size(sizeof(sa));
+ memcpy(msg->msg_name, &sa, sizeof(sa));
+ *addr_len = sizeof(sa);
+ }
+
+out:
+ skb_free_datagram(sk, skb);
+
+out_nofree:
+ return rval;
+}
+
+/* Queue an skb for a sock. */
+static int pn_backlog_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ int err = sock_queue_rcv_skb(sk, skb);
+
+ if (err < 0)
+ kfree_skb(skb);
+ return err ? NET_RX_DROP : NET_RX_SUCCESS;
+}
+
+/* Module registration */
+static struct proto pn_proto = {
+ .close = pn_sock_close,
+ .ioctl = pn_ioctl,
+ .init = pn_init,
+ .sendmsg = pn_sendmsg,
+ .recvmsg = pn_recvmsg,
+ .backlog_rcv = pn_backlog_rcv,
+ .hash = pn_sock_hash,
+ .unhash = pn_sock_unhash,
+ .get_port = pn_sock_get_port,
+ .obj_size = sizeof(struct pn_sock),
+ .owner = THIS_MODULE,
+ .name = "PHONET",
+};
+
+static const struct phonet_protocol pn_dgram_proto = {
+ .ops = &phonet_dgram_ops,
+ .prot = &pn_proto,
+ .sock_type = SOCK_DGRAM,
+};
+
+int __init isi_register(void)
+{
+ return phonet_proto_register(PN_PROTO_PHONET, &pn_dgram_proto);
+}
+
+void __exit isi_unregister(void)
+{
+ phonet_proto_unregister(PN_PROTO_PHONET, &pn_dgram_proto);
+}
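
A hedged user-space sketch of sending on such a datagram socket (the device, object and resource values below are placeholders, not values mandated by this code); it illustrates that pn_sendmsg() above insists on an AF_PHONET destination address:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/phonet.h>

    static ssize_t pn_dgram_send(int fd, const void *msg, size_t len)
    {
            struct sockaddr_pn spn;

            memset(&spn, 0, sizeof(spn));
            spn.spn_family = AF_PHONET;     /* other families get -EAFNOSUPPORT */
            spn.spn_dev = 0x10;             /* remote device (placeholder)      */
            spn.spn_obj = 0x00;             /* remote object (placeholder)      */
            spn.spn_resource = 0x10;        /* ISI resource (placeholder)       */

            /* Omitting the address entirely yields -EDESTADDRREQ. */
            return sendto(fd, msg, len, 0, (struct sockaddr *)&spn, sizeof(spn));
    }
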
diff --git a/net/phonet/pep-gprs.c b/net/phonet/pep-gprs.c
new file mode 100644
index 0000000000..7f68d8662c
--- /dev/null
+++ b/net/phonet/pep-gprs.c
@@ -0,0 +1,310 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * File: pep-gprs.c
+ *
+ * GPRS over Phonet pipe end point socket
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Author: Rémi Denis-Courmont
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
+
+#include <linux/if_phonet.h>
+#include <net/tcp_states.h>
+#include <net/phonet/gprs.h>
+
+#include <trace/events/sock.h>
+
+#define GPRS_DEFAULT_MTU 1400
+
+struct gprs_dev {
+ struct sock *sk;
+ void (*old_state_change)(struct sock *);
+ void (*old_data_ready)(struct sock *);
+ void (*old_write_space)(struct sock *);
+
+ struct net_device *dev;
+};
+
+static __be16 gprs_type_trans(struct sk_buff *skb)
+{
+ const u8 *pvfc;
+ u8 buf;
+
+ pvfc = skb_header_pointer(skb, 0, 1, &buf);
+ if (!pvfc)
+ return htons(0);
+ /* Look at IP version field */
+ switch (*pvfc >> 4) {
+ case 4:
+ return htons(ETH_P_IP);
+ case 6:
+ return htons(ETH_P_IPV6);
+ }
+ return htons(0);
+}
+
+static void gprs_writeable(struct gprs_dev *gp)
+{
+ struct net_device *dev = gp->dev;
+
+ if (pep_writeable(gp->sk))
+ netif_wake_queue(dev);
+}
+
+/*
+ * Socket callbacks
+ */
+
+static void gprs_state_change(struct sock *sk)
+{
+ struct gprs_dev *gp = sk->sk_user_data;
+
+ if (sk->sk_state == TCP_CLOSE_WAIT) {
+ struct net_device *dev = gp->dev;
+
+ netif_stop_queue(dev);
+ netif_carrier_off(dev);
+ }
+}
+
+static int gprs_recv(struct gprs_dev *gp, struct sk_buff *skb)
+{
+ struct net_device *dev = gp->dev;
+ int err = 0;
+ __be16 protocol = gprs_type_trans(skb);
+
+ if (!protocol) {
+ err = -EINVAL;
+ goto drop;
+ }
+
+ if (skb_headroom(skb) & 3) {
+ struct sk_buff *rskb, *fs;
+ int flen = 0;
+
+		/* The Phonet Pipe data header may be misaligned (3 bytes),
+		 * so wrap the IP packet as a single fragment of a headless
+		 * socket buffer. The network stack will pull what it needs,
+		 * but at least the whole IP payload is not memcpy'd. */
+ rskb = netdev_alloc_skb(dev, 0);
+ if (!rskb) {
+ err = -ENOBUFS;
+ goto drop;
+ }
+ skb_shinfo(rskb)->frag_list = skb;
+ rskb->len += skb->len;
+ rskb->data_len += rskb->len;
+ rskb->truesize += rskb->len;
+
+ /* Avoid nested fragments */
+ skb_walk_frags(skb, fs)
+ flen += fs->len;
+ skb->next = skb_shinfo(skb)->frag_list;
+ skb_frag_list_init(skb);
+ skb->len -= flen;
+ skb->data_len -= flen;
+ skb->truesize -= flen;
+
+ skb = rskb;
+ }
+
+ skb->protocol = protocol;
+ skb_reset_mac_header(skb);
+ skb->dev = dev;
+
+ if (likely(dev->flags & IFF_UP)) {
+ dev->stats.rx_packets++;
+ dev->stats.rx_bytes += skb->len;
+ netif_rx(skb);
+ skb = NULL;
+ } else
+ err = -ENODEV;
+
+drop:
+ if (skb) {
+ dev_kfree_skb(skb);
+ dev->stats.rx_dropped++;
+ }
+ return err;
+}
+
+static void gprs_data_ready(struct sock *sk)
+{
+ struct gprs_dev *gp = sk->sk_user_data;
+ struct sk_buff *skb;
+
+ trace_sk_data_ready(sk);
+
+ while ((skb = pep_read(sk)) != NULL) {
+ skb_orphan(skb);
+ gprs_recv(gp, skb);
+ }
+}
+
+static void gprs_write_space(struct sock *sk)
+{
+ struct gprs_dev *gp = sk->sk_user_data;
+
+ if (netif_running(gp->dev))
+ gprs_writeable(gp);
+}
+
+/*
+ * Network device callbacks
+ */
+
+static int gprs_open(struct net_device *dev)
+{
+ struct gprs_dev *gp = netdev_priv(dev);
+
+ gprs_writeable(gp);
+ return 0;
+}
+
+static int gprs_close(struct net_device *dev)
+{
+ netif_stop_queue(dev);
+ return 0;
+}
+
+static netdev_tx_t gprs_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct gprs_dev *gp = netdev_priv(dev);
+ struct sock *sk = gp->sk;
+ int len, err;
+
+ switch (skb->protocol) {
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ break;
+ default:
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+
+ skb_orphan(skb);
+ skb_set_owner_w(skb, sk);
+ len = skb->len;
+ err = pep_write(sk, skb);
+ if (err) {
+ net_dbg_ratelimited("%s: TX error (%d)\n", dev->name, err);
+ dev->stats.tx_aborted_errors++;
+ dev->stats.tx_errors++;
+ } else {
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+ }
+
+ netif_stop_queue(dev);
+ if (pep_writeable(sk))
+ netif_wake_queue(dev);
+ return NETDEV_TX_OK;
+}
+
+static const struct net_device_ops gprs_netdev_ops = {
+ .ndo_open = gprs_open,
+ .ndo_stop = gprs_close,
+ .ndo_start_xmit = gprs_xmit,
+};
+
+static void gprs_setup(struct net_device *dev)
+{
+ dev->features = NETIF_F_FRAGLIST;
+ dev->type = ARPHRD_PHONET_PIPE;
+ dev->flags = IFF_POINTOPOINT | IFF_NOARP;
+ dev->mtu = GPRS_DEFAULT_MTU;
+ dev->min_mtu = 576;
+ dev->max_mtu = (PHONET_MAX_MTU - 11);
+ dev->hard_header_len = 0;
+ dev->addr_len = 0;
+ dev->tx_queue_len = 10;
+
+ dev->netdev_ops = &gprs_netdev_ops;
+ dev->needs_free_netdev = true;
+}
+
+/*
+ * External interface
+ */
+
+/*
+ * Attach a GPRS interface to a datagram socket.
+ * Returns the interface index on success, negative error code on error.
+ */
+int gprs_attach(struct sock *sk)
+{
+ static const char ifname[] = "gprs%d";
+ struct gprs_dev *gp;
+ struct net_device *dev;
+ int err;
+
+ if (unlikely(sk->sk_type == SOCK_STREAM))
+ return -EINVAL; /* need packet boundaries */
+
+ /* Create net device */
+ dev = alloc_netdev(sizeof(*gp), ifname, NET_NAME_UNKNOWN, gprs_setup);
+ if (!dev)
+ return -ENOMEM;
+ gp = netdev_priv(dev);
+ gp->sk = sk;
+ gp->dev = dev;
+
+ netif_stop_queue(dev);
+ err = register_netdev(dev);
+ if (err) {
+ free_netdev(dev);
+ return err;
+ }
+
+ lock_sock(sk);
+ if (unlikely(sk->sk_user_data)) {
+ err = -EBUSY;
+ goto out_rel;
+ }
+ if (unlikely((1 << sk->sk_state & (TCPF_CLOSE|TCPF_LISTEN)) ||
+ sock_flag(sk, SOCK_DEAD))) {
+ err = -EINVAL;
+ goto out_rel;
+ }
+ sk->sk_user_data = gp;
+ gp->old_state_change = sk->sk_state_change;
+ gp->old_data_ready = sk->sk_data_ready;
+ gp->old_write_space = sk->sk_write_space;
+ sk->sk_state_change = gprs_state_change;
+ sk->sk_data_ready = gprs_data_ready;
+ sk->sk_write_space = gprs_write_space;
+ release_sock(sk);
+ sock_hold(sk);
+
+ printk(KERN_DEBUG"%s: attached\n", dev->name);
+ return dev->ifindex;
+
+out_rel:
+ release_sock(sk);
+ unregister_netdev(dev);
+ return err;
+}
+
+void gprs_detach(struct sock *sk)
+{
+ struct gprs_dev *gp = sk->sk_user_data;
+ struct net_device *dev = gp->dev;
+
+ lock_sock(sk);
+ sk->sk_user_data = NULL;
+ sk->sk_state_change = gp->old_state_change;
+ sk->sk_data_ready = gp->old_data_ready;
+ sk->sk_write_space = gp->old_write_space;
+ release_sock(sk);
+
+ printk(KERN_DEBUG"%s: detached\n", dev->name);
+ unregister_netdev(dev);
+ sock_put(sk);
+}
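
gprs_attach()/gprs_detach() are not called directly by user space; they are driven by the PNPIPE_ENCAP socket option handled in pep.c (next file). A rough sketch, assuming fd is an already-established Phonet pipe socket and the caller has CAP_NET_ADMIN:

    #include <sys/socket.h>
    #include <linux/phonet.h>

    static int enable_ip_encap(int fd)
    {
            int encap = PNPIPE_ENCAP_IP;
            int ifindex;
            socklen_t len = sizeof(ifindex);

            /* Triggers gprs_attach(): registers a gprs%d network interface. */
            if (setsockopt(fd, SOL_PNPIPE, PNPIPE_ENCAP, &encap, sizeof(encap)))
                    return -1;

            /* The new interface index can then be read back. */
            if (getsockopt(fd, SOL_PNPIPE, PNPIPE_IFINDEX, &ifindex, &len))
                    return -1;
            return ifindex;
    }
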
diff --git a/net/phonet/pep.c b/net/phonet/pep.c
new file mode 100644
index 0000000000..faba31f2ef
--- /dev/null
+++ b/net/phonet/pep.c
@@ -0,0 +1,1365 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * File: pep.c
+ *
+ * Phonet pipe protocol end point socket
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Author: Rémi Denis-Courmont
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <asm/ioctls.h>
+
+#include <linux/phonet.h>
+#include <linux/module.h>
+#include <net/phonet/phonet.h>
+#include <net/phonet/pep.h>
+#include <net/phonet/gprs.h>
+
+/* sk_state values:
+ * TCP_CLOSE sock not in use yet
+ * TCP_CLOSE_WAIT disconnected pipe
+ * TCP_LISTEN listening pipe endpoint
+ * TCP_SYN_RECV connected pipe in disabled state
+ * TCP_ESTABLISHED connected pipe in enabled state
+ *
+ * pep_sock locking:
+ * - sk_state, hlist: sock lock needed
+ * - listener: read only
+ * - pipe_handle: read only
+ */
+
+#define CREDITS_MAX 10
+#define CREDITS_THR 7
+
+#define pep_sb_size(s) (((s) + 5) & ~3) /* 2-bytes head, 32-bits aligned */
+
+/* Get the next TLV sub-block. */
+static unsigned char *pep_get_sb(struct sk_buff *skb, u8 *ptype, u8 *plen,
+ void *buf)
+{
+ void *data = NULL;
+ struct {
+ u8 sb_type;
+ u8 sb_len;
+ } *ph, h;
+ int buflen = *plen;
+
+ ph = skb_header_pointer(skb, 0, 2, &h);
+ if (ph == NULL || ph->sb_len < 2 || !pskb_may_pull(skb, ph->sb_len))
+ return NULL;
+ ph->sb_len -= 2;
+ *ptype = ph->sb_type;
+ *plen = ph->sb_len;
+
+ if (buflen > ph->sb_len)
+ buflen = ph->sb_len;
+ data = skb_header_pointer(skb, 2, buflen, buf);
+ __skb_pull(skb, 2 + ph->sb_len);
+ return data;
+}
+
+static struct sk_buff *pep_alloc_skb(struct sock *sk, const void *payload,
+ int len, gfp_t priority)
+{
+ struct sk_buff *skb = alloc_skb(MAX_PNPIPE_HEADER + len, priority);
+ if (!skb)
+ return NULL;
+ skb_set_owner_w(skb, sk);
+
+ skb_reserve(skb, MAX_PNPIPE_HEADER);
+ __skb_put(skb, len);
+ skb_copy_to_linear_data(skb, payload, len);
+ __skb_push(skb, sizeof(struct pnpipehdr));
+ skb_reset_transport_header(skb);
+ return skb;
+}
+
+static int pep_reply(struct sock *sk, struct sk_buff *oskb, u8 code,
+ const void *data, int len, gfp_t priority)
+{
+ const struct pnpipehdr *oph = pnp_hdr(oskb);
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+ struct sockaddr_pn peer;
+
+ skb = pep_alloc_skb(sk, data, len, priority);
+ if (!skb)
+ return -ENOMEM;
+
+ ph = pnp_hdr(skb);
+ ph->utid = oph->utid;
+ ph->message_id = oph->message_id + 1; /* REQ -> RESP */
+ ph->pipe_handle = oph->pipe_handle;
+ ph->error_code = code;
+
+ pn_skb_get_src_sockaddr(oskb, &peer);
+ return pn_skb_send(sk, skb, &peer);
+}
+
+static int pep_indicate(struct sock *sk, u8 id, u8 code,
+ const void *data, int len, gfp_t priority)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+
+ skb = pep_alloc_skb(sk, data, len, priority);
+ if (!skb)
+ return -ENOMEM;
+
+ ph = pnp_hdr(skb);
+ ph->utid = 0;
+ ph->message_id = id;
+ ph->pipe_handle = pn->pipe_handle;
+ ph->error_code = code;
+ return pn_skb_send(sk, skb, NULL);
+}
+
+#define PAD 0x00
+
+static int pipe_handler_request(struct sock *sk, u8 id, u8 code,
+ const void *data, int len)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+
+ skb = pep_alloc_skb(sk, data, len, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ ph = pnp_hdr(skb);
+ ph->utid = id; /* whatever */
+ ph->message_id = id;
+ ph->pipe_handle = pn->pipe_handle;
+ ph->error_code = code;
+ return pn_skb_send(sk, skb, NULL);
+}
+
+static int pipe_handler_send_created_ind(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ u8 data[4] = {
+ PN_PIPE_SB_NEGOTIATED_FC, pep_sb_size(2),
+ pn->tx_fc, pn->rx_fc,
+ };
+
+ return pep_indicate(sk, PNS_PIPE_CREATED_IND, 1 /* sub-blocks */,
+ data, 4, GFP_ATOMIC);
+}
+
+static int pep_accept_conn(struct sock *sk, struct sk_buff *skb)
+{
+ static const u8 data[20] = {
+ PAD, PAD, PAD, 2 /* sub-blocks */,
+ PN_PIPE_SB_REQUIRED_FC_TX, pep_sb_size(5), 3, PAD,
+ PN_MULTI_CREDIT_FLOW_CONTROL,
+ PN_ONE_CREDIT_FLOW_CONTROL,
+ PN_LEGACY_FLOW_CONTROL,
+ PAD,
+ PN_PIPE_SB_PREFERRED_FC_RX, pep_sb_size(5), 3, PAD,
+ PN_MULTI_CREDIT_FLOW_CONTROL,
+ PN_ONE_CREDIT_FLOW_CONTROL,
+ PN_LEGACY_FLOW_CONTROL,
+ PAD,
+ };
+
+ might_sleep();
+ return pep_reply(sk, skb, PN_PIPE_NO_ERROR, data, sizeof(data),
+ GFP_KERNEL);
+}
+
+static int pep_reject_conn(struct sock *sk, struct sk_buff *skb, u8 code,
+ gfp_t priority)
+{
+ static const u8 data[4] = { PAD, PAD, PAD, 0 /* sub-blocks */ };
+ WARN_ON(code == PN_PIPE_NO_ERROR);
+ return pep_reply(sk, skb, code, data, sizeof(data), priority);
+}
+
+/* Control requests are not sent by the pipe service and have a specific
+ * message format. */
+static int pep_ctrlreq_error(struct sock *sk, struct sk_buff *oskb, u8 code,
+ gfp_t priority)
+{
+ const struct pnpipehdr *oph = pnp_hdr(oskb);
+ struct sk_buff *skb;
+ struct pnpipehdr *ph;
+ struct sockaddr_pn dst;
+ u8 data[4] = {
+ oph->pep_type, /* PEP type */
+ code, /* error code, at an unusual offset */
+ PAD, PAD,
+ };
+
+ skb = pep_alloc_skb(sk, data, 4, priority);
+ if (!skb)
+ return -ENOMEM;
+
+ ph = pnp_hdr(skb);
+ ph->utid = oph->utid;
+ ph->message_id = PNS_PEP_CTRL_RESP;
+ ph->pipe_handle = oph->pipe_handle;
+ ph->data0 = oph->data[0]; /* CTRL id */
+
+ pn_skb_get_src_sockaddr(oskb, &dst);
+ return pn_skb_send(sk, skb, &dst);
+}
+
+static int pipe_snd_status(struct sock *sk, u8 type, u8 status, gfp_t priority)
+{
+ u8 data[4] = { type, PAD, PAD, status };
+
+ return pep_indicate(sk, PNS_PEP_STATUS_IND, PN_PEP_TYPE_COMMON,
+ data, 4, priority);
+}
+
+/* Send our RX flow control information to the sender.
+ * Socket must be locked. */
+static void pipe_grant_credits(struct sock *sk, gfp_t priority)
+{
+ struct pep_sock *pn = pep_sk(sk);
+
+ BUG_ON(sk->sk_state != TCP_ESTABLISHED);
+
+ switch (pn->rx_fc) {
+ case PN_LEGACY_FLOW_CONTROL: /* TODO */
+ break;
+ case PN_ONE_CREDIT_FLOW_CONTROL:
+ if (pipe_snd_status(sk, PN_PEP_IND_FLOW_CONTROL,
+ PEP_IND_READY, priority) == 0)
+ pn->rx_credits = 1;
+ break;
+ case PN_MULTI_CREDIT_FLOW_CONTROL:
+ if ((pn->rx_credits + CREDITS_THR) > CREDITS_MAX)
+ break;
+ if (pipe_snd_status(sk, PN_PEP_IND_ID_MCFC_GRANT_CREDITS,
+ CREDITS_MAX - pn->rx_credits,
+ priority) == 0)
+ pn->rx_credits = CREDITS_MAX;
+ break;
+ }
+}
+
+static int pipe_rcv_status(struct sock *sk, struct sk_buff *skb)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *hdr;
+ int wake = 0;
+
+ if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
+ return -EINVAL;
+
+ hdr = pnp_hdr(skb);
+ if (hdr->pep_type != PN_PEP_TYPE_COMMON) {
+ net_dbg_ratelimited("Phonet unknown PEP type: %u\n",
+ (unsigned int)hdr->pep_type);
+ return -EOPNOTSUPP;
+ }
+
+ switch (hdr->data[0]) {
+ case PN_PEP_IND_FLOW_CONTROL:
+ switch (pn->tx_fc) {
+ case PN_LEGACY_FLOW_CONTROL:
+ switch (hdr->data[3]) {
+ case PEP_IND_BUSY:
+ atomic_set(&pn->tx_credits, 0);
+ break;
+ case PEP_IND_READY:
+ atomic_set(&pn->tx_credits, wake = 1);
+ break;
+ }
+ break;
+ case PN_ONE_CREDIT_FLOW_CONTROL:
+ if (hdr->data[3] == PEP_IND_READY)
+ atomic_set(&pn->tx_credits, wake = 1);
+ break;
+ }
+ break;
+
+ case PN_PEP_IND_ID_MCFC_GRANT_CREDITS:
+ if (pn->tx_fc != PN_MULTI_CREDIT_FLOW_CONTROL)
+ break;
+ atomic_add(wake = hdr->data[3], &pn->tx_credits);
+ break;
+
+ default:
+ net_dbg_ratelimited("Phonet unknown PEP indication: %u\n",
+ (unsigned int)hdr->data[0]);
+ return -EOPNOTSUPP;
+ }
+ if (wake)
+ sk->sk_write_space(sk);
+ return 0;
+}
+
+static int pipe_rcv_created(struct sock *sk, struct sk_buff *skb)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *hdr = pnp_hdr(skb);
+ u8 n_sb = hdr->data0;
+
+ pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
+ __skb_pull(skb, sizeof(*hdr));
+ while (n_sb > 0) {
+ u8 type, buf[2], len = sizeof(buf);
+ u8 *data = pep_get_sb(skb, &type, &len, buf);
+
+ if (data == NULL)
+ return -EINVAL;
+ switch (type) {
+ case PN_PIPE_SB_NEGOTIATED_FC:
+ if (len < 2 || (data[0] | data[1]) > 3)
+ break;
+ pn->tx_fc = data[0] & 3;
+ pn->rx_fc = data[1] & 3;
+ break;
+ }
+ n_sb--;
+ }
+ return 0;
+}
+
+/* Queue an skb to a connected sock.
+ * Socket lock must be held. */
+static int pipe_do_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *hdr = pnp_hdr(skb);
+ struct sk_buff_head *queue;
+ int err = 0;
+
+ BUG_ON(sk->sk_state == TCP_CLOSE_WAIT);
+
+ switch (hdr->message_id) {
+ case PNS_PEP_CONNECT_REQ:
+ pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_ATOMIC);
+ break;
+
+ case PNS_PEP_DISCONNECT_REQ:
+ pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
+ sk->sk_state = TCP_CLOSE_WAIT;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_state_change(sk);
+ break;
+
+ case PNS_PEP_ENABLE_REQ:
+ /* Wait for PNS_PIPE_(ENABLED|REDIRECTED)_IND */
+ pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
+ break;
+
+ case PNS_PEP_RESET_REQ:
+ switch (hdr->state_after_reset) {
+ case PN_PIPE_DISABLE:
+ pn->init_enable = 0;
+ break;
+ case PN_PIPE_ENABLE:
+ pn->init_enable = 1;
+ break;
+ default: /* not allowed to send an error here!? */
+ err = -EINVAL;
+ goto out;
+ }
+ fallthrough;
+ case PNS_PEP_DISABLE_REQ:
+ atomic_set(&pn->tx_credits, 0);
+ pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
+ break;
+
+ case PNS_PEP_CTRL_REQ:
+ if (skb_queue_len(&pn->ctrlreq_queue) >= PNPIPE_CTRLREQ_MAX) {
+ atomic_inc(&sk->sk_drops);
+ break;
+ }
+ __skb_pull(skb, 4);
+ queue = &pn->ctrlreq_queue;
+ goto queue;
+
+ case PNS_PIPE_ALIGNED_DATA:
+ __skb_pull(skb, 1);
+ fallthrough;
+ case PNS_PIPE_DATA:
+ __skb_pull(skb, 3); /* Pipe data header */
+ if (!pn_flow_safe(pn->rx_fc)) {
+ err = sock_queue_rcv_skb(sk, skb);
+ if (!err)
+ return NET_RX_SUCCESS;
+ err = -ENOBUFS;
+ break;
+ }
+
+ if (pn->rx_credits == 0) {
+ atomic_inc(&sk->sk_drops);
+ err = -ENOBUFS;
+ break;
+ }
+ pn->rx_credits--;
+ queue = &sk->sk_receive_queue;
+ goto queue;
+
+ case PNS_PEP_STATUS_IND:
+ pipe_rcv_status(sk, skb);
+ break;
+
+ case PNS_PIPE_REDIRECTED_IND:
+ err = pipe_rcv_created(sk, skb);
+ break;
+
+ case PNS_PIPE_CREATED_IND:
+ err = pipe_rcv_created(sk, skb);
+ if (err)
+ break;
+ fallthrough;
+ case PNS_PIPE_RESET_IND:
+ if (!pn->init_enable)
+ break;
+ fallthrough;
+ case PNS_PIPE_ENABLED_IND:
+ if (!pn_flow_safe(pn->tx_fc)) {
+ atomic_set(&pn->tx_credits, 1);
+ sk->sk_write_space(sk);
+ }
+ if (sk->sk_state == TCP_ESTABLISHED)
+ break; /* Nothing to do */
+ sk->sk_state = TCP_ESTABLISHED;
+ pipe_grant_credits(sk, GFP_ATOMIC);
+ break;
+
+ case PNS_PIPE_DISABLED_IND:
+ sk->sk_state = TCP_SYN_RECV;
+ pn->rx_credits = 0;
+ break;
+
+ default:
+ net_dbg_ratelimited("Phonet unknown PEP message: %u\n",
+ hdr->message_id);
+ err = -EINVAL;
+ }
+out:
+ kfree_skb(skb);
+ return (err == -ENOBUFS) ? NET_RX_DROP : NET_RX_SUCCESS;
+
+queue:
+ skb->dev = NULL;
+ skb_set_owner_r(skb, sk);
+ skb_queue_tail(queue, skb);
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk);
+ return NET_RX_SUCCESS;
+}
+
+/* Destroy connected sock. */
+static void pipe_destruct(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+
+ skb_queue_purge(&sk->sk_receive_queue);
+ skb_queue_purge(&pn->ctrlreq_queue);
+}
+
+static u8 pipe_negotiate_fc(const u8 *fcs, unsigned int n)
+{
+ unsigned int i;
+ u8 final_fc = PN_NO_FLOW_CONTROL;
+
+ for (i = 0; i < n; i++) {
+ u8 fc = fcs[i];
+
+ if (fc > final_fc && fc < PN_MAX_FLOW_CONTROL)
+ final_fc = fc;
+ }
+ return final_fc;
+}
+
+static int pep_connresp_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *hdr;
+ u8 n_sb;
+
+ if (!pskb_pull(skb, sizeof(*hdr) + 4))
+ return -EINVAL;
+
+ hdr = pnp_hdr(skb);
+ if (hdr->error_code != PN_PIPE_NO_ERROR)
+ return -ECONNREFUSED;
+
+ /* Parse sub-blocks */
+ n_sb = hdr->data[3];
+ while (n_sb > 0) {
+ u8 type, buf[6], len = sizeof(buf);
+ const u8 *data = pep_get_sb(skb, &type, &len, buf);
+
+ if (data == NULL)
+ return -EINVAL;
+
+ switch (type) {
+ case PN_PIPE_SB_REQUIRED_FC_TX:
+ if (len < 2 || len < data[0])
+ break;
+ pn->tx_fc = pipe_negotiate_fc(data + 2, len - 2);
+ break;
+
+ case PN_PIPE_SB_PREFERRED_FC_RX:
+ if (len < 2 || len < data[0])
+ break;
+ pn->rx_fc = pipe_negotiate_fc(data + 2, len - 2);
+ break;
+
+ }
+ n_sb--;
+ }
+
+ return pipe_handler_send_created_ind(sk);
+}
+
+static int pep_enableresp_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pnpipehdr *hdr = pnp_hdr(skb);
+
+ if (hdr->error_code != PN_PIPE_NO_ERROR)
+ return -ECONNREFUSED;
+
+ return pep_indicate(sk, PNS_PIPE_ENABLED_IND, 0 /* sub-blocks */,
+ NULL, 0, GFP_ATOMIC);
+
+}
+
+static void pipe_start_flow_control(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+
+ if (!pn_flow_safe(pn->tx_fc)) {
+ atomic_set(&pn->tx_credits, 1);
+ sk->sk_write_space(sk);
+ }
+ pipe_grant_credits(sk, GFP_ATOMIC);
+}
+
+/* Queue an skb to an actively connected sock.
+ * Socket lock must be held. */
+static int pipe_handler_do_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *hdr = pnp_hdr(skb);
+ int err = NET_RX_SUCCESS;
+
+ switch (hdr->message_id) {
+ case PNS_PIPE_ALIGNED_DATA:
+ __skb_pull(skb, 1);
+ fallthrough;
+ case PNS_PIPE_DATA:
+ __skb_pull(skb, 3); /* Pipe data header */
+ if (!pn_flow_safe(pn->rx_fc)) {
+ err = sock_queue_rcv_skb(sk, skb);
+ if (!err)
+ return NET_RX_SUCCESS;
+ err = NET_RX_DROP;
+ break;
+ }
+
+ if (pn->rx_credits == 0) {
+ atomic_inc(&sk->sk_drops);
+ err = NET_RX_DROP;
+ break;
+ }
+ pn->rx_credits--;
+ skb->dev = NULL;
+ skb_set_owner_r(skb, sk);
+ skb_queue_tail(&sk->sk_receive_queue, skb);
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk);
+ return NET_RX_SUCCESS;
+
+ case PNS_PEP_CONNECT_RESP:
+ if (sk->sk_state != TCP_SYN_SENT)
+ break;
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_state_change(sk);
+ if (pep_connresp_rcv(sk, skb)) {
+ sk->sk_state = TCP_CLOSE_WAIT;
+ break;
+ }
+ if (pn->init_enable == PN_PIPE_DISABLE)
+ sk->sk_state = TCP_SYN_RECV;
+ else {
+ sk->sk_state = TCP_ESTABLISHED;
+ pipe_start_flow_control(sk);
+ }
+ break;
+
+ case PNS_PEP_ENABLE_RESP:
+ if (sk->sk_state != TCP_SYN_SENT)
+ break;
+
+ if (pep_enableresp_rcv(sk, skb)) {
+ sk->sk_state = TCP_CLOSE_WAIT;
+ break;
+ }
+
+ sk->sk_state = TCP_ESTABLISHED;
+ pipe_start_flow_control(sk);
+ break;
+
+ case PNS_PEP_DISCONNECT_RESP:
+ /* sock should already be dead, nothing to do */
+ break;
+
+ case PNS_PEP_STATUS_IND:
+ pipe_rcv_status(sk, skb);
+ break;
+ }
+ kfree_skb(skb);
+ return err;
+}
+
+/* Listening sock must be locked */
+static struct sock *pep_find_pipe(const struct hlist_head *hlist,
+ const struct sockaddr_pn *dst,
+ u8 pipe_handle)
+{
+ struct sock *sknode;
+ u16 dobj = pn_sockaddr_get_object(dst);
+
+ sk_for_each(sknode, hlist) {
+ struct pep_sock *pnnode = pep_sk(sknode);
+
+ /* Ports match, but addresses might not: */
+ if (pnnode->pn_sk.sobject != dobj)
+ continue;
+ if (pnnode->pipe_handle != pipe_handle)
+ continue;
+ if (sknode->sk_state == TCP_CLOSE_WAIT)
+ continue;
+
+ sock_hold(sknode);
+ return sknode;
+ }
+ return NULL;
+}
+
+/*
+ * Deliver an skb to a listening sock.
+ * Socket lock must be held.
+ * We then queue the skb to the right connected sock (if any).
+ */
+static int pep_do_rcv(struct sock *sk, struct sk_buff *skb)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct sock *sknode;
+ struct pnpipehdr *hdr;
+ struct sockaddr_pn dst;
+ u8 pipe_handle;
+
+ if (!pskb_may_pull(skb, sizeof(*hdr)))
+ goto drop;
+
+ hdr = pnp_hdr(skb);
+ pipe_handle = hdr->pipe_handle;
+ if (pipe_handle == PN_PIPE_INVALID_HANDLE)
+ goto drop;
+
+ pn_skb_get_dst_sockaddr(skb, &dst);
+
+ /* Look for an existing pipe handle */
+ sknode = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
+ if (sknode)
+ return sk_receive_skb(sknode, skb, 1);
+
+ switch (hdr->message_id) {
+ case PNS_PEP_CONNECT_REQ:
+ if (sk->sk_state != TCP_LISTEN || sk_acceptq_is_full(sk)) {
+ pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE,
+ GFP_ATOMIC);
+ break;
+ }
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ sk_acceptq_added(sk);
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk);
+ return NET_RX_SUCCESS;
+
+ case PNS_PEP_DISCONNECT_REQ:
+ pep_reply(sk, skb, PN_PIPE_NO_ERROR, NULL, 0, GFP_ATOMIC);
+ break;
+
+ case PNS_PEP_CTRL_REQ:
+ pep_ctrlreq_error(sk, skb, PN_PIPE_INVALID_HANDLE, GFP_ATOMIC);
+ break;
+
+ case PNS_PEP_RESET_REQ:
+ case PNS_PEP_ENABLE_REQ:
+ case PNS_PEP_DISABLE_REQ:
+ /* invalid handle is not even allowed here! */
+ break;
+
+ default:
+ if ((1 << sk->sk_state)
+ & ~(TCPF_CLOSE|TCPF_LISTEN|TCPF_CLOSE_WAIT))
+ /* actively connected socket */
+ return pipe_handler_do_rcv(sk, skb);
+ }
+drop:
+ kfree_skb(skb);
+ return NET_RX_SUCCESS;
+}
+
+static int pipe_do_remove(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *ph;
+ struct sk_buff *skb;
+
+ skb = pep_alloc_skb(sk, NULL, 0, GFP_KERNEL);
+ if (!skb)
+ return -ENOMEM;
+
+ ph = pnp_hdr(skb);
+ ph->utid = 0;
+ ph->message_id = PNS_PIPE_REMOVE_REQ;
+ ph->pipe_handle = pn->pipe_handle;
+ ph->data0 = PAD;
+ return pn_skb_send(sk, skb, NULL);
+}
+
+/* associated socket ceases to exist */
+static void pep_sock_close(struct sock *sk, long timeout)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ int ifindex = 0;
+
+ sock_hold(sk); /* keep a reference after sk_common_release() */
+ sk_common_release(sk);
+
+ lock_sock(sk);
+ if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED)) {
+ if (sk->sk_backlog_rcv == pipe_do_rcv)
+ /* Forcefully remove dangling Phonet pipe */
+ pipe_do_remove(sk);
+ else
+ pipe_handler_request(sk, PNS_PEP_DISCONNECT_REQ, PAD,
+ NULL, 0);
+ }
+ sk->sk_state = TCP_CLOSE;
+
+ ifindex = pn->ifindex;
+ pn->ifindex = 0;
+ release_sock(sk);
+
+ if (ifindex)
+ gprs_detach(sk);
+ sock_put(sk);
+}
+
+static struct sock *pep_sock_accept(struct sock *sk, int flags, int *errp,
+ bool kern)
+{
+ struct pep_sock *pn = pep_sk(sk), *newpn;
+ struct sock *newsk = NULL;
+ struct sk_buff *skb;
+ struct pnpipehdr *hdr;
+ struct sockaddr_pn dst, src;
+ int err;
+ u16 peer_type;
+ u8 pipe_handle, enabled, n_sb;
+ u8 aligned = 0;
+
+ skb = skb_recv_datagram(sk, (flags & O_NONBLOCK) ? MSG_DONTWAIT : 0,
+ errp);
+ if (!skb)
+ return NULL;
+
+ lock_sock(sk);
+ if (sk->sk_state != TCP_LISTEN) {
+ err = -EINVAL;
+ goto drop;
+ }
+ sk_acceptq_removed(sk);
+
+ err = -EPROTO;
+ if (!pskb_may_pull(skb, sizeof(*hdr) + 4))
+ goto drop;
+
+ hdr = pnp_hdr(skb);
+ pipe_handle = hdr->pipe_handle;
+ switch (hdr->state_after_connect) {
+ case PN_PIPE_DISABLE:
+ enabled = 0;
+ break;
+ case PN_PIPE_ENABLE:
+ enabled = 1;
+ break;
+ default:
+ pep_reject_conn(sk, skb, PN_PIPE_ERR_INVALID_PARAM,
+ GFP_KERNEL);
+ goto drop;
+ }
+ peer_type = hdr->other_pep_type << 8;
+
+ /* Parse sub-blocks (options) */
+ n_sb = hdr->data[3];
+ while (n_sb > 0) {
+ u8 type, buf[1], len = sizeof(buf);
+ const u8 *data = pep_get_sb(skb, &type, &len, buf);
+
+ if (data == NULL)
+ goto drop;
+ switch (type) {
+ case PN_PIPE_SB_CONNECT_REQ_PEP_SUB_TYPE:
+ if (len < 1)
+ goto drop;
+ peer_type = (peer_type & 0xff00) | data[0];
+ break;
+ case PN_PIPE_SB_ALIGNED_DATA:
+ aligned = data[0] != 0;
+ break;
+ }
+ n_sb--;
+ }
+
+	/* Check for duplicate pipe handle */
+	pn_skb_get_dst_sockaddr(skb, &dst);
+	newsk = pep_find_pipe(&pn->hlist, &dst, pipe_handle);
+ if (unlikely(newsk)) {
+ __sock_put(newsk);
+ newsk = NULL;
+ pep_reject_conn(sk, skb, PN_PIPE_ERR_PEP_IN_USE, GFP_KERNEL);
+ goto drop;
+ }
+
+ /* Create a new to-be-accepted sock */
+ newsk = sk_alloc(sock_net(sk), PF_PHONET, GFP_KERNEL, sk->sk_prot,
+ kern);
+ if (!newsk) {
+ pep_reject_conn(sk, skb, PN_PIPE_ERR_OVERLOAD, GFP_KERNEL);
+ err = -ENOBUFS;
+ goto drop;
+ }
+
+ sock_init_data(NULL, newsk);
+ newsk->sk_state = TCP_SYN_RECV;
+ newsk->sk_backlog_rcv = pipe_do_rcv;
+ newsk->sk_protocol = sk->sk_protocol;
+ newsk->sk_destruct = pipe_destruct;
+
+ newpn = pep_sk(newsk);
+ pn_skb_get_src_sockaddr(skb, &src);
+ newpn->pn_sk.sobject = pn_sockaddr_get_object(&dst);
+ newpn->pn_sk.dobject = pn_sockaddr_get_object(&src);
+ newpn->pn_sk.resource = pn_sockaddr_get_resource(&dst);
+ sock_hold(sk);
+ newpn->listener = sk;
+ skb_queue_head_init(&newpn->ctrlreq_queue);
+ newpn->pipe_handle = pipe_handle;
+ atomic_set(&newpn->tx_credits, 0);
+ newpn->ifindex = 0;
+ newpn->peer_type = peer_type;
+ newpn->rx_credits = 0;
+ newpn->rx_fc = newpn->tx_fc = PN_LEGACY_FLOW_CONTROL;
+ newpn->init_enable = enabled;
+ newpn->aligned = aligned;
+
+ err = pep_accept_conn(newsk, skb);
+ if (err) {
+ __sock_put(sk);
+ sock_put(newsk);
+ newsk = NULL;
+ goto drop;
+ }
+ sk_add_node(newsk, &pn->hlist);
+drop:
+ release_sock(sk);
+ kfree_skb(skb);
+ *errp = err;
+ return newsk;
+}
+
+static int pep_sock_connect(struct sock *sk, struct sockaddr *addr, int len)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ int err;
+ u8 data[4] = { 0 /* sub-blocks */, PAD, PAD, PAD };
+
+ if (pn->pipe_handle == PN_PIPE_INVALID_HANDLE)
+ pn->pipe_handle = 1; /* anything but INVALID_HANDLE */
+
+ err = pipe_handler_request(sk, PNS_PEP_CONNECT_REQ,
+ pn->init_enable, data, 4);
+ if (err) {
+ pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
+ return err;
+ }
+
+ sk->sk_state = TCP_SYN_SENT;
+
+ return 0;
+}
+
+static int pep_sock_enable(struct sock *sk, struct sockaddr *addr, int len)
+{
+ int err;
+
+ err = pipe_handler_request(sk, PNS_PEP_ENABLE_REQ, PAD,
+ NULL, 0);
+ if (err)
+ return err;
+
+ sk->sk_state = TCP_SYN_SENT;
+
+ return 0;
+}
+
+static int pep_ioctl(struct sock *sk, int cmd, int *karg)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ int ret = -ENOIOCTLCMD;
+
+ switch (cmd) {
+ case SIOCINQ:
+ if (sk->sk_state == TCP_LISTEN) {
+ ret = -EINVAL;
+ break;
+ }
+
+ lock_sock(sk);
+ if (sock_flag(sk, SOCK_URGINLINE) &&
+ !skb_queue_empty(&pn->ctrlreq_queue))
+ *karg = skb_peek(&pn->ctrlreq_queue)->len;
+ else if (!skb_queue_empty(&sk->sk_receive_queue))
+ *karg = skb_peek(&sk->sk_receive_queue)->len;
+ else
+ *karg = 0;
+ release_sock(sk);
+ ret = 0;
+ break;
+
+ case SIOCPNENABLEPIPE:
+ lock_sock(sk);
+ if (sk->sk_state == TCP_SYN_SENT)
+ ret = -EBUSY;
+ else if (sk->sk_state == TCP_ESTABLISHED)
+ ret = -EISCONN;
+ else if (!pn->pn_sk.sobject)
+ ret = -EADDRNOTAVAIL;
+ else
+ ret = pep_sock_enable(sk, NULL, 0);
+ release_sock(sk);
+ break;
+ }
+
+ return ret;
+}
+
+static int pep_init(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+
+ sk->sk_destruct = pipe_destruct;
+ INIT_HLIST_HEAD(&pn->hlist);
+ pn->listener = NULL;
+ skb_queue_head_init(&pn->ctrlreq_queue);
+ atomic_set(&pn->tx_credits, 0);
+ pn->ifindex = 0;
+ pn->peer_type = 0;
+ pn->pipe_handle = PN_PIPE_INVALID_HANDLE;
+ pn->rx_credits = 0;
+ pn->rx_fc = pn->tx_fc = PN_LEGACY_FLOW_CONTROL;
+ pn->init_enable = 1;
+ pn->aligned = 0;
+ return 0;
+}
+
+static int pep_setsockopt(struct sock *sk, int level, int optname,
+ sockptr_t optval, unsigned int optlen)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ int val = 0, err = 0;
+
+ if (level != SOL_PNPIPE)
+ return -ENOPROTOOPT;
+ if (optlen >= sizeof(int)) {
+ if (copy_from_sockptr(&val, optval, sizeof(int)))
+ return -EFAULT;
+ }
+
+ lock_sock(sk);
+ switch (optname) {
+ case PNPIPE_ENCAP:
+ if (val && val != PNPIPE_ENCAP_IP) {
+ err = -EINVAL;
+ break;
+ }
+ if (!pn->ifindex == !val)
+ break; /* Nothing to do! */
+ if (!capable(CAP_NET_ADMIN)) {
+ err = -EPERM;
+ break;
+ }
+ if (val) {
+ release_sock(sk);
+ err = gprs_attach(sk);
+ if (err > 0) {
+ pn->ifindex = err;
+ err = 0;
+ }
+ } else {
+ pn->ifindex = 0;
+ release_sock(sk);
+ gprs_detach(sk);
+ err = 0;
+ }
+ goto out_norel;
+
+ case PNPIPE_HANDLE:
+ if ((sk->sk_state == TCP_CLOSE) &&
+ (val >= 0) && (val < PN_PIPE_INVALID_HANDLE))
+ pn->pipe_handle = val;
+ else
+ err = -EINVAL;
+ break;
+
+ case PNPIPE_INITSTATE:
+ pn->init_enable = !!val;
+ break;
+
+ default:
+ err = -ENOPROTOOPT;
+ }
+ release_sock(sk);
+
+out_norel:
+ return err;
+}
+
+static int pep_getsockopt(struct sock *sk, int level, int optname,
+ char __user *optval, int __user *optlen)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ int len, val;
+
+ if (level != SOL_PNPIPE)
+ return -ENOPROTOOPT;
+ if (get_user(len, optlen))
+ return -EFAULT;
+
+ switch (optname) {
+ case PNPIPE_ENCAP:
+ val = pn->ifindex ? PNPIPE_ENCAP_IP : PNPIPE_ENCAP_NONE;
+ break;
+
+ case PNPIPE_IFINDEX:
+ val = pn->ifindex;
+ break;
+
+ case PNPIPE_HANDLE:
+ val = pn->pipe_handle;
+ if (val == PN_PIPE_INVALID_HANDLE)
+ return -EINVAL;
+ break;
+
+ case PNPIPE_INITSTATE:
+ val = pn->init_enable;
+ break;
+
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ len = min_t(unsigned int, sizeof(int), len);
+ if (put_user(len, optlen))
+ return -EFAULT;
+ if (put_user(val, (int __user *) optval))
+ return -EFAULT;
+ return 0;
+}
+
+static int pipe_skb_send(struct sock *sk, struct sk_buff *skb)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct pnpipehdr *ph;
+ int err;
+
+ if (pn_flow_safe(pn->tx_fc) &&
+ !atomic_add_unless(&pn->tx_credits, -1, 0)) {
+ kfree_skb(skb);
+ return -ENOBUFS;
+ }
+
+ skb_push(skb, 3 + pn->aligned);
+ skb_reset_transport_header(skb);
+ ph = pnp_hdr(skb);
+ ph->utid = 0;
+ if (pn->aligned) {
+ ph->message_id = PNS_PIPE_ALIGNED_DATA;
+ ph->data0 = 0; /* padding */
+ } else
+ ph->message_id = PNS_PIPE_DATA;
+ ph->pipe_handle = pn->pipe_handle;
+ err = pn_skb_send(sk, skb, NULL);
+
+ if (err && pn_flow_safe(pn->tx_fc))
+ atomic_inc(&pn->tx_credits);
+ return err;
+
+}
+
+static int pep_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct sk_buff *skb;
+ long timeo;
+ int flags = msg->msg_flags;
+ int err, done;
+
+ if (len > USHRT_MAX)
+ return -EMSGSIZE;
+
+ if ((msg->msg_flags & ~(MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|
+ MSG_CMSG_COMPAT)) ||
+ !(msg->msg_flags & MSG_EOR))
+ return -EOPNOTSUPP;
+
+ skb = sock_alloc_send_skb(sk, MAX_PNPIPE_HEADER + len,
+ flags & MSG_DONTWAIT, &err);
+ if (!skb)
+ return err;
+
+ skb_reserve(skb, MAX_PHONET_HEADER + 3 + pn->aligned);
+ err = memcpy_from_msg(skb_put(skb, len), msg, len);
+ if (err < 0)
+ goto outfree;
+
+ lock_sock(sk);
+ timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
+ if ((1 << sk->sk_state) & (TCPF_LISTEN|TCPF_CLOSE)) {
+ err = -ENOTCONN;
+ goto out;
+ }
+ if (sk->sk_state != TCP_ESTABLISHED) {
+ /* Wait until the pipe gets to enabled state */
+disabled:
+ err = sk_stream_wait_connect(sk, &timeo);
+ if (err)
+ goto out;
+
+ if (sk->sk_state == TCP_CLOSE_WAIT) {
+ err = -ECONNRESET;
+ goto out;
+ }
+ }
+ BUG_ON(sk->sk_state != TCP_ESTABLISHED);
+
+ /* Wait until flow control allows TX */
+ done = atomic_read(&pn->tx_credits);
+ while (!done) {
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
+
+ if (!timeo) {
+ err = -EAGAIN;
+ goto out;
+ }
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ goto out;
+ }
+
+ add_wait_queue(sk_sleep(sk), &wait);
+ done = sk_wait_event(sk, &timeo, atomic_read(&pn->tx_credits), &wait);
+ remove_wait_queue(sk_sleep(sk), &wait);
+
+ if (sk->sk_state != TCP_ESTABLISHED)
+ goto disabled;
+ }
+
+ err = pipe_skb_send(sk, skb);
+ if (err >= 0)
+ err = len; /* success! */
+ skb = NULL;
+out:
+ release_sock(sk);
+outfree:
+ kfree_skb(skb);
+ return err;
+}
+
+int pep_writeable(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+
+ return atomic_read(&pn->tx_credits);
+}
+
+int pep_write(struct sock *sk, struct sk_buff *skb)
+{
+ struct sk_buff *rskb, *fs;
+ int flen = 0;
+
+ if (pep_sk(sk)->aligned)
+ return pipe_skb_send(sk, skb);
+
+ rskb = alloc_skb(MAX_PNPIPE_HEADER, GFP_ATOMIC);
+ if (!rskb) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ skb_shinfo(rskb)->frag_list = skb;
+ rskb->len += skb->len;
+ rskb->data_len += rskb->len;
+ rskb->truesize += rskb->len;
+
+ /* Avoid nested fragments */
+ skb_walk_frags(skb, fs)
+ flen += fs->len;
+ skb->next = skb_shinfo(skb)->frag_list;
+ skb_frag_list_init(skb);
+ skb->len -= flen;
+ skb->data_len -= flen;
+ skb->truesize -= flen;
+
+ skb_reserve(rskb, MAX_PHONET_HEADER + 3);
+ return pipe_skb_send(sk, rskb);
+}
+
+struct sk_buff *pep_read(struct sock *sk)
+{
+ struct sk_buff *skb = skb_dequeue(&sk->sk_receive_queue);
+
+ if (sk->sk_state == TCP_ESTABLISHED)
+ pipe_grant_credits(sk, GFP_ATOMIC);
+ return skb;
+}
+
+static int pep_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ int flags, int *addr_len)
+{
+ struct sk_buff *skb;
+ int err;
+
+ if (flags & ~(MSG_OOB|MSG_PEEK|MSG_TRUNC|MSG_DONTWAIT|MSG_WAITALL|
+ MSG_NOSIGNAL|MSG_CMSG_COMPAT))
+ return -EOPNOTSUPP;
+
+ if (unlikely(1 << sk->sk_state & (TCPF_LISTEN | TCPF_CLOSE)))
+ return -ENOTCONN;
+
+ if ((flags & MSG_OOB) || sock_flag(sk, SOCK_URGINLINE)) {
+ /* Dequeue and acknowledge control request */
+ struct pep_sock *pn = pep_sk(sk);
+
+ if (flags & MSG_PEEK)
+ return -EOPNOTSUPP;
+ skb = skb_dequeue(&pn->ctrlreq_queue);
+ if (skb) {
+ pep_ctrlreq_error(sk, skb, PN_PIPE_NO_ERROR,
+ GFP_KERNEL);
+ msg->msg_flags |= MSG_OOB;
+ goto copy;
+ }
+ if (flags & MSG_OOB)
+ return -EINVAL;
+ }
+
+ skb = skb_recv_datagram(sk, flags, &err);
+ lock_sock(sk);
+ if (skb == NULL) {
+ if (err == -ENOTCONN && sk->sk_state == TCP_CLOSE_WAIT)
+ err = -ECONNRESET;
+ release_sock(sk);
+ return err;
+ }
+
+ if (sk->sk_state == TCP_ESTABLISHED)
+ pipe_grant_credits(sk, GFP_KERNEL);
+ release_sock(sk);
+copy:
+ msg->msg_flags |= MSG_EOR;
+ if (skb->len > len)
+ msg->msg_flags |= MSG_TRUNC;
+ else
+ len = skb->len;
+
+ err = skb_copy_datagram_msg(skb, 0, msg, len);
+ if (!err)
+ err = (flags & MSG_TRUNC) ? skb->len : len;
+
+ skb_free_datagram(sk, skb);
+ return err;
+}
+
+static void pep_sock_unhash(struct sock *sk)
+{
+ struct pep_sock *pn = pep_sk(sk);
+ struct sock *skparent = NULL;
+
+ lock_sock(sk);
+
+ if (pn->listener != NULL) {
+ skparent = pn->listener;
+ pn->listener = NULL;
+ release_sock(sk);
+
+ pn = pep_sk(skparent);
+ lock_sock(skparent);
+ sk_del_node_init(sk);
+ sk = skparent;
+ }
+
+ /* Unhash a listening sock only when it is closed
+ * and all of its active connected pipes are closed. */
+ if (hlist_empty(&pn->hlist))
+ pn_sock_unhash(&pn->pn_sk.sk);
+ release_sock(sk);
+
+ if (skparent)
+ sock_put(skparent);
+}
+
+static struct proto pep_proto = {
+ .close = pep_sock_close,
+ .accept = pep_sock_accept,
+ .connect = pep_sock_connect,
+ .ioctl = pep_ioctl,
+ .init = pep_init,
+ .setsockopt = pep_setsockopt,
+ .getsockopt = pep_getsockopt,
+ .sendmsg = pep_sendmsg,
+ .recvmsg = pep_recvmsg,
+ .backlog_rcv = pep_do_rcv,
+ .hash = pn_sock_hash,
+ .unhash = pep_sock_unhash,
+ .get_port = pn_sock_get_port,
+ .obj_size = sizeof(struct pep_sock),
+ .owner = THIS_MODULE,
+ .name = "PNPIPE",
+};
+
+static const struct phonet_protocol pep_pn_proto = {
+ .ops = &phonet_stream_ops,
+ .prot = &pep_proto,
+ .sock_type = SOCK_SEQPACKET,
+};
+
+static int __init pep_register(void)
+{
+ return phonet_proto_register(PN_PROTO_PIPE, &pep_pn_proto);
+}
+
+static void __exit pep_unregister(void)
+{
+ phonet_proto_unregister(PN_PROTO_PIPE, &pep_pn_proto);
+}
+
+module_init(pep_register);
+module_exit(pep_unregister);
+MODULE_AUTHOR("Remi Denis-Courmont, Nokia");
+MODULE_DESCRIPTION("Phonet pipe protocol");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NET_PF_PROTO(PF_PHONET, PN_PROTO_PIPE);
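
A rough user-space sketch of the passive (listening) pipe endpoint served by pep_do_rcv()/pep_sock_accept() above; the resource value is a placeholder, bind()/listen()/accept() come from phonet_stream_ops in socket.c (later in this series), and error handling/close() are omitted:

    #include <string.h>
    #include <sys/socket.h>
    #include <linux/phonet.h>

    static int pipe_listen(void)
    {
            struct sockaddr_pn spn;
            int fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);

            if (fd < 0)
                    return -1;
            memset(&spn, 0, sizeof(spn));
            spn.spn_family = AF_PHONET;
            spn.spn_resource = 0xD9;        /* placeholder resource */
            if (bind(fd, (struct sockaddr *)&spn, sizeof(spn)) || listen(fd, 5))
                    return -1;
            /* accept() returns one socket per PNS_PEP_CONNECT_REQ; note that
             * pep_sendmsg() requires MSG_EOR on every send(). */
            return accept(fd, NULL, NULL);
    }
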
diff --git a/net/phonet/pn_dev.c b/net/phonet/pn_dev.c
new file mode 100644
index 0000000000..cde671d29d
--- /dev/null
+++ b/net/phonet/pn_dev.c
@@ -0,0 +1,419 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * File: pn_dev.c
+ *
+ * Phonet network device
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ * Rémi Denis-Courmont
+ */
+
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/phonet.h>
+#include <linux/proc_fs.h>
+#include <linux/if_arp.h>
+#include <net/sock.h>
+#include <net/netns/generic.h>
+#include <net/phonet/pn_dev.h>
+
+struct phonet_routes {
+ struct mutex lock;
+ struct net_device __rcu *table[64];
+};
+
+struct phonet_net {
+ struct phonet_device_list pndevs;
+ struct phonet_routes routes;
+};
+
+static unsigned int phonet_net_id __read_mostly;
+
+static struct phonet_net *phonet_pernet(struct net *net)
+{
+ return net_generic(net, phonet_net_id);
+}
+
+struct phonet_device_list *phonet_device_list(struct net *net)
+{
+ struct phonet_net *pnn = phonet_pernet(net);
+ return &pnn->pndevs;
+}
+
+/* Allocate new Phonet device. */
+static struct phonet_device *__phonet_device_alloc(struct net_device *dev)
+{
+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
+ struct phonet_device *pnd = kmalloc(sizeof(*pnd), GFP_ATOMIC);
+ if (pnd == NULL)
+ return NULL;
+ pnd->netdev = dev;
+ bitmap_zero(pnd->addrs, 64);
+
+ BUG_ON(!mutex_is_locked(&pndevs->lock));
+ list_add_rcu(&pnd->list, &pndevs->list);
+ return pnd;
+}
+
+static struct phonet_device *__phonet_get(struct net_device *dev)
+{
+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
+ struct phonet_device *pnd;
+
+ BUG_ON(!mutex_is_locked(&pndevs->lock));
+ list_for_each_entry(pnd, &pndevs->list, list) {
+ if (pnd->netdev == dev)
+ return pnd;
+ }
+ return NULL;
+}
+
+static struct phonet_device *__phonet_get_rcu(struct net_device *dev)
+{
+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
+ struct phonet_device *pnd;
+
+ list_for_each_entry_rcu(pnd, &pndevs->list, list) {
+ if (pnd->netdev == dev)
+ return pnd;
+ }
+ return NULL;
+}
+
+static void phonet_device_destroy(struct net_device *dev)
+{
+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
+ struct phonet_device *pnd;
+
+ ASSERT_RTNL();
+
+ mutex_lock(&pndevs->lock);
+ pnd = __phonet_get(dev);
+ if (pnd)
+ list_del_rcu(&pnd->list);
+ mutex_unlock(&pndevs->lock);
+
+ if (pnd) {
+ u8 addr;
+
+ for_each_set_bit(addr, pnd->addrs, 64)
+ phonet_address_notify(RTM_DELADDR, dev, addr);
+ kfree(pnd);
+ }
+}
+
+struct net_device *phonet_device_get(struct net *net)
+{
+ struct phonet_device_list *pndevs = phonet_device_list(net);
+ struct phonet_device *pnd;
+ struct net_device *dev = NULL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pnd, &pndevs->list, list) {
+ dev = pnd->netdev;
+ BUG_ON(!dev);
+
+ if ((dev->reg_state == NETREG_REGISTERED) &&
+ ((pnd->netdev->flags & IFF_UP)) == IFF_UP)
+ break;
+ dev = NULL;
+ }
+ dev_hold(dev);
+ rcu_read_unlock();
+ return dev;
+}
+
+int phonet_address_add(struct net_device *dev, u8 addr)
+{
+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
+ struct phonet_device *pnd;
+ int err = 0;
+
+ mutex_lock(&pndevs->lock);
+ /* Find or create Phonet-specific device data */
+ pnd = __phonet_get(dev);
+ if (pnd == NULL)
+ pnd = __phonet_device_alloc(dev);
+ if (unlikely(pnd == NULL))
+ err = -ENOMEM;
+ else if (test_and_set_bit(addr >> 2, pnd->addrs))
+ err = -EEXIST;
+ mutex_unlock(&pndevs->lock);
+ return err;
+}
+
+int phonet_address_del(struct net_device *dev, u8 addr)
+{
+ struct phonet_device_list *pndevs = phonet_device_list(dev_net(dev));
+ struct phonet_device *pnd;
+ int err = 0;
+
+ mutex_lock(&pndevs->lock);
+ pnd = __phonet_get(dev);
+ if (!pnd || !test_and_clear_bit(addr >> 2, pnd->addrs)) {
+ err = -EADDRNOTAVAIL;
+ pnd = NULL;
+ } else if (bitmap_empty(pnd->addrs, 64))
+ list_del_rcu(&pnd->list);
+ else
+ pnd = NULL;
+ mutex_unlock(&pndevs->lock);
+
+ if (pnd)
+ kfree_rcu(pnd, rcu);
+
+ return err;
+}
+
+/* Gets a source address toward a destination, through an interface. */
+u8 phonet_address_get(struct net_device *dev, u8 daddr)
+{
+ struct phonet_device *pnd;
+ u8 saddr;
+
+ rcu_read_lock();
+ pnd = __phonet_get_rcu(dev);
+ if (pnd) {
+ BUG_ON(bitmap_empty(pnd->addrs, 64));
+
+ /* Use same source address as destination, if possible */
+ if (test_bit(daddr >> 2, pnd->addrs))
+ saddr = daddr;
+ else
+ saddr = find_first_bit(pnd->addrs, 64) << 2;
+ } else
+ saddr = PN_NO_ADDR;
+ rcu_read_unlock();
+
+ if (saddr == PN_NO_ADDR) {
+		/* Fall back to another device */
+ struct net_device *def_dev;
+
+ def_dev = phonet_device_get(dev_net(dev));
+ if (def_dev) {
+ if (def_dev != dev)
+ saddr = phonet_address_get(def_dev, daddr);
+ dev_put(def_dev);
+ }
+ }
+ return saddr;
+}
+
+int phonet_address_lookup(struct net *net, u8 addr)
+{
+ struct phonet_device_list *pndevs = phonet_device_list(net);
+ struct phonet_device *pnd;
+ int err = -EADDRNOTAVAIL;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(pnd, &pndevs->list, list) {
+ /* Don't allow unregistering devices! */
+ if ((pnd->netdev->reg_state != NETREG_REGISTERED) ||
+ ((pnd->netdev->flags & IFF_UP)) != IFF_UP)
+ continue;
+
+ if (test_bit(addr >> 2, pnd->addrs)) {
+ err = 0;
+ goto found;
+ }
+ }
+found:
+ rcu_read_unlock();
+ return err;
+}
+
+/* automatically configure a Phonet device, if supported */
+static int phonet_device_autoconf(struct net_device *dev)
+{
+ struct if_phonet_req req;
+ int ret;
+
+ if (!dev->netdev_ops->ndo_siocdevprivate)
+ return -EOPNOTSUPP;
+
+ ret = dev->netdev_ops->ndo_siocdevprivate(dev, (struct ifreq *)&req,
+ NULL, SIOCPNGAUTOCONF);
+ if (ret < 0)
+ return ret;
+
+ ASSERT_RTNL();
+ ret = phonet_address_add(dev, req.ifr_phonet_autoconf.device);
+ if (ret)
+ return ret;
+ phonet_address_notify(RTM_NEWADDR, dev,
+ req.ifr_phonet_autoconf.device);
+ return 0;
+}
+
+static void phonet_route_autodel(struct net_device *dev)
+{
+ struct phonet_net *pnn = phonet_pernet(dev_net(dev));
+ unsigned int i;
+ DECLARE_BITMAP(deleted, 64);
+
+ /* Remove left-over Phonet routes */
+ bitmap_zero(deleted, 64);
+ mutex_lock(&pnn->routes.lock);
+ for (i = 0; i < 64; i++)
+ if (rcu_access_pointer(pnn->routes.table[i]) == dev) {
+ RCU_INIT_POINTER(pnn->routes.table[i], NULL);
+ set_bit(i, deleted);
+ }
+ mutex_unlock(&pnn->routes.lock);
+
+ if (bitmap_empty(deleted, 64))
+ return; /* short-circuit RCU */
+ synchronize_rcu();
+ for_each_set_bit(i, deleted, 64) {
+ rtm_phonet_notify(RTM_DELROUTE, dev, i);
+ dev_put(dev);
+ }
+}
+
+/* notify Phonet of device events */
+static int phonet_device_notify(struct notifier_block *me, unsigned long what,
+ void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+
+ switch (what) {
+ case NETDEV_REGISTER:
+ if (dev->type == ARPHRD_PHONET)
+ phonet_device_autoconf(dev);
+ break;
+ case NETDEV_UNREGISTER:
+ phonet_device_destroy(dev);
+ phonet_route_autodel(dev);
+ break;
+ }
+ return 0;
+
+}
+
+static struct notifier_block phonet_device_notifier = {
+ .notifier_call = phonet_device_notify,
+ .priority = 0,
+};
+
+/* Per-namespace Phonet device handling */
+static int __net_init phonet_init_net(struct net *net)
+{
+ struct phonet_net *pnn = phonet_pernet(net);
+
+ if (!proc_create_net("phonet", 0, net->proc_net, &pn_sock_seq_ops,
+ sizeof(struct seq_net_private)))
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&pnn->pndevs.list);
+ mutex_init(&pnn->pndevs.lock);
+ mutex_init(&pnn->routes.lock);
+ return 0;
+}
+
+static void __net_exit phonet_exit_net(struct net *net)
+{
+ struct phonet_net *pnn = phonet_pernet(net);
+
+ remove_proc_entry("phonet", net->proc_net);
+ WARN_ON_ONCE(!list_empty(&pnn->pndevs.list));
+}
+
+static struct pernet_operations phonet_net_ops = {
+ .init = phonet_init_net,
+ .exit = phonet_exit_net,
+ .id = &phonet_net_id,
+ .size = sizeof(struct phonet_net),
+};
+
+/* Initialize Phonet devices list */
+int __init phonet_device_init(void)
+{
+ int err = register_pernet_subsys(&phonet_net_ops);
+ if (err)
+ return err;
+
+ proc_create_net("pnresource", 0, init_net.proc_net, &pn_res_seq_ops,
+ sizeof(struct seq_net_private));
+ register_netdevice_notifier(&phonet_device_notifier);
+ err = phonet_netlink_register();
+ if (err)
+ phonet_device_exit();
+ return err;
+}
+
+void phonet_device_exit(void)
+{
+ rtnl_unregister_all(PF_PHONET);
+ unregister_netdevice_notifier(&phonet_device_notifier);
+ unregister_pernet_subsys(&phonet_net_ops);
+ remove_proc_entry("pnresource", init_net.proc_net);
+}
+
+int phonet_route_add(struct net_device *dev, u8 daddr)
+{
+ struct phonet_net *pnn = phonet_pernet(dev_net(dev));
+ struct phonet_routes *routes = &pnn->routes;
+ int err = -EEXIST;
+
+ daddr = daddr >> 2;
+ mutex_lock(&routes->lock);
+ if (routes->table[daddr] == NULL) {
+ rcu_assign_pointer(routes->table[daddr], dev);
+ dev_hold(dev);
+ err = 0;
+ }
+ mutex_unlock(&routes->lock);
+ return err;
+}
+
+int phonet_route_del(struct net_device *dev, u8 daddr)
+{
+ struct phonet_net *pnn = phonet_pernet(dev_net(dev));
+ struct phonet_routes *routes = &pnn->routes;
+
+ daddr = daddr >> 2;
+ mutex_lock(&routes->lock);
+ if (rcu_access_pointer(routes->table[daddr]) == dev)
+ RCU_INIT_POINTER(routes->table[daddr], NULL);
+ else
+ dev = NULL;
+ mutex_unlock(&routes->lock);
+
+ if (!dev)
+ return -ENOENT;
+ synchronize_rcu();
+ dev_put(dev);
+ return 0;
+}
+
+struct net_device *phonet_route_get_rcu(struct net *net, u8 daddr)
+{
+ struct phonet_net *pnn = phonet_pernet(net);
+ struct phonet_routes *routes = &pnn->routes;
+ struct net_device *dev;
+
+ daddr >>= 2;
+ dev = rcu_dereference(routes->table[daddr]);
+ return dev;
+}
+
+struct net_device *phonet_route_output(struct net *net, u8 daddr)
+{
+ struct phonet_net *pnn = phonet_pernet(net);
+ struct phonet_routes *routes = &pnn->routes;
+ struct net_device *dev;
+
+ daddr >>= 2;
+ rcu_read_lock();
+ dev = rcu_dereference(routes->table[daddr]);
+ dev_hold(dev);
+ rcu_read_unlock();
+
+ if (!dev)
+ dev = phonet_device_get(net); /* Default route */
+ return dev;
+}
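
Note on pn_dev.c above: the 64-entry address bitmap and route table are keyed on the 6 high-order bits of a Phonet address (hence the recurring "addr >> 2"), since the 2 low-order bits of a valid address are always zero. A minimal standalone sketch of that mapping, using a hypothetical example address, not kernel code:

#include <stdio.h>

int main(void)
{
	unsigned char addr = 0x60;	/* hypothetical Phonet device address */
	unsigned int idx = addr >> 2;	/* slot in pnd->addrs / routes.table[] */

	printf("address 0x%02x -> index %u -> address 0x%02x\n",
	       addr, idx, (unsigned char)(idx << 2));
	return 0;
}
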
diff --git a/net/phonet/pn_netlink.c b/net/phonet/pn_netlink.c
new file mode 100644
index 0000000000..59aebe2968
--- /dev/null
+++ b/net/phonet/pn_netlink.c
@@ -0,0 +1,306 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * File: pn_netlink.c
+ *
+ * Phonet netlink interface
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ * Remi Denis-Courmont
+ */
+
+#include <linux/kernel.h>
+#include <linux/netlink.h>
+#include <linux/phonet.h>
+#include <linux/slab.h>
+#include <net/sock.h>
+#include <net/phonet/pn_dev.h>
+
+/* Device address handling */
+
+static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
+ u32 portid, u32 seq, int event);
+
+void phonet_address_notify(int event, struct net_device *dev, u8 addr)
+{
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
+ nla_total_size(1), GFP_KERNEL);
+ if (skb == NULL)
+ goto errout;
+ err = fill_addr(skb, dev, addr, 0, 0, event);
+ if (err < 0) {
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+ rtnl_notify(skb, dev_net(dev), 0,
+ RTNLGRP_PHONET_IFADDR, NULL, GFP_KERNEL);
+ return;
+errout:
+ rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_IFADDR, err);
+}
+
+static const struct nla_policy ifa_phonet_policy[IFA_MAX+1] = {
+ [IFA_LOCAL] = { .type = NLA_U8 },
+};
+
+static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tb[IFA_MAX+1];
+ struct net_device *dev;
+ struct ifaddrmsg *ifm;
+ int err;
+ u8 pnaddr;
+
+ if (!netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!netlink_capable(skb, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ASSERT_RTNL();
+
+ err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
+ ifa_phonet_policy, extack);
+ if (err < 0)
+ return err;
+
+ ifm = nlmsg_data(nlh);
+ if (tb[IFA_LOCAL] == NULL)
+ return -EINVAL;
+ pnaddr = nla_get_u8(tb[IFA_LOCAL]);
+ if (pnaddr & 3)
+ /* Phonet addresses only have 6 high-order bits */
+ return -EINVAL;
+
+ dev = __dev_get_by_index(net, ifm->ifa_index);
+ if (dev == NULL)
+ return -ENODEV;
+
+ if (nlh->nlmsg_type == RTM_NEWADDR)
+ err = phonet_address_add(dev, pnaddr);
+ else
+ err = phonet_address_del(dev, pnaddr);
+ if (!err)
+ phonet_address_notify(nlh->nlmsg_type, dev, pnaddr);
+ return err;
+}
+
+static int fill_addr(struct sk_buff *skb, struct net_device *dev, u8 addr,
+ u32 portid, u32 seq, int event)
+{
+ struct ifaddrmsg *ifm;
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(*ifm), 0);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ ifm = nlmsg_data(nlh);
+ ifm->ifa_family = AF_PHONET;
+ ifm->ifa_prefixlen = 0;
+ ifm->ifa_flags = IFA_F_PERMANENT;
+ ifm->ifa_scope = RT_SCOPE_LINK;
+ ifm->ifa_index = dev->ifindex;
+ if (nla_put_u8(skb, IFA_LOCAL, addr))
+ goto nla_put_failure;
+ nlmsg_end(skb, nlh);
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+static int getaddr_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct phonet_device_list *pndevs;
+ struct phonet_device *pnd;
+ int dev_idx = 0, dev_start_idx = cb->args[0];
+ int addr_idx = 0, addr_start_idx = cb->args[1];
+
+ pndevs = phonet_device_list(sock_net(skb->sk));
+ rcu_read_lock();
+ list_for_each_entry_rcu(pnd, &pndevs->list, list) {
+ u8 addr;
+
+ if (dev_idx > dev_start_idx)
+ addr_start_idx = 0;
+ if (dev_idx++ < dev_start_idx)
+ continue;
+
+ addr_idx = 0;
+ for_each_set_bit(addr, pnd->addrs, 64) {
+ if (addr_idx++ < addr_start_idx)
+ continue;
+
+ if (fill_addr(skb, pnd->netdev, addr << 2,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWADDR) < 0)
+ goto out;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ cb->args[0] = dev_idx;
+ cb->args[1] = addr_idx;
+
+ return skb->len;
+}
+
+/* Routes handling */
+
+static int fill_route(struct sk_buff *skb, struct net_device *dev, u8 dst,
+ u32 portid, u32 seq, int event)
+{
+ struct rtmsg *rtm;
+ struct nlmsghdr *nlh;
+
+ nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), 0);
+ if (nlh == NULL)
+ return -EMSGSIZE;
+
+ rtm = nlmsg_data(nlh);
+ rtm->rtm_family = AF_PHONET;
+ rtm->rtm_dst_len = 6;
+ rtm->rtm_src_len = 0;
+ rtm->rtm_tos = 0;
+ rtm->rtm_table = RT_TABLE_MAIN;
+ rtm->rtm_protocol = RTPROT_STATIC;
+ rtm->rtm_scope = RT_SCOPE_UNIVERSE;
+ rtm->rtm_type = RTN_UNICAST;
+ rtm->rtm_flags = 0;
+ if (nla_put_u8(skb, RTA_DST, dst) ||
+ nla_put_u32(skb, RTA_OIF, dev->ifindex))
+ goto nla_put_failure;
+ nlmsg_end(skb, nlh);
+ return 0;
+
+nla_put_failure:
+ nlmsg_cancel(skb, nlh);
+ return -EMSGSIZE;
+}
+
+void rtm_phonet_notify(int event, struct net_device *dev, u8 dst)
+{
+ struct sk_buff *skb;
+ int err = -ENOBUFS;
+
+ skb = nlmsg_new(NLMSG_ALIGN(sizeof(struct ifaddrmsg)) +
+ nla_total_size(1) + nla_total_size(4), GFP_KERNEL);
+ if (skb == NULL)
+ goto errout;
+ err = fill_route(skb, dev, dst, 0, 0, event);
+ if (err < 0) {
+ WARN_ON(err == -EMSGSIZE);
+ kfree_skb(skb);
+ goto errout;
+ }
+ rtnl_notify(skb, dev_net(dev), 0,
+ RTNLGRP_PHONET_ROUTE, NULL, GFP_KERNEL);
+ return;
+errout:
+ rtnl_set_sk_err(dev_net(dev), RTNLGRP_PHONET_ROUTE, err);
+}
+
+static const struct nla_policy rtm_phonet_policy[RTA_MAX+1] = {
+ [RTA_DST] = { .type = NLA_U8 },
+ [RTA_OIF] = { .type = NLA_U32 },
+};
+
+static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack)
+{
+ struct net *net = sock_net(skb->sk);
+ struct nlattr *tb[RTA_MAX+1];
+ struct net_device *dev;
+ struct rtmsg *rtm;
+ int err;
+ u8 dst;
+
+ if (!netlink_capable(skb, CAP_NET_ADMIN))
+ return -EPERM;
+
+ if (!netlink_capable(skb, CAP_SYS_ADMIN))
+ return -EPERM;
+
+ ASSERT_RTNL();
+
+ err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
+ rtm_phonet_policy, extack);
+ if (err < 0)
+ return err;
+
+ rtm = nlmsg_data(nlh);
+ if (rtm->rtm_table != RT_TABLE_MAIN || rtm->rtm_type != RTN_UNICAST)
+ return -EINVAL;
+ if (tb[RTA_DST] == NULL || tb[RTA_OIF] == NULL)
+ return -EINVAL;
+ dst = nla_get_u8(tb[RTA_DST]);
+ if (dst & 3) /* Phonet addresses only have 6 high-order bits */
+ return -EINVAL;
+
+ dev = __dev_get_by_index(net, nla_get_u32(tb[RTA_OIF]));
+ if (dev == NULL)
+ return -ENODEV;
+
+ if (nlh->nlmsg_type == RTM_NEWROUTE)
+ err = phonet_route_add(dev, dst);
+ else
+ err = phonet_route_del(dev, dst);
+ if (!err)
+ rtm_phonet_notify(nlh->nlmsg_type, dev, dst);
+ return err;
+}
+
+static int route_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct net *net = sock_net(skb->sk);
+ u8 addr;
+
+ rcu_read_lock();
+ for (addr = cb->args[0]; addr < 64; addr++) {
+ struct net_device *dev = phonet_route_get_rcu(net, addr << 2);
+
+ if (!dev)
+ continue;
+
+ if (fill_route(skb, dev, addr << 2, NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, RTM_NEWROUTE) < 0)
+ goto out;
+ }
+
+out:
+ rcu_read_unlock();
+ cb->args[0] = addr;
+
+ return skb->len;
+}
+
+int __init phonet_netlink_register(void)
+{
+ int err = rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWADDR,
+ addr_doit, NULL, 0);
+ if (err)
+ return err;
+
+	/* Further rtnl_register_module() calls cannot fail */
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELADDR,
+ addr_doit, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETADDR,
+ NULL, getaddr_dumpit, 0);
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_NEWROUTE,
+ route_doit, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_DELROUTE,
+ route_doit, NULL, 0);
+ rtnl_register_module(THIS_MODULE, PF_PHONET, RTM_GETROUTE,
+ NULL, route_dumpit, 0);
+ return 0;
+}
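
For reference, addr_doit() above is reachable from userspace with a plain NETLINK_ROUTE request carrying an ifaddrmsg with ifa_family = AF_PHONET and a one-byte IFA_LOCAL attribute. The following is only a hedged sketch, not part of the patch: it assumes CAP_NET_ADMIN and CAP_SYS_ADMIN, a Phonet interface name in argv[1], and the hypothetical address 0x60 (low two bits zero, as required); reading back the netlink ACK is omitted.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>

#ifndef AF_PHONET
#define AF_PHONET 35	/* PF_PHONET from <linux/socket.h> */
#endif

int main(int argc, char **argv)
{
	struct {
		struct nlmsghdr nlh;
		struct ifaddrmsg ifa;
		char attrs[64];
	} req;
	struct sockaddr_nl snl = { .nl_family = AF_NETLINK };
	struct rtattr *rta;
	unsigned char pnaddr = 0x60;	/* hypothetical Phonet address */
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s IFNAME\n", argv[0]);
		return 1;
	}

	memset(&req, 0, sizeof(req));
	req.nlh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifaddrmsg));
	req.nlh.nlmsg_type = RTM_NEWADDR;
	req.nlh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.ifa.ifa_family = AF_PHONET;
	req.ifa.ifa_index = if_nametoindex(argv[1]);

	/* Append the one-byte IFA_LOCAL attribute checked by ifa_phonet_policy */
	rta = (struct rtattr *)((char *)&req + NLMSG_ALIGN(req.nlh.nlmsg_len));
	rta->rta_type = IFA_LOCAL;
	rta->rta_len = RTA_LENGTH(sizeof(pnaddr));
	memcpy(RTA_DATA(rta), &pnaddr, sizeof(pnaddr));
	req.nlh.nlmsg_len = NLMSG_ALIGN(req.nlh.nlmsg_len) + RTA_ALIGN(rta->rta_len);

	fd = socket(AF_NETLINK, SOCK_DGRAM, NETLINK_ROUTE);
	if (fd < 0 || sendto(fd, &req, req.nlh.nlmsg_len, 0,
			     (struct sockaddr *)&snl, sizeof(snl)) < 0) {
		perror("netlink");
		return 1;
	}
	close(fd);
	return 0;
}
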
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
new file mode 100644
index 0000000000..1018340d89
--- /dev/null
+++ b/net/phonet/socket.c
@@ -0,0 +1,772 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * File: socket.c
+ *
+ * Phonet sockets
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Authors: Sakari Ailus <sakari.ailus@nokia.com>
+ * Rémi Denis-Courmont
+ */
+
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/net.h>
+#include <linux/poll.h>
+#include <linux/sched/signal.h>
+
+#include <net/sock.h>
+#include <net/tcp_states.h>
+
+#include <linux/phonet.h>
+#include <linux/export.h>
+#include <net/phonet/phonet.h>
+#include <net/phonet/pep.h>
+#include <net/phonet/pn_dev.h>
+
+static int pn_socket_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+
+ if (sk) {
+ sock->sk = NULL;
+ sk->sk_prot->close(sk, 0);
+ }
+ return 0;
+}
+
+#define PN_HASHSIZE 16
+#define PN_HASHMASK (PN_HASHSIZE-1)
+
+
+static struct {
+ struct hlist_head hlist[PN_HASHSIZE];
+ struct mutex lock;
+} pnsocks;
+
+void __init pn_sock_init(void)
+{
+ unsigned int i;
+
+ for (i = 0; i < PN_HASHSIZE; i++)
+ INIT_HLIST_HEAD(pnsocks.hlist + i);
+ mutex_init(&pnsocks.lock);
+}
+
+static struct hlist_head *pn_hash_list(u16 obj)
+{
+ return pnsocks.hlist + (obj & PN_HASHMASK);
+}
+
+/*
+ * Find a socket based on a socket address, matching only certain fields.
+ * Also grab sock if it was found. Remember to sock_put it later.
+ */
+struct sock *pn_find_sock_by_sa(struct net *net, const struct sockaddr_pn *spn)
+{
+ struct sock *sknode;
+ struct sock *rval = NULL;
+ u16 obj = pn_sockaddr_get_object(spn);
+ u8 res = spn->spn_resource;
+ struct hlist_head *hlist = pn_hash_list(obj);
+
+ rcu_read_lock();
+ sk_for_each_rcu(sknode, hlist) {
+ struct pn_sock *pn = pn_sk(sknode);
+ BUG_ON(!pn->sobject); /* unbound socket */
+
+ if (!net_eq(sock_net(sknode), net))
+ continue;
+ if (pn_port(obj)) {
+ /* Look up socket by port */
+ if (pn_port(pn->sobject) != pn_port(obj))
+ continue;
+ } else {
+ /* If port is zero, look up by resource */
+ if (pn->resource != res)
+ continue;
+ }
+ if (pn_addr(pn->sobject) &&
+ pn_addr(pn->sobject) != pn_addr(obj))
+ continue;
+
+ rval = sknode;
+ sock_hold(sknode);
+ break;
+ }
+ rcu_read_unlock();
+
+ return rval;
+}
+
+/* Deliver a broadcast packet (only in bottom-half) */
+void pn_deliver_sock_broadcast(struct net *net, struct sk_buff *skb)
+{
+ struct hlist_head *hlist = pnsocks.hlist;
+ unsigned int h;
+
+ rcu_read_lock();
+ for (h = 0; h < PN_HASHSIZE; h++) {
+ struct sock *sknode;
+
+ sk_for_each(sknode, hlist) {
+ struct sk_buff *clone;
+
+ if (!net_eq(sock_net(sknode), net))
+ continue;
+ if (!sock_flag(sknode, SOCK_BROADCAST))
+ continue;
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (clone) {
+ sock_hold(sknode);
+ sk_receive_skb(sknode, clone, 0);
+ }
+ }
+ hlist++;
+ }
+ rcu_read_unlock();
+}
+
+int pn_sock_hash(struct sock *sk)
+{
+ struct hlist_head *hlist = pn_hash_list(pn_sk(sk)->sobject);
+
+ mutex_lock(&pnsocks.lock);
+ sk_add_node_rcu(sk, hlist);
+ mutex_unlock(&pnsocks.lock);
+
+ return 0;
+}
+EXPORT_SYMBOL(pn_sock_hash);
+
+void pn_sock_unhash(struct sock *sk)
+{
+ mutex_lock(&pnsocks.lock);
+ sk_del_node_init_rcu(sk);
+ mutex_unlock(&pnsocks.lock);
+ pn_sock_unbind_all_res(sk);
+ synchronize_rcu();
+}
+EXPORT_SYMBOL(pn_sock_unhash);
+
+static DEFINE_MUTEX(port_mutex);
+
+static int pn_socket_bind(struct socket *sock, struct sockaddr *addr, int len)
+{
+ struct sock *sk = sock->sk;
+ struct pn_sock *pn = pn_sk(sk);
+ struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
+ int err;
+ u16 handle;
+ u8 saddr;
+
+ if (sk->sk_prot->bind)
+ return sk->sk_prot->bind(sk, addr, len);
+
+ if (len < sizeof(struct sockaddr_pn))
+ return -EINVAL;
+ if (spn->spn_family != AF_PHONET)
+ return -EAFNOSUPPORT;
+
+ handle = pn_sockaddr_get_object((struct sockaddr_pn *)addr);
+ saddr = pn_addr(handle);
+ if (saddr && phonet_address_lookup(sock_net(sk), saddr))
+ return -EADDRNOTAVAIL;
+
+ lock_sock(sk);
+ if (sk->sk_state != TCP_CLOSE || pn_port(pn->sobject)) {
+ err = -EINVAL; /* attempt to rebind */
+ goto out;
+ }
+ WARN_ON(sk_hashed(sk));
+ mutex_lock(&port_mutex);
+ err = sk->sk_prot->get_port(sk, pn_port(handle));
+ if (err)
+ goto out_port;
+
+ /* get_port() sets the port, bind() sets the address if applicable */
+ pn->sobject = pn_object(saddr, pn_port(pn->sobject));
+ pn->resource = spn->spn_resource;
+
+ /* Enable RX on the socket */
+ err = sk->sk_prot->hash(sk);
+out_port:
+ mutex_unlock(&port_mutex);
+out:
+ release_sock(sk);
+ return err;
+}
+
+static int pn_socket_autobind(struct socket *sock)
+{
+ struct sockaddr_pn sa;
+ int err;
+
+ memset(&sa, 0, sizeof(sa));
+ sa.spn_family = AF_PHONET;
+ err = pn_socket_bind(sock, (struct sockaddr *)&sa,
+ sizeof(struct sockaddr_pn));
+ if (err != -EINVAL)
+ return err;
+ BUG_ON(!pn_port(pn_sk(sock->sk)->sobject));
+ return 0; /* socket was already bound */
+}
+
+static int pn_socket_connect(struct socket *sock, struct sockaddr *addr,
+ int len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct pn_sock *pn = pn_sk(sk);
+ struct sockaddr_pn *spn = (struct sockaddr_pn *)addr;
+ struct task_struct *tsk = current;
+ long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+ int err;
+
+ if (pn_socket_autobind(sock))
+ return -ENOBUFS;
+ if (len < sizeof(struct sockaddr_pn))
+ return -EINVAL;
+ if (spn->spn_family != AF_PHONET)
+ return -EAFNOSUPPORT;
+
+ lock_sock(sk);
+
+ switch (sock->state) {
+ case SS_UNCONNECTED:
+ if (sk->sk_state != TCP_CLOSE) {
+ err = -EISCONN;
+ goto out;
+ }
+ break;
+ case SS_CONNECTING:
+ err = -EALREADY;
+ goto out;
+ default:
+ err = -EISCONN;
+ goto out;
+ }
+
+ pn->dobject = pn_sockaddr_get_object(spn);
+ pn->resource = pn_sockaddr_get_resource(spn);
+ sock->state = SS_CONNECTING;
+
+ err = sk->sk_prot->connect(sk, addr, len);
+ if (err) {
+ sock->state = SS_UNCONNECTED;
+ pn->dobject = 0;
+ goto out;
+ }
+
+ while (sk->sk_state == TCP_SYN_SENT) {
+ DEFINE_WAIT(wait);
+
+ if (!timeo) {
+ err = -EINPROGRESS;
+ goto out;
+ }
+ if (signal_pending(tsk)) {
+ err = sock_intr_errno(timeo);
+ goto out;
+ }
+
+ prepare_to_wait_exclusive(sk_sleep(sk), &wait,
+ TASK_INTERRUPTIBLE);
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ finish_wait(sk_sleep(sk), &wait);
+ }
+
+ if ((1 << sk->sk_state) & (TCPF_SYN_RECV|TCPF_ESTABLISHED))
+ err = 0;
+ else if (sk->sk_state == TCP_CLOSE_WAIT)
+ err = -ECONNRESET;
+ else
+ err = -ECONNREFUSED;
+ sock->state = err ? SS_UNCONNECTED : SS_CONNECTED;
+out:
+ release_sock(sk);
+ return err;
+}
+
+static int pn_socket_accept(struct socket *sock, struct socket *newsock,
+ int flags, bool kern)
+{
+ struct sock *sk = sock->sk;
+ struct sock *newsk;
+ int err;
+
+ if (unlikely(sk->sk_state != TCP_LISTEN))
+ return -EINVAL;
+
+ newsk = sk->sk_prot->accept(sk, flags, &err, kern);
+ if (!newsk)
+ return err;
+
+ lock_sock(newsk);
+ sock_graft(newsk, newsock);
+ newsock->state = SS_CONNECTED;
+ release_sock(newsk);
+ return 0;
+}
+
+static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
+ int peer)
+{
+ struct sock *sk = sock->sk;
+ struct pn_sock *pn = pn_sk(sk);
+
+ memset(addr, 0, sizeof(struct sockaddr_pn));
+ addr->sa_family = AF_PHONET;
+ if (!peer) /* Race with bind() here is userland's problem. */
+ pn_sockaddr_set_object((struct sockaddr_pn *)addr,
+ pn->sobject);
+
+ return sizeof(struct sockaddr_pn);
+}
+
+static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ struct pep_sock *pn = pep_sk(sk);
+ __poll_t mask = 0;
+
+ poll_wait(file, sk_sleep(sk), wait);
+
+ if (sk->sk_state == TCP_CLOSE)
+ return EPOLLERR;
+ if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+ mask |= EPOLLIN | EPOLLRDNORM;
+ if (!skb_queue_empty_lockless(&pn->ctrlreq_queue))
+ mask |= EPOLLPRI;
+ if (!mask && sk->sk_state == TCP_CLOSE_WAIT)
+ return EPOLLHUP;
+
+ if (sk->sk_state == TCP_ESTABLISHED &&
+ refcount_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf &&
+ atomic_read(&pn->tx_credits))
+ mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND;
+
+ return mask;
+}
+
+static int pn_socket_ioctl(struct socket *sock, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sock *sk = sock->sk;
+ struct pn_sock *pn = pn_sk(sk);
+
+ if (cmd == SIOCPNGETOBJECT) {
+ struct net_device *dev;
+ u16 handle;
+ u8 saddr;
+
+ if (get_user(handle, (__u16 __user *)arg))
+ return -EFAULT;
+
+ lock_sock(sk);
+ if (sk->sk_bound_dev_if)
+ dev = dev_get_by_index(sock_net(sk),
+ sk->sk_bound_dev_if);
+ else
+ dev = phonet_device_get(sock_net(sk));
+ if (dev && (dev->flags & IFF_UP))
+ saddr = phonet_address_get(dev, pn_addr(handle));
+ else
+ saddr = PN_NO_ADDR;
+ release_sock(sk);
+
+ dev_put(dev);
+ if (saddr == PN_NO_ADDR)
+ return -EHOSTUNREACH;
+
+ handle = pn_object(saddr, pn_port(pn->sobject));
+ return put_user(handle, (__u16 __user *)arg);
+ }
+
+ return sk_ioctl(sk, cmd, (void __user *)arg);
+}
+
+static int pn_socket_listen(struct socket *sock, int backlog)
+{
+ struct sock *sk = sock->sk;
+ int err = 0;
+
+ if (pn_socket_autobind(sock))
+ return -ENOBUFS;
+
+ lock_sock(sk);
+ if (sock->state != SS_UNCONNECTED) {
+ err = -EINVAL;
+ goto out;
+ }
+
+ if (sk->sk_state != TCP_LISTEN) {
+ sk->sk_state = TCP_LISTEN;
+ sk->sk_ack_backlog = 0;
+ }
+ sk->sk_max_ack_backlog = backlog;
+out:
+ release_sock(sk);
+ return err;
+}
+
+static int pn_socket_sendmsg(struct socket *sock, struct msghdr *m,
+ size_t total_len)
+{
+ struct sock *sk = sock->sk;
+
+ if (pn_socket_autobind(sock))
+ return -EAGAIN;
+
+ return sk->sk_prot->sendmsg(sk, m, total_len);
+}
+
+const struct proto_ops phonet_dgram_ops = {
+ .family = AF_PHONET,
+ .owner = THIS_MODULE,
+ .release = pn_socket_release,
+ .bind = pn_socket_bind,
+ .connect = sock_no_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = pn_socket_getname,
+ .poll = datagram_poll,
+ .ioctl = pn_socket_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .sendmsg = pn_socket_sendmsg,
+ .recvmsg = sock_common_recvmsg,
+ .mmap = sock_no_mmap,
+};
+
+const struct proto_ops phonet_stream_ops = {
+ .family = AF_PHONET,
+ .owner = THIS_MODULE,
+ .release = pn_socket_release,
+ .bind = pn_socket_bind,
+ .connect = pn_socket_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = pn_socket_accept,
+ .getname = pn_socket_getname,
+ .poll = pn_socket_poll,
+ .ioctl = pn_socket_ioctl,
+ .listen = pn_socket_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = sock_common_setsockopt,
+ .getsockopt = sock_common_getsockopt,
+ .sendmsg = pn_socket_sendmsg,
+ .recvmsg = sock_common_recvmsg,
+ .mmap = sock_no_mmap,
+};
+EXPORT_SYMBOL(phonet_stream_ops);
+
+/* allocate port for a socket */
+int pn_sock_get_port(struct sock *sk, unsigned short sport)
+{
+ static int port_cur;
+ struct net *net = sock_net(sk);
+ struct pn_sock *pn = pn_sk(sk);
+ struct sockaddr_pn try_sa;
+ struct sock *tmpsk;
+
+ memset(&try_sa, 0, sizeof(struct sockaddr_pn));
+ try_sa.spn_family = AF_PHONET;
+ WARN_ON(!mutex_is_locked(&port_mutex));
+ if (!sport) {
+ /* search free port */
+ int port, pmin, pmax;
+
+ phonet_get_local_port_range(&pmin, &pmax);
+ for (port = pmin; port <= pmax; port++) {
+ port_cur++;
+ if (port_cur < pmin || port_cur > pmax)
+ port_cur = pmin;
+
+ pn_sockaddr_set_port(&try_sa, port_cur);
+ tmpsk = pn_find_sock_by_sa(net, &try_sa);
+ if (tmpsk == NULL) {
+ sport = port_cur;
+ goto found;
+ } else
+ sock_put(tmpsk);
+ }
+ } else {
+ /* try to find specific port */
+ pn_sockaddr_set_port(&try_sa, sport);
+ tmpsk = pn_find_sock_by_sa(net, &try_sa);
+ if (tmpsk == NULL)
+ /* No sock there! We can use that port... */
+ goto found;
+ else
+ sock_put(tmpsk);
+ }
+ /* the port must be in use already */
+ return -EADDRINUSE;
+
+found:
+ pn->sobject = pn_object(pn_addr(pn->sobject), sport);
+ return 0;
+}
+EXPORT_SYMBOL(pn_sock_get_port);
+
+#ifdef CONFIG_PROC_FS
+static struct sock *pn_sock_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct net *net = seq_file_net(seq);
+ struct hlist_head *hlist = pnsocks.hlist;
+ struct sock *sknode;
+ unsigned int h;
+
+ for (h = 0; h < PN_HASHSIZE; h++) {
+ sk_for_each_rcu(sknode, hlist) {
+ if (!net_eq(net, sock_net(sknode)))
+ continue;
+ if (!pos)
+ return sknode;
+ pos--;
+ }
+ hlist++;
+ }
+ return NULL;
+}
+
+static struct sock *pn_sock_get_next(struct seq_file *seq, struct sock *sk)
+{
+ struct net *net = seq_file_net(seq);
+
+ do
+ sk = sk_next(sk);
+ while (sk && !net_eq(net, sock_net(sk)));
+
+ return sk;
+}
+
+static void *pn_sock_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(rcu)
+{
+ rcu_read_lock();
+ return *pos ? pn_sock_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *pn_sock_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct sock *sk;
+
+ if (v == SEQ_START_TOKEN)
+ sk = pn_sock_get_idx(seq, 0);
+ else
+ sk = pn_sock_get_next(seq, v);
+ (*pos)++;
+ return sk;
+}
+
+static void pn_sock_seq_stop(struct seq_file *seq, void *v)
+ __releases(rcu)
+{
+ rcu_read_unlock();
+}
+
+static int pn_sock_seq_show(struct seq_file *seq, void *v)
+{
+ seq_setwidth(seq, 127);
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "pt loc rem rs st tx_queue rx_queue "
+ " uid inode ref pointer drops");
+ else {
+ struct sock *sk = v;
+ struct pn_sock *pn = pn_sk(sk);
+
+ seq_printf(seq, "%2d %04X:%04X:%02X %02X %08X:%08X %5d %lu "
+ "%d %pK %u",
+ sk->sk_protocol, pn->sobject, pn->dobject,
+ pn->resource, sk->sk_state,
+ sk_wmem_alloc_get(sk), sk_rmem_alloc_get(sk),
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ sock_i_ino(sk),
+ refcount_read(&sk->sk_refcnt), sk,
+ atomic_read(&sk->sk_drops));
+ }
+ seq_pad(seq, '\n');
+ return 0;
+}
+
+const struct seq_operations pn_sock_seq_ops = {
+ .start = pn_sock_seq_start,
+ .next = pn_sock_seq_next,
+ .stop = pn_sock_seq_stop,
+ .show = pn_sock_seq_show,
+};
+#endif
+
+static struct {
+ struct sock *sk[256];
+} pnres;
+
+/*
+ * Find and hold socket based on resource.
+ */
+struct sock *pn_find_sock_by_res(struct net *net, u8 res)
+{
+ struct sock *sk;
+
+ if (!net_eq(net, &init_net))
+ return NULL;
+
+ rcu_read_lock();
+ sk = rcu_dereference(pnres.sk[res]);
+ if (sk)
+ sock_hold(sk);
+ rcu_read_unlock();
+ return sk;
+}
+
+static DEFINE_MUTEX(resource_mutex);
+
+int pn_sock_bind_res(struct sock *sk, u8 res)
+{
+ int ret = -EADDRINUSE;
+
+ if (!net_eq(sock_net(sk), &init_net))
+ return -ENOIOCTLCMD;
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ if (pn_socket_autobind(sk->sk_socket))
+ return -EAGAIN;
+
+ mutex_lock(&resource_mutex);
+ if (pnres.sk[res] == NULL) {
+ sock_hold(sk);
+ rcu_assign_pointer(pnres.sk[res], sk);
+ ret = 0;
+ }
+ mutex_unlock(&resource_mutex);
+ return ret;
+}
+
+int pn_sock_unbind_res(struct sock *sk, u8 res)
+{
+ int ret = -ENOENT;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ mutex_lock(&resource_mutex);
+ if (pnres.sk[res] == sk) {
+ RCU_INIT_POINTER(pnres.sk[res], NULL);
+ ret = 0;
+ }
+ mutex_unlock(&resource_mutex);
+
+ if (ret == 0) {
+ synchronize_rcu();
+ sock_put(sk);
+ }
+ return ret;
+}
+
+void pn_sock_unbind_all_res(struct sock *sk)
+{
+ unsigned int res, match = 0;
+
+ mutex_lock(&resource_mutex);
+ for (res = 0; res < 256; res++) {
+ if (pnres.sk[res] == sk) {
+ RCU_INIT_POINTER(pnres.sk[res], NULL);
+ match++;
+ }
+ }
+ mutex_unlock(&resource_mutex);
+
+ while (match > 0) {
+ __sock_put(sk);
+ match--;
+ }
+ /* Caller is responsible for RCU sync before final sock_put() */
+}
+
+#ifdef CONFIG_PROC_FS
+static struct sock **pn_res_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct net *net = seq_file_net(seq);
+ unsigned int i;
+
+ if (!net_eq(net, &init_net))
+ return NULL;
+
+ for (i = 0; i < 256; i++) {
+ if (pnres.sk[i] == NULL)
+ continue;
+ if (!pos)
+ return pnres.sk + i;
+ pos--;
+ }
+ return NULL;
+}
+
+static struct sock **pn_res_get_next(struct seq_file *seq, struct sock **sk)
+{
+ struct net *net = seq_file_net(seq);
+ unsigned int i;
+
+ BUG_ON(!net_eq(net, &init_net));
+
+ for (i = (sk - pnres.sk) + 1; i < 256; i++)
+ if (pnres.sk[i])
+ return pnres.sk + i;
+ return NULL;
+}
+
+static void *pn_res_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(resource_mutex)
+{
+ mutex_lock(&resource_mutex);
+ return *pos ? pn_res_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *pn_res_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ struct sock **sk;
+
+ if (v == SEQ_START_TOKEN)
+ sk = pn_res_get_idx(seq, 0);
+ else
+ sk = pn_res_get_next(seq, v);
+ (*pos)++;
+ return sk;
+}
+
+static void pn_res_seq_stop(struct seq_file *seq, void *v)
+ __releases(resource_mutex)
+{
+ mutex_unlock(&resource_mutex);
+}
+
+static int pn_res_seq_show(struct seq_file *seq, void *v)
+{
+ seq_setwidth(seq, 63);
+ if (v == SEQ_START_TOKEN)
+ seq_puts(seq, "rs uid inode");
+ else {
+ struct sock **psk = v;
+ struct sock *sk = *psk;
+
+ seq_printf(seq, "%02X %5u %lu",
+ (int) (psk - pnres.sk),
+ from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk)),
+ sock_i_ino(sk));
+ }
+ seq_pad(seq, '\n');
+ return 0;
+}
+
+const struct seq_operations pn_res_seq_ops = {
+ .start = pn_res_seq_start,
+ .next = pn_res_seq_next,
+ .stop = pn_res_seq_stop,
+ .show = pn_res_seq_show,
+};
+#endif
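
A corresponding userspace sketch of the bind path in socket.c above, under stated assumptions: CONFIG_PHONET is enabled, the process has sufficient privileges to create PF_PHONET sockets, and 0x42 is just a hypothetical port inside the default dynamic range; spn_dev is left at 0 so any local address is used.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/phonet.h>

#ifndef AF_PHONET
#define AF_PHONET 35	/* PF_PHONET from <linux/socket.h> */
#endif

int main(void)
{
	struct sockaddr_pn spn;
	int fd = socket(AF_PHONET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket(AF_PHONET)");
		return 1;
	}

	memset(&spn, 0, sizeof(spn));
	spn.spn_family = AF_PHONET;
	spn.spn_obj = 0x42;	/* hypothetical port; spn_dev = 0 means any local address */

	if (bind(fd, (struct sockaddr *)&spn, sizeof(spn)) < 0) {
		perror("bind");
		close(fd);
		return 1;
	}
	puts("Phonet datagram socket bound");
	close(fd);
	return 0;
}
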
diff --git a/net/phonet/sysctl.c b/net/phonet/sysctl.c
new file mode 100644
index 0000000000..0d0bf41381
--- /dev/null
+++ b/net/phonet/sysctl.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * File: sysctl.c
+ *
+ * Phonet /proc/sys/net/phonet interface implementation
+ *
+ * Copyright (C) 2008 Nokia Corporation.
+ *
+ * Author: Rémi Denis-Courmont
+ */
+
+#include <linux/seqlock.h>
+#include <linux/sysctl.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+
+#include <net/sock.h>
+#include <linux/phonet.h>
+#include <net/phonet/phonet.h>
+
+#define DYNAMIC_PORT_MIN 0x40
+#define DYNAMIC_PORT_MAX 0x7f
+
+static DEFINE_SEQLOCK(local_port_range_lock);
+static int local_port_range_min[2] = {0, 0};
+static int local_port_range_max[2] = {1023, 1023};
+static int local_port_range[2] = {DYNAMIC_PORT_MIN, DYNAMIC_PORT_MAX};
+static struct ctl_table_header *phonet_table_hrd;
+
+static void set_local_port_range(int range[2])
+{
+ write_seqlock(&local_port_range_lock);
+ local_port_range[0] = range[0];
+ local_port_range[1] = range[1];
+ write_sequnlock(&local_port_range_lock);
+}
+
+void phonet_get_local_port_range(int *min, int *max)
+{
+ unsigned int seq;
+
+ do {
+ seq = read_seqbegin(&local_port_range_lock);
+ if (min)
+ *min = local_port_range[0];
+ if (max)
+ *max = local_port_range[1];
+ } while (read_seqretry(&local_port_range_lock, seq));
+}
+
+static int proc_local_port_range(struct ctl_table *table, int write,
+ void *buffer, size_t *lenp, loff_t *ppos)
+{
+ int ret;
+ int range[2] = {local_port_range[0], local_port_range[1]};
+ struct ctl_table tmp = {
+ .data = &range,
+ .maxlen = sizeof(range),
+ .mode = table->mode,
+ .extra1 = &local_port_range_min,
+ .extra2 = &local_port_range_max,
+ };
+
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+
+ if (write && ret == 0) {
+ if (range[1] < range[0])
+ ret = -EINVAL;
+ else
+ set_local_port_range(range);
+ }
+
+ return ret;
+}
+
+static struct ctl_table phonet_table[] = {
+ {
+ .procname = "local_port_range",
+ .data = &local_port_range,
+ .maxlen = sizeof(local_port_range),
+ .mode = 0644,
+ .proc_handler = proc_local_port_range,
+ },
+ { }
+};
+
+int __init phonet_sysctl_init(void)
+{
+ phonet_table_hrd = register_net_sysctl(&init_net, "net/phonet", phonet_table);
+ return phonet_table_hrd == NULL ? -ENOMEM : 0;
+}
+
+void phonet_sysctl_exit(void)
+{
+ unregister_net_sysctl_table(phonet_table_hrd);
+}
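
Finally, the local_port_range knob registered in sysctl.c above is visible from userspace at /proc/sys/net/phonet/local_port_range (the path follows from register_net_sysctl(&init_net, "net/phonet", ...)). A trivial sketch that just reads it back:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/phonet/local_port_range", "r");
	int pmin, pmax;

	if (f == NULL) {
		perror("local_port_range");
		return 1;
	}
	if (fscanf(f, "%d %d", &pmin, &pmax) == 2)
		printf("dynamic Phonet ports: %d-%d\n", pmin, pmax);
	fclose(f);
	return 0;
}
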