| field | value | |
|---|---|---|
| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
| commit | 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch) | |
| tree | 848558de17fb3008cdf4d861b01ac7781903ce39 /net/bluetooth | |
| parent | Initial commit. (diff) | |
| download | linux-b8823030eac27fc7a3d149e3a443a0b68810a78f.tar.xz, linux-b8823030eac27fc7a3d149e3a443a0b68810a78f.zip | |
Adding upstream version 6.1.76. (upstream/6.1.76, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/bluetooth')
66 files changed, 71908 insertions, 0 deletions
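
One practical note before the diff: 6lowpan.c below exposes its control surface through two debugfs files, 6lowpan_enable and 6lowpan_control. The following user-space sketch shows how they are driven; it is illustrative only and assumes debugfs mounted at /sys/kernel/debug, root privileges, and a placeholder peer address and address type:

```c
#include <stdio.h>
#include <stdlib.h>

/* Sketch under stated assumptions: debugfs at /sys/kernel/debug, run as
 * root, and "00:11:22:33:44:55 1" as a placeholder LE peer (bdaddr, then
 * address type, in the format parsed by get_l2cap_conn() below).
 */
static void write_str(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		exit(EXIT_FAILURE);
	}
	fputs(val, f);
	fclose(f);
}

int main(void)
{
	/* lowpan_enable_set() treats any non-zero value as "on" */
	write_str("/sys/kernel/debug/bluetooth/6lowpan_enable", "1");

	/* lowpan_control_write() understands "connect <bdaddr> <type>"
	 * and "disconnect <bdaddr> <type>"
	 */
	write_str("/sys/kernel/debug/bluetooth/6lowpan_control",
		  "connect 00:11:22:33:44:55 1");
	return 0;
}
```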
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c new file mode 100644 index 000000000..4eb1b3ced --- /dev/null +++ b/net/bluetooth/6lowpan.c @@ -0,0 +1,1283 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + Copyright (c) 2013-2014 Intel Corp. + +*/ + +#include <linux/if_arp.h> +#include <linux/netdevice.h> +#include <linux/etherdevice.h> +#include <linux/module.h> +#include <linux/debugfs.h> + +#include <net/ipv6.h> +#include <net/ip6_route.h> +#include <net/addrconf.h> +#include <net/pkt_sched.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> + +#include <net/6lowpan.h> /* for the compression support */ + +#define VERSION "0.1" + +static struct dentry *lowpan_enable_debugfs; +static struct dentry *lowpan_control_debugfs; + +#define IFACE_NAME_TEMPLATE "bt%d" + +struct skb_cb { + struct in6_addr addr; + struct in6_addr gw; + struct l2cap_chan *chan; +}; +#define lowpan_cb(skb) ((struct skb_cb *)((skb)->cb)) + +/* The devices list contains those devices that we are acting + * as a proxy. The BT 6LoWPAN device is a virtual device that + * connects to the Bluetooth LE device. The real connection to + * BT device is done via l2cap layer. There exists one + * virtual device / one BT 6LoWPAN network (=hciX device). + * The list contains struct lowpan_dev elements. + */ +static LIST_HEAD(bt_6lowpan_devices); +static DEFINE_SPINLOCK(devices_lock); + +static bool enable_6lowpan; + +/* We are listening incoming connections via this channel + */ +static struct l2cap_chan *listen_chan; +static DEFINE_MUTEX(set_lock); + +struct lowpan_peer { + struct list_head list; + struct rcu_head rcu; + struct l2cap_chan *chan; + + /* peer addresses in various formats */ + unsigned char lladdr[ETH_ALEN]; + struct in6_addr peer_addr; +}; + +struct lowpan_btle_dev { + struct list_head list; + + struct hci_dev *hdev; + struct net_device *netdev; + struct list_head peers; + atomic_t peer_count; /* number of items in peers list */ + + struct work_struct delete_netdev; + struct delayed_work notify_peers; +}; + +static inline struct lowpan_btle_dev * +lowpan_btle_dev(const struct net_device *netdev) +{ + return (struct lowpan_btle_dev *)lowpan_dev(netdev)->priv; +} + +static inline void peer_add(struct lowpan_btle_dev *dev, + struct lowpan_peer *peer) +{ + list_add_rcu(&peer->list, &dev->peers); + atomic_inc(&dev->peer_count); +} + +static inline bool peer_del(struct lowpan_btle_dev *dev, + struct lowpan_peer *peer) +{ + list_del_rcu(&peer->list); + kfree_rcu(peer, rcu); + + module_put(THIS_MODULE); + + if (atomic_dec_and_test(&dev->peer_count)) { + BT_DBG("last peer"); + return true; + } + + return false; +} + +static inline struct lowpan_peer * +__peer_lookup_chan(struct lowpan_btle_dev *dev, struct l2cap_chan *chan) +{ + struct lowpan_peer *peer; + + list_for_each_entry_rcu(peer, &dev->peers, list) { + if (peer->chan == chan) + return peer; + } + + return NULL; +} + +static inline struct lowpan_peer * +__peer_lookup_conn(struct lowpan_btle_dev *dev, struct l2cap_conn *conn) +{ + struct lowpan_peer *peer; + + list_for_each_entry_rcu(peer, &dev->peers, list) { + if (peer->chan->conn == conn) + return peer; + } + + return NULL; +} + +static inline struct lowpan_peer *peer_lookup_dst(struct lowpan_btle_dev *dev, + struct in6_addr *daddr, + struct sk_buff *skb) +{ + struct rt6_info *rt = (struct rt6_info *)skb_dst(skb); + int count = atomic_read(&dev->peer_count); + const struct in6_addr *nexthop; + struct lowpan_peer *peer; + struct neighbour 
*neigh; + + BT_DBG("peers %d addr %pI6c rt %p", count, daddr, rt); + + if (!rt) { + if (ipv6_addr_any(&lowpan_cb(skb)->gw)) { + /* There is neither route nor gateway, + * probably the destination is a direct peer. + */ + nexthop = daddr; + } else { + /* There is a known gateway + */ + nexthop = &lowpan_cb(skb)->gw; + } + } else { + nexthop = rt6_nexthop(rt, daddr); + + /* We need to remember the address because it is needed + * by bt_xmit() when sending the packet. In bt_xmit(), the + * destination routing info is not set. + */ + memcpy(&lowpan_cb(skb)->gw, nexthop, sizeof(struct in6_addr)); + } + + BT_DBG("gw %pI6c", nexthop); + + rcu_read_lock(); + + list_for_each_entry_rcu(peer, &dev->peers, list) { + BT_DBG("dst addr %pMR dst type %u ip %pI6c", + &peer->chan->dst, peer->chan->dst_type, + &peer->peer_addr); + + if (!ipv6_addr_cmp(&peer->peer_addr, nexthop)) { + rcu_read_unlock(); + return peer; + } + } + + /* use the neighbour cache for matching addresses assigned by SLAAC */ + neigh = __ipv6_neigh_lookup(dev->netdev, nexthop); + if (neigh) { + list_for_each_entry_rcu(peer, &dev->peers, list) { + if (!memcmp(neigh->ha, peer->lladdr, ETH_ALEN)) { + neigh_release(neigh); + rcu_read_unlock(); + return peer; + } + } + neigh_release(neigh); + } + + rcu_read_unlock(); + + return NULL; +} + +static struct lowpan_peer *lookup_peer(struct l2cap_conn *conn) +{ + struct lowpan_btle_dev *entry; + struct lowpan_peer *peer = NULL; + + rcu_read_lock(); + + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + peer = __peer_lookup_conn(entry, conn); + if (peer) + break; + } + + rcu_read_unlock(); + + return peer; +} + +static struct lowpan_btle_dev *lookup_dev(struct l2cap_conn *conn) +{ + struct lowpan_btle_dev *entry; + struct lowpan_btle_dev *dev = NULL; + + rcu_read_lock(); + + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + if (conn->hcon->hdev == entry->hdev) { + dev = entry; + break; + } + } + + rcu_read_unlock(); + + return dev; +} + +static int give_skb_to_upper(struct sk_buff *skb, struct net_device *dev) +{ + struct sk_buff *skb_cp; + + skb_cp = skb_copy(skb, GFP_ATOMIC); + if (!skb_cp) + return NET_RX_DROP; + + return netif_rx(skb_cp); +} + +static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev, + struct lowpan_peer *peer) +{ + const u8 *saddr; + + saddr = peer->lladdr; + + return lowpan_header_decompress(skb, netdev, netdev->dev_addr, saddr); +} + +static int recv_pkt(struct sk_buff *skb, struct net_device *dev, + struct lowpan_peer *peer) +{ + struct sk_buff *local_skb; + int ret; + + if (!netif_running(dev)) + goto drop; + + if (dev->type != ARPHRD_6LOWPAN || !skb->len) + goto drop; + + skb_reset_network_header(skb); + + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) + goto drop; + + /* check that it's our buffer */ + if (lowpan_is_ipv6(*skb_network_header(skb))) { + /* Pull off the 1-byte of 6lowpan header. */ + skb_pull(skb, 1); + + /* Copy the packet so that the IPv6 header is + * properly aligned. 
+ */ + local_skb = skb_copy_expand(skb, NET_SKB_PAD - 1, + skb_tailroom(skb), GFP_ATOMIC); + if (!local_skb) + goto drop; + + local_skb->protocol = htons(ETH_P_IPV6); + local_skb->pkt_type = PACKET_HOST; + local_skb->dev = dev; + + skb_set_transport_header(local_skb, sizeof(struct ipv6hdr)); + + if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) { + kfree_skb(local_skb); + goto drop; + } + + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + + consume_skb(local_skb); + consume_skb(skb); + } else if (lowpan_is_iphc(*skb_network_header(skb))) { + local_skb = skb_clone(skb, GFP_ATOMIC); + if (!local_skb) + goto drop; + + local_skb->dev = dev; + + ret = iphc_decompress(local_skb, dev, peer); + if (ret < 0) { + BT_DBG("iphc_decompress failed: %d", ret); + kfree_skb(local_skb); + goto drop; + } + + local_skb->protocol = htons(ETH_P_IPV6); + local_skb->pkt_type = PACKET_HOST; + + if (give_skb_to_upper(local_skb, dev) + != NET_RX_SUCCESS) { + kfree_skb(local_skb); + goto drop; + } + + dev->stats.rx_bytes += skb->len; + dev->stats.rx_packets++; + + consume_skb(local_skb); + consume_skb(skb); + } else { + BT_DBG("unknown packet type"); + goto drop; + } + + return NET_RX_SUCCESS; + +drop: + dev->stats.rx_dropped++; + return NET_RX_DROP; +} + +/* Packet from BT LE device */ +static int chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct lowpan_btle_dev *dev; + struct lowpan_peer *peer; + int err; + + peer = lookup_peer(chan->conn); + if (!peer) + return -ENOENT; + + dev = lookup_dev(chan->conn); + if (!dev || !dev->netdev) + return -ENOENT; + + err = recv_pkt(skb, dev->netdev, peer); + if (err) { + BT_DBG("recv pkt %d", err); + err = -EAGAIN; + } + + return err; +} + +static int setup_header(struct sk_buff *skb, struct net_device *netdev, + bdaddr_t *peer_addr, u8 *peer_addr_type) +{ + struct in6_addr ipv6_daddr; + struct ipv6hdr *hdr; + struct lowpan_btle_dev *dev; + struct lowpan_peer *peer; + u8 *daddr; + int err, status = 0; + + hdr = ipv6_hdr(skb); + + dev = lowpan_btle_dev(netdev); + + memcpy(&ipv6_daddr, &hdr->daddr, sizeof(ipv6_daddr)); + + if (ipv6_addr_is_multicast(&ipv6_daddr)) { + lowpan_cb(skb)->chan = NULL; + daddr = NULL; + } else { + BT_DBG("dest IP %pI6c", &ipv6_daddr); + + /* The packet might be sent to 6lowpan interface + * because of routing (either via default route + * or user set route) so get peer according to + * the destination address. + */ + peer = peer_lookup_dst(dev, &ipv6_daddr, skb); + if (!peer) { + BT_DBG("no such peer"); + return -ENOENT; + } + + daddr = peer->lladdr; + *peer_addr = peer->chan->dst; + *peer_addr_type = peer->chan->dst_type; + lowpan_cb(skb)->chan = peer->chan; + + status = 1; + } + + lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr); + + err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0); + if (err < 0) + return err; + + return status; +} + +static int header_create(struct sk_buff *skb, struct net_device *netdev, + unsigned short type, const void *_daddr, + const void *_saddr, unsigned int len) +{ + if (type != ETH_P_IPV6) + return -EINVAL; + + return 0; +} + +/* Packet to BT LE device */ +static int send_pkt(struct l2cap_chan *chan, struct sk_buff *skb, + struct net_device *netdev) +{ + struct msghdr msg; + struct kvec iv; + int err; + + /* Remember the skb so that we can send EAGAIN to the caller if + * we run out of credits. 
+ */ + chan->data = skb; + + iv.iov_base = skb->data; + iv.iov_len = skb->len; + + memset(&msg, 0, sizeof(msg)); + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, skb->len); + + err = l2cap_chan_send(chan, &msg, skb->len); + if (err > 0) { + netdev->stats.tx_bytes += err; + netdev->stats.tx_packets++; + return 0; + } + + if (err < 0) + netdev->stats.tx_errors++; + + return err; +} + +static int send_mcast_pkt(struct sk_buff *skb, struct net_device *netdev) +{ + struct sk_buff *local_skb; + struct lowpan_btle_dev *entry; + int err = 0; + + rcu_read_lock(); + + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + struct lowpan_peer *pentry; + struct lowpan_btle_dev *dev; + + if (entry->netdev != netdev) + continue; + + dev = lowpan_btle_dev(entry->netdev); + + list_for_each_entry_rcu(pentry, &dev->peers, list) { + int ret; + + local_skb = skb_clone(skb, GFP_ATOMIC); + + BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p", + netdev->name, + &pentry->chan->dst, pentry->chan->dst_type, + &pentry->peer_addr, pentry->chan); + ret = send_pkt(pentry->chan, local_skb, netdev); + if (ret < 0) + err = ret; + + kfree_skb(local_skb); + } + } + + rcu_read_unlock(); + + return err; +} + +static netdev_tx_t bt_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + int err = 0; + bdaddr_t addr; + u8 addr_type; + + /* We must take a copy of the skb before we modify/replace the ipv6 + * header as the header could be used elsewhere + */ + skb = skb_unshare(skb, GFP_ATOMIC); + if (!skb) + return NET_XMIT_DROP; + + /* Return values from setup_header() + * <0 - error, packet is dropped + * 0 - this is a multicast packet + * 1 - this is unicast packet + */ + err = setup_header(skb, netdev, &addr, &addr_type); + if (err < 0) { + kfree_skb(skb); + return NET_XMIT_DROP; + } + + if (err) { + if (lowpan_cb(skb)->chan) { + BT_DBG("xmit %s to %pMR type %u IP %pI6c chan %p", + netdev->name, &addr, addr_type, + &lowpan_cb(skb)->addr, lowpan_cb(skb)->chan); + err = send_pkt(lowpan_cb(skb)->chan, skb, netdev); + } else { + err = -ENOENT; + } + } else { + /* We need to send the packet to every device behind this + * interface. + */ + err = send_mcast_pkt(skb, netdev); + } + + dev_kfree_skb(skb); + + if (err) + BT_DBG("ERROR: xmit failed (%d)", err); + + return err < 0 ? 
NET_XMIT_DROP : err; +} + +static int bt_dev_init(struct net_device *dev) +{ + netdev_lockdep_set_classes(dev); + + return 0; +} + +static const struct net_device_ops netdev_ops = { + .ndo_init = bt_dev_init, + .ndo_start_xmit = bt_xmit, +}; + +static const struct header_ops header_ops = { + .create = header_create, +}; + +static void netdev_setup(struct net_device *dev) +{ + dev->hard_header_len = 0; + dev->needed_tailroom = 0; + dev->flags = IFF_RUNNING | IFF_MULTICAST; + dev->watchdog_timeo = 0; + dev->tx_queue_len = DEFAULT_TX_QUEUE_LEN; + + dev->netdev_ops = &netdev_ops; + dev->header_ops = &header_ops; + dev->needs_free_netdev = true; +} + +static struct device_type bt_type = { + .name = "bluetooth", +}; + +static void ifup(struct net_device *netdev) +{ + int err; + + rtnl_lock(); + err = dev_open(netdev, NULL); + if (err < 0) + BT_INFO("iface %s cannot be opened (%d)", netdev->name, err); + rtnl_unlock(); +} + +static void ifdown(struct net_device *netdev) +{ + rtnl_lock(); + dev_close(netdev); + rtnl_unlock(); +} + +static void do_notify_peers(struct work_struct *work) +{ + struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev, + notify_peers.work); + + netdev_notify_peers(dev->netdev); /* send neighbour adv at startup */ +} + +static bool is_bt_6lowpan(struct hci_conn *hcon) +{ + if (hcon->type != LE_LINK) + return false; + + if (!enable_6lowpan) + return false; + + return true; +} + +static struct l2cap_chan *chan_create(void) +{ + struct l2cap_chan *chan; + + chan = l2cap_chan_create(); + if (!chan) + return NULL; + + l2cap_chan_set_defaults(chan); + + chan->chan_type = L2CAP_CHAN_CONN_ORIENTED; + chan->mode = L2CAP_MODE_LE_FLOWCTL; + chan->imtu = 1280; + + return chan; +} + +static struct l2cap_chan *add_peer_chan(struct l2cap_chan *chan, + struct lowpan_btle_dev *dev, + bool new_netdev) +{ + struct lowpan_peer *peer; + + peer = kzalloc(sizeof(*peer), GFP_ATOMIC); + if (!peer) + return NULL; + + peer->chan = chan; + + baswap((void *)peer->lladdr, &chan->dst); + + lowpan_iphc_uncompress_eui48_lladdr(&peer->peer_addr, peer->lladdr); + + spin_lock(&devices_lock); + INIT_LIST_HEAD(&peer->list); + peer_add(dev, peer); + spin_unlock(&devices_lock); + + /* Notifying peers about us needs to be done without locks held */ + if (new_netdev) + INIT_DELAYED_WORK(&dev->notify_peers, do_notify_peers); + schedule_delayed_work(&dev->notify_peers, msecs_to_jiffies(100)); + + return peer->chan; +} + +static int setup_netdev(struct l2cap_chan *chan, struct lowpan_btle_dev **dev) +{ + struct net_device *netdev; + bdaddr_t addr; + int err; + + netdev = alloc_netdev(LOWPAN_PRIV_SIZE(sizeof(struct lowpan_btle_dev)), + IFACE_NAME_TEMPLATE, NET_NAME_UNKNOWN, + netdev_setup); + if (!netdev) + return -ENOMEM; + + netdev->addr_assign_type = NET_ADDR_PERM; + baswap(&addr, &chan->src); + __dev_addr_set(netdev, &addr, sizeof(addr)); + + netdev->netdev_ops = &netdev_ops; + SET_NETDEV_DEV(netdev, &chan->conn->hcon->hdev->dev); + SET_NETDEV_DEVTYPE(netdev, &bt_type); + + *dev = lowpan_btle_dev(netdev); + (*dev)->netdev = netdev; + (*dev)->hdev = chan->conn->hcon->hdev; + INIT_LIST_HEAD(&(*dev)->peers); + + spin_lock(&devices_lock); + INIT_LIST_HEAD(&(*dev)->list); + list_add_rcu(&(*dev)->list, &bt_6lowpan_devices); + spin_unlock(&devices_lock); + + err = lowpan_register_netdev(netdev, LOWPAN_LLTYPE_BTLE); + if (err < 0) { + BT_INFO("register_netdev failed %d", err); + spin_lock(&devices_lock); + list_del_rcu(&(*dev)->list); + spin_unlock(&devices_lock); + free_netdev(netdev); + goto out; + } + 
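+ /* Editorial note, not part of the upstream commit: from here on the
+ * lowpan_register_netdev() call above has succeeded; the debug print and
+ * the __LINK_STATE_PRESENT bit below mark the new bt%d interface present
+ * so that the later ifup() from chan_ready_cb() can open it.
+ */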
+ BT_DBG("ifindex %d peer bdaddr %pMR type %d my addr %pMR type %d", + netdev->ifindex, &chan->dst, chan->dst_type, + &chan->src, chan->src_type); + set_bit(__LINK_STATE_PRESENT, &netdev->state); + + return 0; + +out: + return err; +} + +static inline void chan_ready_cb(struct l2cap_chan *chan) +{ + struct lowpan_btle_dev *dev; + bool new_netdev = false; + + dev = lookup_dev(chan->conn); + + BT_DBG("chan %p conn %p dev %p", chan, chan->conn, dev); + + if (!dev) { + if (setup_netdev(chan, &dev) < 0) { + l2cap_chan_del(chan, -ENOENT); + return; + } + new_netdev = true; + } + + if (!try_module_get(THIS_MODULE)) + return; + + add_peer_chan(chan, dev, new_netdev); + ifup(dev->netdev); +} + +static inline struct l2cap_chan *chan_new_conn_cb(struct l2cap_chan *pchan) +{ + struct l2cap_chan *chan; + + chan = chan_create(); + if (!chan) + return NULL; + + chan->ops = pchan->ops; + + BT_DBG("chan %p pchan %p", chan, pchan); + + return chan; +} + +static void delete_netdev(struct work_struct *work) +{ + struct lowpan_btle_dev *entry = container_of(work, + struct lowpan_btle_dev, + delete_netdev); + + lowpan_unregister_netdev(entry->netdev); + + /* The entry pointer is deleted by the netdev destructor. */ +} + +static void chan_close_cb(struct l2cap_chan *chan) +{ + struct lowpan_btle_dev *entry; + struct lowpan_btle_dev *dev = NULL; + struct lowpan_peer *peer; + int err = -ENOENT; + bool last = false, remove = true; + + BT_DBG("chan %p conn %p", chan, chan->conn); + + if (chan->conn && chan->conn->hcon) { + if (!is_bt_6lowpan(chan->conn->hcon)) + return; + + /* If conn is set, then the netdev is also there and we should + * not remove it. + */ + remove = false; + } + + spin_lock(&devices_lock); + + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + dev = lowpan_btle_dev(entry->netdev); + peer = __peer_lookup_chan(dev, chan); + if (peer) { + last = peer_del(dev, peer); + err = 0; + + BT_DBG("dev %p removing %speer %p", dev, + last ? "last " : "1 ", peer); + BT_DBG("chan %p orig refcnt %u", chan, + kref_read(&chan->kref)); + + l2cap_chan_put(chan); + break; + } + } + + if (!err && last && dev && !atomic_read(&dev->peer_count)) { + spin_unlock(&devices_lock); + + cancel_delayed_work_sync(&dev->notify_peers); + + ifdown(dev->netdev); + + if (remove) { + INIT_WORK(&entry->delete_netdev, delete_netdev); + schedule_work(&entry->delete_netdev); + } + } else { + spin_unlock(&devices_lock); + } +} + +static void chan_state_change_cb(struct l2cap_chan *chan, int state, int err) +{ + BT_DBG("chan %p conn %p state %s err %d", chan, chan->conn, + state_to_string(state), err); +} + +static struct sk_buff *chan_alloc_skb_cb(struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb) +{ + /* Note that we must allocate using GFP_ATOMIC here as + * this function is called originally from netdev hard xmit + * function in atomic context. 
+ */ + return bt_skb_alloc(hdr_len + len, GFP_ATOMIC); +} + +static void chan_suspend_cb(struct l2cap_chan *chan) +{ + struct lowpan_btle_dev *dev; + + BT_DBG("chan %p suspend", chan); + + dev = lookup_dev(chan->conn); + if (!dev || !dev->netdev) + return; + + netif_stop_queue(dev->netdev); +} + +static void chan_resume_cb(struct l2cap_chan *chan) +{ + struct lowpan_btle_dev *dev; + + BT_DBG("chan %p resume", chan); + + dev = lookup_dev(chan->conn); + if (!dev || !dev->netdev) + return; + + netif_wake_queue(dev->netdev); +} + +static long chan_get_sndtimeo_cb(struct l2cap_chan *chan) +{ + return L2CAP_CONN_TIMEOUT; +} + +static const struct l2cap_ops bt_6lowpan_chan_ops = { + .name = "L2CAP 6LoWPAN channel", + .new_connection = chan_new_conn_cb, + .recv = chan_recv_cb, + .close = chan_close_cb, + .state_change = chan_state_change_cb, + .ready = chan_ready_cb, + .resume = chan_resume_cb, + .suspend = chan_suspend_cb, + .get_sndtimeo = chan_get_sndtimeo_cb, + .alloc_skb = chan_alloc_skb_cb, + + .teardown = l2cap_chan_no_teardown, + .defer = l2cap_chan_no_defer, + .set_shutdown = l2cap_chan_no_set_shutdown, +}; + +static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type) +{ + struct l2cap_chan *chan; + int err; + + chan = chan_create(); + if (!chan) + return -EINVAL; + + chan->ops = &bt_6lowpan_chan_ops; + + err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0, + addr, dst_type); + + BT_DBG("chan %p err %d", chan, err); + if (err < 0) + l2cap_chan_put(chan); + + return err; +} + +static int bt_6lowpan_disconnect(struct l2cap_conn *conn, u8 dst_type) +{ + struct lowpan_peer *peer; + + BT_DBG("conn %p dst type %u", conn, dst_type); + + peer = lookup_peer(conn); + if (!peer) + return -ENOENT; + + BT_DBG("peer %p chan %p", peer, peer->chan); + + l2cap_chan_close(peer->chan, ENOENT); + + return 0; +} + +static struct l2cap_chan *bt_6lowpan_listen(void) +{ + bdaddr_t *addr = BDADDR_ANY; + struct l2cap_chan *chan; + int err; + + if (!enable_6lowpan) + return NULL; + + chan = chan_create(); + if (!chan) + return NULL; + + chan->ops = &bt_6lowpan_chan_ops; + chan->state = BT_LISTEN; + chan->src_type = BDADDR_LE_PUBLIC; + + atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); + + BT_DBG("chan %p src type %u", chan, chan->src_type); + + err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP)); + if (err) { + l2cap_chan_put(chan); + BT_ERR("psm cannot be added err %d", err); + return NULL; + } + + return chan; +} + +static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type, + struct l2cap_conn **conn) +{ + struct hci_conn *hcon; + struct hci_dev *hdev; + int n; + + n = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx %hhu", + &addr->b[5], &addr->b[4], &addr->b[3], + &addr->b[2], &addr->b[1], &addr->b[0], + addr_type); + + if (n < 7) + return -EINVAL; + + /* The LE_PUBLIC address type is ignored because of BDADDR_ANY */ + hdev = hci_get_route(addr, BDADDR_ANY, BDADDR_LE_PUBLIC); + if (!hdev) + return -ENOENT; + + hci_dev_lock(hdev); + hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type); + hci_dev_unlock(hdev); + hci_dev_put(hdev); + + if (!hcon) + return -ENOENT; + + *conn = (struct l2cap_conn *)hcon->l2cap_data; + + BT_DBG("conn %p dst %pMR type %u", *conn, &hcon->dst, hcon->dst_type); + + return 0; +} + +static void disconnect_all_peers(void) +{ + struct lowpan_btle_dev *entry; + struct lowpan_peer *peer, *tmp_peer, *new_peer; + struct list_head peers; + + INIT_LIST_HEAD(&peers); + + /* We make a separate list of peers as the close_cb() will + * modify the device peers list so it is 
better not to mess + * with the same list at the same time. + */ + + rcu_read_lock(); + + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + list_for_each_entry_rcu(peer, &entry->peers, list) { + new_peer = kmalloc(sizeof(*new_peer), GFP_ATOMIC); + if (!new_peer) + break; + + new_peer->chan = peer->chan; + INIT_LIST_HEAD(&new_peer->list); + + list_add(&new_peer->list, &peers); + } + } + + rcu_read_unlock(); + + spin_lock(&devices_lock); + list_for_each_entry_safe(peer, tmp_peer, &peers, list) { + l2cap_chan_close(peer->chan, ENOENT); + + list_del_rcu(&peer->list); + kfree_rcu(peer, rcu); + } + spin_unlock(&devices_lock); +} + +struct set_enable { + struct work_struct work; + bool flag; +}; + +static void do_enable_set(struct work_struct *work) +{ + struct set_enable *set_enable = container_of(work, + struct set_enable, work); + + if (!set_enable->flag || enable_6lowpan != set_enable->flag) + /* Disconnect existing connections if 6lowpan is + * disabled + */ + disconnect_all_peers(); + + enable_6lowpan = set_enable->flag; + + mutex_lock(&set_lock); + if (listen_chan) { + l2cap_chan_close(listen_chan, 0); + l2cap_chan_put(listen_chan); + } + + listen_chan = bt_6lowpan_listen(); + mutex_unlock(&set_lock); + + kfree(set_enable); +} + +static int lowpan_enable_set(void *data, u64 val) +{ + struct set_enable *set_enable; + + set_enable = kzalloc(sizeof(*set_enable), GFP_KERNEL); + if (!set_enable) + return -ENOMEM; + + set_enable->flag = !!val; + INIT_WORK(&set_enable->work, do_enable_set); + + schedule_work(&set_enable->work); + + return 0; +} + +static int lowpan_enable_get(void *data, u64 *val) +{ + *val = enable_6lowpan; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(lowpan_enable_fops, lowpan_enable_get, + lowpan_enable_set, "%llu\n"); + +static ssize_t lowpan_control_write(struct file *fp, + const char __user *user_buffer, + size_t count, + loff_t *position) +{ + char buf[32]; + size_t buf_size = min(count, sizeof(buf) - 1); + int ret; + bdaddr_t addr; + u8 addr_type; + struct l2cap_conn *conn = NULL; + + if (copy_from_user(buf, user_buffer, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + + if (memcmp(buf, "connect ", 8) == 0) { + ret = get_l2cap_conn(&buf[8], &addr, &addr_type, &conn); + if (ret == -EINVAL) + return ret; + + mutex_lock(&set_lock); + if (listen_chan) { + l2cap_chan_close(listen_chan, 0); + l2cap_chan_put(listen_chan); + listen_chan = NULL; + } + mutex_unlock(&set_lock); + + if (conn) { + struct lowpan_peer *peer; + + if (!is_bt_6lowpan(conn->hcon)) + return -EINVAL; + + peer = lookup_peer(conn); + if (peer) { + BT_DBG("6LoWPAN connection already exists"); + return -EALREADY; + } + + BT_DBG("conn %p dst %pMR type %d user %u", conn, + &conn->hcon->dst, conn->hcon->dst_type, + addr_type); + } + + ret = bt_6lowpan_connect(&addr, addr_type); + if (ret < 0) + return ret; + + return count; + } + + if (memcmp(buf, "disconnect ", 11) == 0) { + ret = get_l2cap_conn(&buf[11], &addr, &addr_type, &conn); + if (ret < 0) + return ret; + + ret = bt_6lowpan_disconnect(conn, addr_type); + if (ret < 0) + return ret; + + return count; + } + + return count; +} + +static int lowpan_control_show(struct seq_file *f, void *ptr) +{ + struct lowpan_btle_dev *entry; + struct lowpan_peer *peer; + + spin_lock(&devices_lock); + + list_for_each_entry(entry, &bt_6lowpan_devices, list) { + list_for_each_entry(peer, &entry->peers, list) + seq_printf(f, "%pMR (type %u)\n", + &peer->chan->dst, peer->chan->dst_type); + } + + spin_unlock(&devices_lock); + + return 0; +} + +static int 
lowpan_control_open(struct inode *inode, struct file *file) +{ + return single_open(file, lowpan_control_show, inode->i_private); +} + +static const struct file_operations lowpan_control_fops = { + .open = lowpan_control_open, + .read = seq_read, + .write = lowpan_control_write, + .llseek = seq_lseek, + .release = single_release, +}; + +static void disconnect_devices(void) +{ + struct lowpan_btle_dev *entry, *tmp, *new_dev; + struct list_head devices; + + INIT_LIST_HEAD(&devices); + + /* We make a separate list of devices because the unregister_netdev() + * will call device_event() which will also want to modify the same + * devices list. + */ + + rcu_read_lock(); + + list_for_each_entry_rcu(entry, &bt_6lowpan_devices, list) { + new_dev = kmalloc(sizeof(*new_dev), GFP_ATOMIC); + if (!new_dev) + break; + + new_dev->netdev = entry->netdev; + INIT_LIST_HEAD(&new_dev->list); + + list_add_rcu(&new_dev->list, &devices); + } + + rcu_read_unlock(); + + list_for_each_entry_safe(entry, tmp, &devices, list) { + ifdown(entry->netdev); + BT_DBG("Unregistering netdev %s %p", + entry->netdev->name, entry->netdev); + lowpan_unregister_netdev(entry->netdev); + kfree(entry); + } +} + +static int device_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct lowpan_btle_dev *entry; + + if (netdev->type != ARPHRD_6LOWPAN) + return NOTIFY_DONE; + + switch (event) { + case NETDEV_UNREGISTER: + spin_lock(&devices_lock); + list_for_each_entry(entry, &bt_6lowpan_devices, list) { + if (entry->netdev == netdev) { + BT_DBG("Unregistered netdev %s %p", + netdev->name, netdev); + list_del(&entry->list); + break; + } + } + spin_unlock(&devices_lock); + break; + } + + return NOTIFY_DONE; +} + +static struct notifier_block bt_6lowpan_dev_notifier = { + .notifier_call = device_event, +}; + +static int __init bt_6lowpan_init(void) +{ + lowpan_enable_debugfs = debugfs_create_file_unsafe("6lowpan_enable", + 0644, bt_debugfs, + NULL, + &lowpan_enable_fops); + lowpan_control_debugfs = debugfs_create_file("6lowpan_control", 0644, + bt_debugfs, NULL, + &lowpan_control_fops); + + return register_netdevice_notifier(&bt_6lowpan_dev_notifier); +} + +static void __exit bt_6lowpan_exit(void) +{ + debugfs_remove(lowpan_enable_debugfs); + debugfs_remove(lowpan_control_debugfs); + + if (listen_chan) { + l2cap_chan_close(listen_chan, 0); + l2cap_chan_put(listen_chan); + } + + disconnect_devices(); + + unregister_netdevice_notifier(&bt_6lowpan_dev_notifier); +} + +module_init(bt_6lowpan_init); +module_exit(bt_6lowpan_exit); + +MODULE_AUTHOR("Jukka Rissanen <jukka.rissanen@linux.intel.com>"); +MODULE_DESCRIPTION("Bluetooth 6LoWPAN"); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); diff --git a/net/bluetooth/Kconfig b/net/bluetooth/Kconfig new file mode 100644 index 000000000..ae3bdc6df --- /dev/null +++ b/net/bluetooth/Kconfig @@ -0,0 +1,152 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Bluetooth subsystem configuration +# + +menuconfig BT + tristate "Bluetooth subsystem support" + depends on !S390 + depends on RFKILL || !RFKILL + select CRC16 + select CRYPTO + select CRYPTO_SKCIPHER + select CRYPTO_LIB_AES + imply CRYPTO_AES + select CRYPTO_CMAC + select CRYPTO_ECB + select CRYPTO_SHA256 + select CRYPTO_ECDH + help + Bluetooth is low-cost, low-power, short-range wireless technology. + It was designed as a replacement for cables and other short-range + technologies like IrDA. 
Bluetooth operates in personal area range + that typically extends up to 10 meters. More information about + Bluetooth can be found at <https://www.bluetooth.com/>. + + Linux Bluetooth subsystem consist of several layers: + Bluetooth Core + HCI device and connection manager, scheduler + SCO audio links + L2CAP (Logical Link Control and Adaptation Protocol) + SMP (Security Manager Protocol) on LE (Low Energy) links + ISO isochronous links + HCI Device drivers (Interface to the hardware) + RFCOMM Module (RFCOMM Protocol) + BNEP Module (Bluetooth Network Encapsulation Protocol) + CMTP Module (CAPI Message Transport Protocol) + HIDP Module (Human Interface Device Protocol) + + Say Y here to compile Bluetooth support into the kernel or say M to + compile it as module (bluetooth). + + To use Linux Bluetooth subsystem, you will need several user-space + utilities like hciconfig and bluetoothd. These utilities and updates + to Bluetooth kernel modules are provided in the BlueZ packages. For + more information, see <http://www.bluez.org/>. + +config BT_BREDR + bool "Bluetooth Classic (BR/EDR) features" + depends on BT + default y + help + Bluetooth Classic includes support for Basic Rate (BR) + available with Bluetooth version 1.0b or later and support + for Enhanced Data Rate (EDR) available with Bluetooth + version 2.0 or later. + +source "net/bluetooth/rfcomm/Kconfig" + +source "net/bluetooth/bnep/Kconfig" + +source "net/bluetooth/cmtp/Kconfig" + +source "net/bluetooth/hidp/Kconfig" + +config BT_HS + bool "Bluetooth High Speed (HS) features" + depends on BT_BREDR + help + Bluetooth High Speed includes support for off-loading + Bluetooth connections via 802.11 (wifi) physical layer + available with Bluetooth version 3.0 or later. + +config BT_LE + bool "Bluetooth Low Energy (LE) features" + depends on BT + default y + help + Bluetooth Low Energy includes support low-energy physical + layer available with Bluetooth version 4.0 or later. + +config BT_6LOWPAN + tristate "Bluetooth 6LoWPAN support" + depends on BT_LE && 6LOWPAN + help + IPv6 compression over Bluetooth Low Energy. + +config BT_LEDS + bool "Enable LED triggers" + depends on BT + depends on LEDS_CLASS + select LEDS_TRIGGERS + help + This option selects a few LED triggers for different + Bluetooth events. + +config BT_MSFTEXT + bool "Enable Microsoft extensions" + depends on BT + help + This options enables support for the Microsoft defined HCI + vendor extensions. + +config BT_AOSPEXT + bool "Enable Android Open Source Project extensions" + depends on BT + help + This options enables support for the Android Open Source + Project defined HCI vendor extensions. + +config BT_DEBUGFS + bool "Export Bluetooth internals in debugfs" + depends on BT && DEBUG_FS + default y + help + Provide extensive information about internal Bluetooth states + in debugfs. + +config BT_SELFTEST + bool "Bluetooth self testing support" + depends on BT && DEBUG_KERNEL + help + Run self tests when initializing the Bluetooth subsystem. This + is a developer option and can cause significant delay when booting + the system. + + When the Bluetooth subsystem is built as module, then the test + cases are run first thing at module load time. When the Bluetooth + subsystem is compiled into the kernel image, then the test cases + are run late in the initcall hierarchy. 
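+ # Editorial aside, not part of the upstream Kconfig: the timing described
+ # above means that with
+ #   CONFIG_BT=m
+ #   CONFIG_BT_SELFTEST=y
+ # the test cases run at "modprobe bluetooth" time, while with CONFIG_BT=y
+ # they run late in the boot-time initcall sequence instead.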
+ +config BT_SELFTEST_ECDH + bool "ECDH test cases" + depends on BT_LE && BT_SELFTEST + help + Run test cases for ECDH cryptographic functionality used by the + Bluetooth Low Energy Secure Connections feature. + +config BT_SELFTEST_SMP + bool "SMP test cases" + depends on BT_LE && BT_SELFTEST + help + Run test cases for SMP cryptographic functionality, including both + legacy SMP as well as the Secure Connections features. + +config BT_FEATURE_DEBUG + bool "Enable runtime option for debugging statements" + depends on BT && !DYNAMIC_DEBUG + help + This provides an option to enable/disable debugging statements + at runtime via the experimental features interface. + +source "drivers/bluetooth/Kconfig" diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile new file mode 100644 index 000000000..0e7b7db42 --- /dev/null +++ b/net/bluetooth/Makefile @@ -0,0 +1,27 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for the Linux Bluetooth subsystem. +# + +obj-$(CONFIG_BT) += bluetooth.o +obj-$(CONFIG_BT_RFCOMM) += rfcomm/ +obj-$(CONFIG_BT_BNEP) += bnep/ +obj-$(CONFIG_BT_CMTP) += cmtp/ +obj-$(CONFIG_BT_HIDP) += hidp/ +obj-$(CONFIG_BT_6LOWPAN) += bluetooth_6lowpan.o + +bluetooth_6lowpan-y := 6lowpan.o + +bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ + hci_sock.o hci_sysfs.o l2cap_core.o l2cap_sock.o smp.o lib.o \ + ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ + eir.o hci_sync.o + +bluetooth-$(CONFIG_BT_BREDR) += sco.o +bluetooth-$(CONFIG_BT_LE) += iso.o +bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o +bluetooth-$(CONFIG_BT_LEDS) += leds.o +bluetooth-$(CONFIG_BT_MSFTEXT) += msft.o +bluetooth-$(CONFIG_BT_AOSPEXT) += aosp.o +bluetooth-$(CONFIG_BT_DEBUGFS) += hci_debugfs.o +bluetooth-$(CONFIG_BT_SELFTEST) += selftest.o diff --git a/net/bluetooth/a2mp.c b/net/bluetooth/a2mp.c new file mode 100644 index 000000000..e7adb8a98 --- /dev/null +++ b/net/bluetooth/a2mp.c @@ -0,0 +1,1054 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. + Copyright (c) 2011,2012 Intel Corp. 
+ +*/ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> + +#include "hci_request.h" +#include "a2mp.h" +#include "amp.h" + +#define A2MP_FEAT_EXT 0x8000 + +/* Global AMP Manager list */ +static LIST_HEAD(amp_mgr_list); +static DEFINE_MUTEX(amp_mgr_list_lock); + +/* A2MP build & send command helper functions */ +static struct a2mp_cmd *__a2mp_build(u8 code, u8 ident, u16 len, void *data) +{ + struct a2mp_cmd *cmd; + int plen; + + plen = sizeof(*cmd) + len; + cmd = kzalloc(plen, GFP_KERNEL); + if (!cmd) + return NULL; + + cmd->code = code; + cmd->ident = ident; + cmd->len = cpu_to_le16(len); + + memcpy(cmd->data, data, len); + + return cmd; +} + +static void a2mp_send(struct amp_mgr *mgr, u8 code, u8 ident, u16 len, void *data) +{ + struct l2cap_chan *chan = mgr->a2mp_chan; + struct a2mp_cmd *cmd; + u16 total_len = len + sizeof(*cmd); + struct kvec iv; + struct msghdr msg; + + cmd = __a2mp_build(code, ident, len, data); + if (!cmd) + return; + + iv.iov_base = cmd; + iv.iov_len = total_len; + + memset(&msg, 0, sizeof(msg)); + + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, &iv, 1, total_len); + + l2cap_chan_send(chan, &msg, total_len); + + kfree(cmd); +} + +static u8 __next_ident(struct amp_mgr *mgr) +{ + if (++mgr->ident == 0) + mgr->ident = 1; + + return mgr->ident; +} + +static struct amp_mgr *amp_mgr_lookup_by_state(u8 state) +{ + struct amp_mgr *mgr; + + mutex_lock(&_mgr_list_lock); + list_for_each_entry(mgr, &_mgr_list, list) { + if (test_and_clear_bit(state, &mgr->state)) { + amp_mgr_get(mgr); + mutex_unlock(&_mgr_list_lock); + return mgr; + } + } + mutex_unlock(&_mgr_list_lock); + + return NULL; +} + +/* hci_dev_list shall be locked */ +static void __a2mp_add_cl(struct amp_mgr *mgr, struct a2mp_cl *cl) +{ + struct hci_dev *hdev; + int i = 1; + + cl[0].id = AMP_ID_BREDR; + cl[0].type = AMP_TYPE_BREDR; + cl[0].status = AMP_STATUS_BLUETOOTH_ONLY; + + list_for_each_entry(hdev, &hci_dev_list, list) { + if (hdev->dev_type == HCI_AMP) { + cl[i].id = hdev->id; + cl[i].type = hdev->amp_type; + if (test_bit(HCI_UP, &hdev->flags)) + cl[i].status = hdev->amp_status; + else + cl[i].status = AMP_STATUS_POWERED_DOWN; + i++; + } + } +} + +/* Processing A2MP messages */ +static int a2mp_command_rej(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_cmd_rej *rej = (void *) skb->data; + + if (le16_to_cpu(hdr->len) < sizeof(*rej)) + return -EINVAL; + + BT_DBG("ident %u reason %d", hdr->ident, le16_to_cpu(rej->reason)); + + skb_pull(skb, sizeof(*rej)); + + return 0; +} + +static int a2mp_discover_req(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_discov_req *req = (void *) skb->data; + u16 len = le16_to_cpu(hdr->len); + struct a2mp_discov_rsp *rsp; + u16 ext_feat; + u8 num_ctrl; + struct hci_dev *hdev; + + if (len < sizeof(*req)) + return -EINVAL; + + skb_pull(skb, sizeof(*req)); + + ext_feat = le16_to_cpu(req->ext_feat); + + BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat); + + /* check that packet is not broken for now */ + while (ext_feat & A2MP_FEAT_EXT) { + if (len < sizeof(ext_feat)) + return -EINVAL; + + ext_feat = get_unaligned_le16(skb->data); + BT_DBG("efm 0x%4.4x", ext_feat); + len -= sizeof(ext_feat); + skb_pull(skb, sizeof(ext_feat)); + } + + read_lock(&hci_dev_list_lock); + + /* at minimum the BR/EDR needs to be listed */ + num_ctrl = 1; + + list_for_each_entry(hdev, &hci_dev_list, list) { + if (hdev->dev_type == HCI_AMP) + num_ctrl++; + } + + len = 
struct_size(rsp, cl, num_ctrl); + rsp = kmalloc(len, GFP_ATOMIC); + if (!rsp) { + read_unlock(&hci_dev_list_lock); + return -ENOMEM; + } + + rsp->mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); + rsp->ext_feat = 0; + + __a2mp_add_cl(mgr, rsp->cl); + + read_unlock(&hci_dev_list_lock); + + a2mp_send(mgr, A2MP_DISCOVER_RSP, hdr->ident, len, rsp); + + kfree(rsp); + return 0; +} + +static int a2mp_discover_rsp(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_discov_rsp *rsp = (void *) skb->data; + u16 len = le16_to_cpu(hdr->len); + struct a2mp_cl *cl; + u16 ext_feat; + bool found = false; + + if (len < sizeof(*rsp)) + return -EINVAL; + + len -= sizeof(*rsp); + skb_pull(skb, sizeof(*rsp)); + + ext_feat = le16_to_cpu(rsp->ext_feat); + + BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(rsp->mtu), ext_feat); + + /* check that packet is not broken for now */ + while (ext_feat & A2MP_FEAT_EXT) { + if (len < sizeof(ext_feat)) + return -EINVAL; + + ext_feat = get_unaligned_le16(skb->data); + BT_DBG("efm 0x%4.4x", ext_feat); + len -= sizeof(ext_feat); + skb_pull(skb, sizeof(ext_feat)); + } + + cl = (void *) skb->data; + while (len >= sizeof(*cl)) { + BT_DBG("Remote AMP id %u type %u status %u", cl->id, cl->type, + cl->status); + + if (cl->id != AMP_ID_BREDR && cl->type != AMP_TYPE_BREDR) { + struct a2mp_info_req req; + + found = true; + + memset(&req, 0, sizeof(req)); + + req.id = cl->id; + a2mp_send(mgr, A2MP_GETINFO_REQ, __next_ident(mgr), + sizeof(req), &req); + } + + len -= sizeof(*cl); + cl = skb_pull(skb, sizeof(*cl)); + } + + /* Fall back to L2CAP init sequence */ + if (!found) { + struct l2cap_conn *conn = mgr->l2cap_conn; + struct l2cap_chan *chan; + + mutex_lock(&conn->chan_lock); + + list_for_each_entry(chan, &conn->chan_l, list) { + + BT_DBG("chan %p state %s", chan, + state_to_string(chan->state)); + + if (chan->scid == L2CAP_CID_A2MP) + continue; + + l2cap_chan_lock(chan); + + if (chan->state == BT_CONNECT) + l2cap_send_conn_req(chan); + + l2cap_chan_unlock(chan); + } + + mutex_unlock(&conn->chan_lock); + } + + return 0; +} + +static int a2mp_change_notify(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_cl *cl = (void *) skb->data; + + while (skb->len >= sizeof(*cl)) { + BT_DBG("Controller id %u type %u status %u", cl->id, cl->type, + cl->status); + cl = skb_pull(skb, sizeof(*cl)); + } + + /* TODO send A2MP_CHANGE_RSP */ + + return 0; +} + +static void read_local_amp_info_complete(struct hci_dev *hdev, u8 status, + u16 opcode) +{ + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + a2mp_send_getinfo_rsp(hdev); +} + +static int a2mp_getinfo_req(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_info_req *req = (void *) skb->data; + struct hci_dev *hdev; + struct hci_request hreq; + int err = 0; + + if (le16_to_cpu(hdr->len) < sizeof(*req)) + return -EINVAL; + + BT_DBG("id %u", req->id); + + hdev = hci_dev_get(req->id); + if (!hdev || hdev->dev_type != HCI_AMP) { + struct a2mp_info_rsp rsp; + + memset(&rsp, 0, sizeof(rsp)); + + rsp.id = req->id; + rsp.status = A2MP_STATUS_INVALID_CTRL_ID; + + a2mp_send(mgr, A2MP_GETINFO_RSP, hdr->ident, sizeof(rsp), + &rsp); + + goto done; + } + + set_bit(READ_LOC_AMP_INFO, &mgr->state); + hci_req_init(&hreq, hdev); + hci_req_add(&hreq, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL); + err = hci_req_run(&hreq, read_local_amp_info_complete); + if (err < 0) + a2mp_send_getinfo_rsp(hdev); + +done: + if (hdev) + hci_dev_put(hdev); + + skb_pull(skb, sizeof(*req)); + return 0; +} + 
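+ /* Editorial sketch, not part of the upstream file: every A2MP PDU above
+ * starts with the 4-byte header that __a2mp_build() fills in. The
+ * Discover Request sent by a2mp_discover_amp(), for example, is 8 bytes
+ * on the wire:
+ *
+ *   struct a2mp_cmd        { .code = A2MP_DISCOVER_REQ, .ident = 1,
+ *                            .len  = cpu_to_le16(4) }
+ *   struct a2mp_discov_req { .mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU),
+ *                            .ext_feat = 0 }
+ *
+ * and a2mp_discover_req() above walks exactly this layout back out of
+ * the skb.
+ */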
+static int a2mp_getinfo_rsp(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_info_rsp *rsp = (struct a2mp_info_rsp *) skb->data; + struct a2mp_amp_assoc_req req; + struct amp_ctrl *ctrl; + + if (le16_to_cpu(hdr->len) < sizeof(*rsp)) + return -EINVAL; + + BT_DBG("id %u status 0x%2.2x", rsp->id, rsp->status); + + if (rsp->status) + return -EINVAL; + + ctrl = amp_ctrl_add(mgr, rsp->id); + if (!ctrl) + return -ENOMEM; + + memset(&req, 0, sizeof(req)); + + req.id = rsp->id; + a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req), + &req); + + skb_pull(skb, sizeof(*rsp)); + return 0; +} + +static int a2mp_getampassoc_req(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_amp_assoc_req *req = (void *) skb->data; + struct hci_dev *hdev; + struct amp_mgr *tmp; + + if (le16_to_cpu(hdr->len) < sizeof(*req)) + return -EINVAL; + + BT_DBG("id %u", req->id); + + /* Make sure that other request is not processed */ + tmp = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC); + + hdev = hci_dev_get(req->id); + if (!hdev || hdev->amp_type == AMP_TYPE_BREDR || tmp) { + struct a2mp_amp_assoc_rsp rsp; + + memset(&rsp, 0, sizeof(rsp)); + rsp.id = req->id; + + if (tmp) { + rsp.status = A2MP_STATUS_COLLISION_OCCURED; + amp_mgr_put(tmp); + } else { + rsp.status = A2MP_STATUS_INVALID_CTRL_ID; + } + + a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, hdr->ident, sizeof(rsp), + &rsp); + + goto done; + } + + amp_read_loc_assoc(hdev, mgr); + +done: + if (hdev) + hci_dev_put(hdev); + + skb_pull(skb, sizeof(*req)); + return 0; +} + +static int a2mp_getampassoc_rsp(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_amp_assoc_rsp *rsp = (void *) skb->data; + u16 len = le16_to_cpu(hdr->len); + struct hci_dev *hdev; + struct amp_ctrl *ctrl; + struct hci_conn *hcon; + size_t assoc_len; + + if (len < sizeof(*rsp)) + return -EINVAL; + + assoc_len = len - sizeof(*rsp); + + BT_DBG("id %u status 0x%2.2x assoc len %zu", rsp->id, rsp->status, + assoc_len); + + if (rsp->status) + return -EINVAL; + + /* Save remote ASSOC data */ + ctrl = amp_ctrl_lookup(mgr, rsp->id); + if (ctrl) { + u8 *assoc; + + assoc = kmemdup(rsp->amp_assoc, assoc_len, GFP_KERNEL); + if (!assoc) { + amp_ctrl_put(ctrl); + return -ENOMEM; + } + + ctrl->assoc = assoc; + ctrl->assoc_len = assoc_len; + ctrl->assoc_rem_len = assoc_len; + ctrl->assoc_len_so_far = 0; + + amp_ctrl_put(ctrl); + } + + /* Create Phys Link */ + hdev = hci_dev_get(rsp->id); + if (!hdev) + return -EINVAL; + + hcon = phylink_add(hdev, mgr, rsp->id, true); + if (!hcon) + goto done; + + BT_DBG("Created hcon %p: loc:%u -> rem:%u", hcon, hdev->id, rsp->id); + + mgr->bredr_chan->remote_amp_id = rsp->id; + + amp_create_phylink(hdev, mgr, hcon); + +done: + hci_dev_put(hdev); + skb_pull(skb, len); + return 0; +} + +static int a2mp_createphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_physlink_req *req = (void *) skb->data; + struct a2mp_physlink_rsp rsp; + struct hci_dev *hdev; + struct hci_conn *hcon; + struct amp_ctrl *ctrl; + + if (le16_to_cpu(hdr->len) < sizeof(*req)) + return -EINVAL; + + BT_DBG("local_id %u, remote_id %u", req->local_id, req->remote_id); + + memset(&rsp, 0, sizeof(rsp)); + + rsp.local_id = req->remote_id; + rsp.remote_id = req->local_id; + + hdev = hci_dev_get(req->remote_id); + if (!hdev || hdev->amp_type == AMP_TYPE_BREDR) { + rsp.status = A2MP_STATUS_INVALID_CTRL_ID; + goto send_rsp; + } + + ctrl = amp_ctrl_lookup(mgr, rsp.remote_id); + if 
(!ctrl) { + ctrl = amp_ctrl_add(mgr, rsp.remote_id); + if (ctrl) { + amp_ctrl_get(ctrl); + } else { + rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; + goto send_rsp; + } + } + + if (ctrl) { + size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req); + u8 *assoc; + + assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); + if (!assoc) { + amp_ctrl_put(ctrl); + hci_dev_put(hdev); + return -ENOMEM; + } + + ctrl->assoc = assoc; + ctrl->assoc_len = assoc_len; + ctrl->assoc_rem_len = assoc_len; + ctrl->assoc_len_so_far = 0; + + amp_ctrl_put(ctrl); + } + + hcon = phylink_add(hdev, mgr, req->local_id, false); + if (hcon) { + amp_accept_phylink(hdev, mgr, hcon); + rsp.status = A2MP_STATUS_SUCCESS; + } else { + rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; + } + +send_rsp: + if (hdev) + hci_dev_put(hdev); + + /* Reply error now and success after HCI Write Remote AMP Assoc + command complete with success status + */ + if (rsp.status != A2MP_STATUS_SUCCESS) { + a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, hdr->ident, + sizeof(rsp), &rsp); + } else { + set_bit(WRITE_REMOTE_AMP_ASSOC, &mgr->state); + mgr->ident = hdr->ident; + } + + skb_pull(skb, le16_to_cpu(hdr->len)); + return 0; +} + +static int a2mp_discphyslink_req(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + struct a2mp_physlink_req *req = (void *) skb->data; + struct a2mp_physlink_rsp rsp; + struct hci_dev *hdev; + struct hci_conn *hcon; + + if (le16_to_cpu(hdr->len) < sizeof(*req)) + return -EINVAL; + + BT_DBG("local_id %u remote_id %u", req->local_id, req->remote_id); + + memset(&rsp, 0, sizeof(rsp)); + + rsp.local_id = req->remote_id; + rsp.remote_id = req->local_id; + rsp.status = A2MP_STATUS_SUCCESS; + + hdev = hci_dev_get(req->remote_id); + if (!hdev) { + rsp.status = A2MP_STATUS_INVALID_CTRL_ID; + goto send_rsp; + } + + hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, + &mgr->l2cap_conn->hcon->dst); + if (!hcon) { + bt_dev_err(hdev, "no phys link exist"); + rsp.status = A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS; + goto clean; + } + + /* TODO Disconnect Phys Link here */ + +clean: + hci_dev_put(hdev); + +send_rsp: + a2mp_send(mgr, A2MP_DISCONNPHYSLINK_RSP, hdr->ident, sizeof(rsp), &rsp); + + skb_pull(skb, sizeof(*req)); + return 0; +} + +static inline int a2mp_cmd_rsp(struct amp_mgr *mgr, struct sk_buff *skb, + struct a2mp_cmd *hdr) +{ + BT_DBG("ident %u code 0x%2.2x", hdr->ident, hdr->code); + + skb_pull(skb, le16_to_cpu(hdr->len)); + return 0; +} + +/* Handle A2MP signalling */ +static int a2mp_chan_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct a2mp_cmd *hdr; + struct amp_mgr *mgr = chan->data; + int err = 0; + + amp_mgr_get(mgr); + + while (skb->len >= sizeof(*hdr)) { + u16 len; + + hdr = (void *) skb->data; + len = le16_to_cpu(hdr->len); + + BT_DBG("code 0x%2.2x id %u len %u", hdr->code, hdr->ident, len); + + skb_pull(skb, sizeof(*hdr)); + + if (len > skb->len || !hdr->ident) { + err = -EINVAL; + break; + } + + mgr->ident = hdr->ident; + + switch (hdr->code) { + case A2MP_COMMAND_REJ: + a2mp_command_rej(mgr, skb, hdr); + break; + + case A2MP_DISCOVER_REQ: + err = a2mp_discover_req(mgr, skb, hdr); + break; + + case A2MP_CHANGE_NOTIFY: + err = a2mp_change_notify(mgr, skb, hdr); + break; + + case A2MP_GETINFO_REQ: + err = a2mp_getinfo_req(mgr, skb, hdr); + break; + + case A2MP_GETAMPASSOC_REQ: + err = a2mp_getampassoc_req(mgr, skb, hdr); + break; + + case A2MP_CREATEPHYSLINK_REQ: + err = a2mp_createphyslink_req(mgr, skb, hdr); + break; + + case A2MP_DISCONNPHYSLINK_REQ: + err = 
a2mp_discphyslink_req(mgr, skb, hdr); + break; + + case A2MP_DISCOVER_RSP: + err = a2mp_discover_rsp(mgr, skb, hdr); + break; + + case A2MP_GETINFO_RSP: + err = a2mp_getinfo_rsp(mgr, skb, hdr); + break; + + case A2MP_GETAMPASSOC_RSP: + err = a2mp_getampassoc_rsp(mgr, skb, hdr); + break; + + case A2MP_CHANGE_RSP: + case A2MP_CREATEPHYSLINK_RSP: + case A2MP_DISCONNPHYSLINK_RSP: + err = a2mp_cmd_rsp(mgr, skb, hdr); + break; + + default: + BT_ERR("Unknown A2MP sig cmd 0x%2.2x", hdr->code); + err = -EINVAL; + break; + } + } + + if (err) { + struct a2mp_cmd_rej rej; + + memset(&rej, 0, sizeof(rej)); + + rej.reason = cpu_to_le16(0); + hdr = (void *) skb->data; + + BT_DBG("Send A2MP Rej: cmd 0x%2.2x err %d", hdr->code, err); + + a2mp_send(mgr, A2MP_COMMAND_REJ, hdr->ident, sizeof(rej), + &rej); + } + + /* Always free skb and return success error code to prevent + from sending L2CAP Disconnect over A2MP channel */ + kfree_skb(skb); + + amp_mgr_put(mgr); + + return 0; +} + +static void a2mp_chan_close_cb(struct l2cap_chan *chan) +{ + l2cap_chan_put(chan); +} + +static void a2mp_chan_state_change_cb(struct l2cap_chan *chan, int state, + int err) +{ + struct amp_mgr *mgr = chan->data; + + if (!mgr) + return; + + BT_DBG("chan %p state %s", chan, state_to_string(state)); + + chan->state = state; + + switch (state) { + case BT_CLOSED: + if (mgr) + amp_mgr_put(mgr); + break; + } +} + +static struct sk_buff *a2mp_chan_alloc_skb_cb(struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb) +{ + struct sk_buff *skb; + + skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL); + if (!skb) + return ERR_PTR(-ENOMEM); + + return skb; +} + +static const struct l2cap_ops a2mp_chan_ops = { + .name = "L2CAP A2MP channel", + .recv = a2mp_chan_recv_cb, + .close = a2mp_chan_close_cb, + .state_change = a2mp_chan_state_change_cb, + .alloc_skb = a2mp_chan_alloc_skb_cb, + + /* Not implemented for A2MP */ + .new_connection = l2cap_chan_no_new_connection, + .teardown = l2cap_chan_no_teardown, + .ready = l2cap_chan_no_ready, + .defer = l2cap_chan_no_defer, + .resume = l2cap_chan_no_resume, + .set_shutdown = l2cap_chan_no_set_shutdown, + .get_sndtimeo = l2cap_chan_no_get_sndtimeo, +}; + +static struct l2cap_chan *a2mp_chan_open(struct l2cap_conn *conn, bool locked) +{ + struct l2cap_chan *chan; + int err; + + chan = l2cap_chan_create(); + if (!chan) + return NULL; + + BT_DBG("chan %p", chan); + + chan->chan_type = L2CAP_CHAN_FIXED; + chan->scid = L2CAP_CID_A2MP; + chan->dcid = L2CAP_CID_A2MP; + chan->omtu = L2CAP_A2MP_DEFAULT_MTU; + chan->imtu = L2CAP_A2MP_DEFAULT_MTU; + chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; + + chan->ops = &a2mp_chan_ops; + + l2cap_chan_set_defaults(chan); + chan->remote_max_tx = chan->max_tx; + chan->remote_tx_win = chan->tx_win; + + chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; + chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; + + skb_queue_head_init(&chan->tx_q); + + chan->mode = L2CAP_MODE_ERTM; + + err = l2cap_ertm_init(chan); + if (err < 0) { + l2cap_chan_del(chan, 0); + return NULL; + } + + chan->conf_state = 0; + + if (locked) + __l2cap_chan_add(conn, chan); + else + l2cap_chan_add(conn, chan); + + chan->remote_mps = chan->omtu; + chan->mps = chan->omtu; + + chan->state = BT_CONNECTED; + + return chan; +} + +/* AMP Manager functions */ +struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr) +{ + BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref)); + + kref_get(&mgr->kref); + + return mgr; +} + +static void amp_mgr_destroy(struct kref *kref) +{ + struct amp_mgr *mgr = 
container_of(kref, struct amp_mgr, kref); + + BT_DBG("mgr %p", mgr); + + mutex_lock(&_mgr_list_lock); + list_del(&mgr->list); + mutex_unlock(&_mgr_list_lock); + + amp_ctrl_list_flush(mgr); + kfree(mgr); +} + +int amp_mgr_put(struct amp_mgr *mgr) +{ + BT_DBG("mgr %p orig refcnt %d", mgr, kref_read(&mgr->kref)); + + return kref_put(&mgr->kref, &_mgr_destroy); +} + +static struct amp_mgr *amp_mgr_create(struct l2cap_conn *conn, bool locked) +{ + struct amp_mgr *mgr; + struct l2cap_chan *chan; + + mgr = kzalloc(sizeof(*mgr), GFP_KERNEL); + if (!mgr) + return NULL; + + BT_DBG("conn %p mgr %p", conn, mgr); + + mgr->l2cap_conn = conn; + + chan = a2mp_chan_open(conn, locked); + if (!chan) { + kfree(mgr); + return NULL; + } + + mgr->a2mp_chan = chan; + chan->data = mgr; + + conn->hcon->amp_mgr = mgr; + + kref_init(&mgr->kref); + + /* Remote AMP ctrl list initialization */ + INIT_LIST_HEAD(&mgr->amp_ctrls); + mutex_init(&mgr->amp_ctrls_lock); + + mutex_lock(&_mgr_list_lock); + list_add(&mgr->list, &_mgr_list); + mutex_unlock(&_mgr_list_lock); + + return mgr; +} + +struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, + struct sk_buff *skb) +{ + struct amp_mgr *mgr; + + if (conn->hcon->type != ACL_LINK) + return NULL; + + mgr = amp_mgr_create(conn, false); + if (!mgr) { + BT_ERR("Could not create AMP manager"); + return NULL; + } + + BT_DBG("mgr: %p chan %p", mgr, mgr->a2mp_chan); + + return mgr->a2mp_chan; +} + +void a2mp_send_getinfo_rsp(struct hci_dev *hdev) +{ + struct amp_mgr *mgr; + struct a2mp_info_rsp rsp; + + mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_INFO); + if (!mgr) + return; + + BT_DBG("%s mgr %p", hdev->name, mgr); + + memset(&rsp, 0, sizeof(rsp)); + + rsp.id = hdev->id; + rsp.status = A2MP_STATUS_INVALID_CTRL_ID; + + if (hdev->amp_type != AMP_TYPE_BREDR) { + rsp.status = 0; + rsp.total_bw = cpu_to_le32(hdev->amp_total_bw); + rsp.max_bw = cpu_to_le32(hdev->amp_max_bw); + rsp.min_latency = cpu_to_le32(hdev->amp_min_latency); + rsp.pal_cap = cpu_to_le16(hdev->amp_pal_cap); + rsp.assoc_size = cpu_to_le16(hdev->amp_assoc_size); + } + + a2mp_send(mgr, A2MP_GETINFO_RSP, mgr->ident, sizeof(rsp), &rsp); + amp_mgr_put(mgr); +} + +void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status) +{ + struct amp_mgr *mgr; + struct amp_assoc *loc_assoc = &hdev->loc_assoc; + struct a2mp_amp_assoc_rsp *rsp; + size_t len; + + mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC); + if (!mgr) + return; + + BT_DBG("%s mgr %p", hdev->name, mgr); + + len = sizeof(struct a2mp_amp_assoc_rsp) + loc_assoc->len; + rsp = kzalloc(len, GFP_KERNEL); + if (!rsp) { + amp_mgr_put(mgr); + return; + } + + rsp->id = hdev->id; + + if (status) { + rsp->status = A2MP_STATUS_INVALID_CTRL_ID; + } else { + rsp->status = A2MP_STATUS_SUCCESS; + memcpy(rsp->amp_assoc, loc_assoc->data, loc_assoc->len); + } + + a2mp_send(mgr, A2MP_GETAMPASSOC_RSP, mgr->ident, len, rsp); + amp_mgr_put(mgr); + kfree(rsp); +} + +void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status) +{ + struct amp_mgr *mgr; + struct amp_assoc *loc_assoc = &hdev->loc_assoc; + struct a2mp_physlink_req *req; + struct l2cap_chan *bredr_chan; + size_t len; + + mgr = amp_mgr_lookup_by_state(READ_LOC_AMP_ASSOC_FINAL); + if (!mgr) + return; + + len = sizeof(*req) + loc_assoc->len; + + BT_DBG("%s mgr %p assoc_len %zu", hdev->name, mgr, len); + + req = kzalloc(len, GFP_KERNEL); + if (!req) { + amp_mgr_put(mgr); + return; + } + + bredr_chan = mgr->bredr_chan; + if (!bredr_chan) + goto clean; + + req->local_id = hdev->id; + req->remote_id = 
bredr_chan->remote_amp_id; + memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len); + + a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req); + +clean: + amp_mgr_put(mgr); + kfree(req); +} + +void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status) +{ + struct amp_mgr *mgr; + struct a2mp_physlink_rsp rsp; + struct hci_conn *hs_hcon; + + mgr = amp_mgr_lookup_by_state(WRITE_REMOTE_AMP_ASSOC); + if (!mgr) + return; + + memset(&rsp, 0, sizeof(rsp)); + + hs_hcon = hci_conn_hash_lookup_state(hdev, AMP_LINK, BT_CONNECT); + if (!hs_hcon) { + rsp.status = A2MP_STATUS_UNABLE_START_LINK_CREATION; + } else { + rsp.remote_id = hs_hcon->remote_id; + rsp.status = A2MP_STATUS_SUCCESS; + } + + BT_DBG("%s mgr %p hs_hcon %p status %u", hdev->name, mgr, hs_hcon, + status); + + rsp.local_id = hdev->id; + a2mp_send(mgr, A2MP_CREATEPHYSLINK_RSP, mgr->ident, sizeof(rsp), &rsp); + amp_mgr_put(mgr); +} + +void a2mp_discover_amp(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct amp_mgr *mgr = conn->hcon->amp_mgr; + struct a2mp_discov_req req; + + BT_DBG("chan %p conn %p mgr %p", chan, conn, mgr); + + if (!mgr) { + mgr = amp_mgr_create(conn, true); + if (!mgr) + return; + } + + mgr->bredr_chan = chan; + + memset(&req, 0, sizeof(req)); + + req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); + req.ext_feat = 0; + a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req); +} diff --git a/net/bluetooth/a2mp.h b/net/bluetooth/a2mp.h new file mode 100644 index 000000000..2fd253a61 --- /dev/null +++ b/net/bluetooth/a2mp.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + Copyright (c) 2010,2011 Code Aurora Forum. All rights reserved. + Copyright (c) 2011,2012 Intel Corp. + +*/ + +#ifndef __A2MP_H +#define __A2MP_H + +#include <net/bluetooth/l2cap.h> + +enum amp_mgr_state { + READ_LOC_AMP_INFO, + READ_LOC_AMP_ASSOC, + READ_LOC_AMP_ASSOC_FINAL, + WRITE_REMOTE_AMP_ASSOC, +}; + +struct amp_mgr { + struct list_head list; + struct l2cap_conn *l2cap_conn; + struct l2cap_chan *a2mp_chan; + struct l2cap_chan *bredr_chan; + struct kref kref; + __u8 ident; + __u8 handle; + unsigned long state; + unsigned long flags; + + struct list_head amp_ctrls; + struct mutex amp_ctrls_lock; +}; + +struct a2mp_cmd { + __u8 code; + __u8 ident; + __le16 len; + __u8 data[]; +} __packed; + +/* A2MP command codes */ +#define A2MP_COMMAND_REJ 0x01 +struct a2mp_cmd_rej { + __le16 reason; + __u8 data[]; +} __packed; + +#define A2MP_DISCOVER_REQ 0x02 +struct a2mp_discov_req { + __le16 mtu; + __le16 ext_feat; +} __packed; + +struct a2mp_cl { + __u8 id; + __u8 type; + __u8 status; +} __packed; + +#define A2MP_DISCOVER_RSP 0x03 +struct a2mp_discov_rsp { + __le16 mtu; + __le16 ext_feat; + struct a2mp_cl cl[]; +} __packed; + +#define A2MP_CHANGE_NOTIFY 0x04 +#define A2MP_CHANGE_RSP 0x05 + +#define A2MP_GETINFO_REQ 0x06 +struct a2mp_info_req { + __u8 id; +} __packed; + +#define A2MP_GETINFO_RSP 0x07 +struct a2mp_info_rsp { + __u8 id; + __u8 status; + __le32 total_bw; + __le32 max_bw; + __le32 min_latency; + __le16 pal_cap; + __le16 assoc_size; +} __packed; + +#define A2MP_GETAMPASSOC_REQ 0x08 +struct a2mp_amp_assoc_req { + __u8 id; +} __packed; + +#define A2MP_GETAMPASSOC_RSP 0x09 +struct a2mp_amp_assoc_rsp { + __u8 id; + __u8 status; + __u8 amp_assoc[]; +} __packed; + +#define A2MP_CREATEPHYSLINK_REQ 0x0A +#define A2MP_DISCONNPHYSLINK_REQ 0x0C +struct a2mp_physlink_req { + __u8 local_id; + __u8 remote_id; + __u8 amp_assoc[]; +} __packed; + +#define A2MP_CREATEPHYSLINK_RSP 0x0B +#define 
A2MP_DISCONNPHYSLINK_RSP 0x0D +struct a2mp_physlink_rsp { + __u8 local_id; + __u8 remote_id; + __u8 status; +} __packed; + +/* A2MP response status */ +#define A2MP_STATUS_SUCCESS 0x00 +#define A2MP_STATUS_INVALID_CTRL_ID 0x01 +#define A2MP_STATUS_UNABLE_START_LINK_CREATION 0x02 +#define A2MP_STATUS_NO_PHYSICAL_LINK_EXISTS 0x02 +#define A2MP_STATUS_COLLISION_OCCURED 0x03 +#define A2MP_STATUS_DISCONN_REQ_RECVD 0x04 +#define A2MP_STATUS_PHYS_LINK_EXISTS 0x05 +#define A2MP_STATUS_SECURITY_VIOLATION 0x06 + +struct amp_mgr *amp_mgr_get(struct amp_mgr *mgr); + +#if IS_ENABLED(CONFIG_BT_HS) +int amp_mgr_put(struct amp_mgr *mgr); +struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, + struct sk_buff *skb); +void a2mp_discover_amp(struct l2cap_chan *chan); +#else +static inline int amp_mgr_put(struct amp_mgr *mgr) +{ + return 0; +} + +static inline struct l2cap_chan *a2mp_channel_create(struct l2cap_conn *conn, + struct sk_buff *skb) +{ + return NULL; +} + +static inline void a2mp_discover_amp(struct l2cap_chan *chan) +{ +} +#endif + +void a2mp_send_getinfo_rsp(struct hci_dev *hdev); +void a2mp_send_getampassoc_rsp(struct hci_dev *hdev, u8 status); +void a2mp_send_create_phy_link_req(struct hci_dev *hdev, u8 status); +void a2mp_send_create_phy_link_rsp(struct hci_dev *hdev, u8 status); + +#endif /* __A2MP_H */ diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c new file mode 100644 index 000000000..f1b751035 --- /dev/null +++ b/net/bluetooth/af_bluetooth.c @@ -0,0 +1,812 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth address family and sockets. 
*/ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/stringify.h> +#include <linux/sched/signal.h> + +#include <asm/ioctls.h> + +#include <net/bluetooth/bluetooth.h> +#include <linux/proc_fs.h> + +#include "leds.h" +#include "selftest.h" + +/* Bluetooth sockets */ +#define BT_MAX_PROTO (BTPROTO_LAST + 1) +static const struct net_proto_family *bt_proto[BT_MAX_PROTO]; +static DEFINE_RWLOCK(bt_proto_lock); + +static struct lock_class_key bt_lock_key[BT_MAX_PROTO]; +static const char *const bt_key_strings[BT_MAX_PROTO] = { + "sk_lock-AF_BLUETOOTH-BTPROTO_L2CAP", + "sk_lock-AF_BLUETOOTH-BTPROTO_HCI", + "sk_lock-AF_BLUETOOTH-BTPROTO_SCO", + "sk_lock-AF_BLUETOOTH-BTPROTO_RFCOMM", + "sk_lock-AF_BLUETOOTH-BTPROTO_BNEP", + "sk_lock-AF_BLUETOOTH-BTPROTO_CMTP", + "sk_lock-AF_BLUETOOTH-BTPROTO_HIDP", + "sk_lock-AF_BLUETOOTH-BTPROTO_AVDTP", + "sk_lock-AF_BLUETOOTH-BTPROTO_ISO", +}; + +static struct lock_class_key bt_slock_key[BT_MAX_PROTO]; +static const char *const bt_slock_key_strings[BT_MAX_PROTO] = { + "slock-AF_BLUETOOTH-BTPROTO_L2CAP", + "slock-AF_BLUETOOTH-BTPROTO_HCI", + "slock-AF_BLUETOOTH-BTPROTO_SCO", + "slock-AF_BLUETOOTH-BTPROTO_RFCOMM", + "slock-AF_BLUETOOTH-BTPROTO_BNEP", + "slock-AF_BLUETOOTH-BTPROTO_CMTP", + "slock-AF_BLUETOOTH-BTPROTO_HIDP", + "slock-AF_BLUETOOTH-BTPROTO_AVDTP", + "slock-AF_BLUETOOTH-BTPROTO_ISO", +}; + +void bt_sock_reclassify_lock(struct sock *sk, int proto) +{ + BUG_ON(!sk); + BUG_ON(!sock_allow_reclassification(sk)); + + sock_lock_init_class_and_name(sk, + bt_slock_key_strings[proto], &bt_slock_key[proto], + bt_key_strings[proto], &bt_lock_key[proto]); +} +EXPORT_SYMBOL(bt_sock_reclassify_lock); + +int bt_sock_register(int proto, const struct net_proto_family *ops) +{ + int err = 0; + + if (proto < 0 || proto >= BT_MAX_PROTO) + return -EINVAL; + + write_lock(&bt_proto_lock); + + if (bt_proto[proto]) + err = -EEXIST; + else + bt_proto[proto] = ops; + + write_unlock(&bt_proto_lock); + + return err; +} +EXPORT_SYMBOL(bt_sock_register); + +void bt_sock_unregister(int proto) +{ + if (proto < 0 || proto >= BT_MAX_PROTO) + return; + + write_lock(&bt_proto_lock); + bt_proto[proto] = NULL; + write_unlock(&bt_proto_lock); +} +EXPORT_SYMBOL(bt_sock_unregister); + +static int bt_sock_create(struct net *net, struct socket *sock, int proto, + int kern) +{ + int err; + + if (net != &init_net) + return -EAFNOSUPPORT; + + if (proto < 0 || proto >= BT_MAX_PROTO) + return -EINVAL; + + if (!bt_proto[proto]) + request_module("bt-proto-%d", proto); + + err = -EPROTONOSUPPORT; + + read_lock(&bt_proto_lock); + + if (bt_proto[proto] && try_module_get(bt_proto[proto]->owner)) { + err = bt_proto[proto]->create(net, sock, proto, kern); + if (!err) + bt_sock_reclassify_lock(sock->sk, proto); + module_put(bt_proto[proto]->owner); + } + + read_unlock(&bt_proto_lock); + + return err; +} + +void bt_sock_link(struct bt_sock_list *l, struct sock *sk) +{ + write_lock(&l->lock); + sk_add_node(sk, &l->head); + write_unlock(&l->lock); +} +EXPORT_SYMBOL(bt_sock_link); + +void bt_sock_unlink(struct bt_sock_list *l, struct sock *sk) +{ + write_lock(&l->lock); + sk_del_node_init(sk); + write_unlock(&l->lock); +} +EXPORT_SYMBOL(bt_sock_unlink); + +void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh) +{ + BT_DBG("parent %p, sk %p", parent, sk); + + sock_hold(sk); + + if (bh) + bh_lock_sock_nested(sk); + else + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + + list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q); + bt_sk(sk)->parent = parent; + + if (bh) + 
bh_unlock_sock(sk); + else + release_sock(sk); + + sk_acceptq_added(parent); +} +EXPORT_SYMBOL(bt_accept_enqueue); + +/* Calling function must hold the sk lock. + * bt_sk(sk)->parent must be non-NULL meaning sk is in the parent list. + */ +void bt_accept_unlink(struct sock *sk) +{ + BT_DBG("sk %p state %d", sk, sk->sk_state); + + list_del_init(&bt_sk(sk)->accept_q); + sk_acceptq_removed(bt_sk(sk)->parent); + bt_sk(sk)->parent = NULL; + sock_put(sk); +} +EXPORT_SYMBOL(bt_accept_unlink); + +struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock) +{ + struct bt_sock *s, *n; + struct sock *sk; + + BT_DBG("parent %p", parent); + +restart: + list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { + sk = (struct sock *)s; + + /* Prevent early freeing of sk due to unlink and sock_kill */ + sock_hold(sk); + lock_sock(sk); + + /* Check sk has not already been unlinked via + * bt_accept_unlink() due to serialisation caused by sk locking + */ + if (!bt_sk(sk)->parent) { + BT_DBG("sk %p, already unlinked", sk); + release_sock(sk); + sock_put(sk); + + /* Restart the loop as sk is no longer in the list + * and also avoid a potential infinite loop because + * list_for_each_entry_safe() is not thread safe. + */ + goto restart; + } + + /* sk is safely in the parent list so reduce reference count */ + sock_put(sk); + + /* FIXME: Is this check still needed */ + if (sk->sk_state == BT_CLOSED) { + bt_accept_unlink(sk); + release_sock(sk); + continue; + } + + if (sk->sk_state == BT_CONNECTED || !newsock || + test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) { + bt_accept_unlink(sk); + if (newsock) + sock_graft(sk, newsock); + + release_sock(sk); + return sk; + } + + release_sock(sk); + } + + return NULL; +} +EXPORT_SYMBOL(bt_accept_dequeue); + +int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len, + int flags) +{ + struct sock *sk = sock->sk; + struct sk_buff *skb; + size_t copied; + size_t skblen; + int err; + + BT_DBG("sock %p sk %p len %zu", sock, sk, len); + + if (flags & MSG_OOB) + return -EOPNOTSUPP; + + lock_sock(sk); + + skb = skb_recv_datagram(sk, flags, &err); + if (!skb) { + if (sk->sk_shutdown & RCV_SHUTDOWN) + err = 0; + + release_sock(sk); + return err; + } + + skblen = skb->len; + copied = skb->len; + if (len < copied) { + msg->msg_flags |= MSG_TRUNC; + copied = len; + } + + skb_reset_transport_header(skb); + err = skb_copy_datagram_msg(skb, 0, msg, copied); + if (err == 0) { + sock_recv_cmsgs(msg, sk, skb); + + if (msg->msg_name && bt_sk(sk)->skb_msg_name) + bt_sk(sk)->skb_msg_name(skb, msg->msg_name, + &msg->msg_namelen); + + if (bt_sk(sk)->skb_put_cmsg) + bt_sk(sk)->skb_put_cmsg(skb, msg, sk); + } + + skb_free_datagram(sk, skb); + + release_sock(sk); + + if (flags & MSG_TRUNC) + copied = skblen; + + return err ? 
: copied; +} +EXPORT_SYMBOL(bt_sock_recvmsg); + +static long bt_sock_data_wait(struct sock *sk, long timeo) +{ + DECLARE_WAITQUEUE(wait, current); + + add_wait_queue(sk_sleep(sk), &wait); + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); + + if (!skb_queue_empty(&sk->sk_receive_queue)) + break; + + if (sk->sk_err || (sk->sk_shutdown & RCV_SHUTDOWN)) + break; + + if (signal_pending(current) || !timeo) + break; + + sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk); + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); + sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk); + } + + __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + return timeo; +} + +int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) +{ + struct sock *sk = sock->sk; + int err = 0; + size_t target, copied = 0; + long timeo; + + if (flags & MSG_OOB) + return -EOPNOTSUPP; + + BT_DBG("sk %p size %zu", sk, size); + + lock_sock(sk); + + target = sock_rcvlowat(sk, flags & MSG_WAITALL, size); + timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); + + do { + struct sk_buff *skb; + int chunk; + + skb = skb_dequeue(&sk->sk_receive_queue); + if (!skb) { + if (copied >= target) + break; + + err = sock_error(sk); + if (err) + break; + if (sk->sk_shutdown & RCV_SHUTDOWN) + break; + + err = -EAGAIN; + if (!timeo) + break; + + timeo = bt_sock_data_wait(sk, timeo); + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + goto out; + } + continue; + } + + chunk = min_t(unsigned int, skb->len, size); + if (skb_copy_datagram_msg(skb, 0, msg, chunk)) { + skb_queue_head(&sk->sk_receive_queue, skb); + if (!copied) + copied = -EFAULT; + break; + } + copied += chunk; + size -= chunk; + + sock_recv_cmsgs(msg, sk, skb); + + if (!(flags & MSG_PEEK)) { + int skb_len = skb_headlen(skb); + + if (chunk <= skb_len) { + __skb_pull(skb, chunk); + } else { + struct sk_buff *frag; + + __skb_pull(skb, skb_len); + chunk -= skb_len; + + skb_walk_frags(skb, frag) { + if (chunk <= frag->len) { + /* Pulling partial data */ + skb->len -= chunk; + skb->data_len -= chunk; + __skb_pull(frag, chunk); + break; + } else if (frag->len) { + /* Pulling all frag data */ + chunk -= frag->len; + skb->len -= frag->len; + skb->data_len -= frag->len; + __skb_pull(frag, frag->len); + } + } + } + + if (skb->len) { + skb_queue_head(&sk->sk_receive_queue, skb); + break; + } + kfree_skb(skb); + + } else { + /* put message back and return */ + skb_queue_head(&sk->sk_receive_queue, skb); + break; + } + } while (size); + +out: + release_sock(sk); + return copied ? : err; +} +EXPORT_SYMBOL(bt_sock_stream_recvmsg); + +static inline __poll_t bt_accept_poll(struct sock *parent) +{ + struct bt_sock *s, *n; + struct sock *sk; + + list_for_each_entry_safe(s, n, &bt_sk(parent)->accept_q, accept_q) { + sk = (struct sock *)s; + if (sk->sk_state == BT_CONNECTED || + (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags) && + sk->sk_state == BT_CONNECT2)) + return EPOLLIN | EPOLLRDNORM; + } + + return 0; +} + +__poll_t bt_sock_poll(struct file *file, struct socket *sock, + poll_table *wait) +{ + struct sock *sk = sock->sk; + __poll_t mask = 0; + + poll_wait(file, sk_sleep(sk), wait); + + if (sk->sk_state == BT_LISTEN) + return bt_accept_poll(sk); + + if (sk->sk_err || !skb_queue_empty_lockless(&sk->sk_error_queue)) + mask |= EPOLLERR | + (sock_flag(sk, SOCK_SELECT_ERR_QUEUE) ? 
EPOLLPRI : 0); + + if (sk->sk_shutdown & RCV_SHUTDOWN) + mask |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM; + + if (sk->sk_shutdown == SHUTDOWN_MASK) + mask |= EPOLLHUP; + + if (!skb_queue_empty_lockless(&sk->sk_receive_queue)) + mask |= EPOLLIN | EPOLLRDNORM; + + if (sk->sk_state == BT_CLOSED) + mask |= EPOLLHUP; + + if (sk->sk_state == BT_CONNECT || + sk->sk_state == BT_CONNECT2 || + sk->sk_state == BT_CONFIG) + return mask; + + if (!test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags) && sock_writeable(sk)) + mask |= EPOLLOUT | EPOLLWRNORM | EPOLLWRBAND; + else + sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); + + return mask; +} +EXPORT_SYMBOL(bt_sock_poll); + +int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct sock *sk = sock->sk; + struct sk_buff *skb; + long amount; + int err; + + BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); + + switch (cmd) { + case TIOCOUTQ: + if (sk->sk_state == BT_LISTEN) + return -EINVAL; + + amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); + if (amount < 0) + amount = 0; + err = put_user(amount, (int __user *)arg); + break; + + case TIOCINQ: + if (sk->sk_state == BT_LISTEN) + return -EINVAL; + + lock_sock(sk); + skb = skb_peek(&sk->sk_receive_queue); + amount = skb ? skb->len : 0; + release_sock(sk); + err = put_user(amount, (int __user *)arg); + break; + + default: + err = -ENOIOCTLCMD; + break; + } + + return err; +} +EXPORT_SYMBOL(bt_sock_ioctl); + +/* This function expects the sk lock to be held when called */ +int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo) +{ + DECLARE_WAITQUEUE(wait, current); + int err = 0; + + BT_DBG("sk %p", sk); + + add_wait_queue(sk_sleep(sk), &wait); + set_current_state(TASK_INTERRUPTIBLE); + while (sk->sk_state != state) { + if (!timeo) { + err = -EINPROGRESS; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); + set_current_state(TASK_INTERRUPTIBLE); + + err = sock_error(sk); + if (err) + break; + } + __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + return err; +} +EXPORT_SYMBOL(bt_sock_wait_state); + +/* This function expects the sk lock to be held when called */ +int bt_sock_wait_ready(struct sock *sk, unsigned int msg_flags) +{ + DECLARE_WAITQUEUE(wait, current); + unsigned long timeo; + int err = 0; + + BT_DBG("sk %p", sk); + + timeo = sock_sndtimeo(sk, !!(msg_flags & MSG_DONTWAIT)); + + add_wait_queue(sk_sleep(sk), &wait); + set_current_state(TASK_INTERRUPTIBLE); + while (test_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags)) { + if (!timeo) { + err = -EAGAIN; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); + set_current_state(TASK_INTERRUPTIBLE); + + err = sock_error(sk); + if (err) + break; + } + __set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + + return err; +} +EXPORT_SYMBOL(bt_sock_wait_ready); + +#ifdef CONFIG_PROC_FS +static void *bt_seq_start(struct seq_file *seq, loff_t *pos) + __acquires(seq->private->l->lock) +{ + struct bt_sock_list *l = pde_data(file_inode(seq->file)); + + read_lock(&l->lock); + return seq_hlist_start_head(&l->head, *pos); +} + +static void *bt_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct bt_sock_list *l = pde_data(file_inode(seq->file)); + + return seq_hlist_next(v, &l->head, pos); +} + +static void bt_seq_stop(struct seq_file *seq, void *v) + 
__releases(seq->private->l->lock) +{ + struct bt_sock_list *l = pde_data(file_inode(seq->file)); + + read_unlock(&l->lock); +} + +static int bt_seq_show(struct seq_file *seq, void *v) +{ + struct bt_sock_list *l = pde_data(file_inode(seq->file)); + + if (v == SEQ_START_TOKEN) { + seq_puts(seq, "sk RefCnt Rmem Wmem User Inode Parent"); + + if (l->custom_seq_show) { + seq_putc(seq, ' '); + l->custom_seq_show(seq, v); + } + + seq_putc(seq, '\n'); + } else { + struct sock *sk = sk_entry(v); + struct bt_sock *bt = bt_sk(sk); + + seq_printf(seq, + "%pK %-6d %-6u %-6u %-6u %-6lu %-6lu", + sk, + refcount_read(&sk->sk_refcnt), + sk_rmem_alloc_get(sk), + sk_wmem_alloc_get(sk), + from_kuid(seq_user_ns(seq), sock_i_uid(sk)), + sock_i_ino(sk), + bt->parent ? sock_i_ino(bt->parent) : 0LU); + + if (l->custom_seq_show) { + seq_putc(seq, ' '); + l->custom_seq_show(seq, v); + } + + seq_putc(seq, '\n'); + } + return 0; +} + +static const struct seq_operations bt_seq_ops = { + .start = bt_seq_start, + .next = bt_seq_next, + .stop = bt_seq_stop, + .show = bt_seq_show, +}; + +int bt_procfs_init(struct net *net, const char *name, + struct bt_sock_list *sk_list, + int (*seq_show)(struct seq_file *, void *)) +{ + sk_list->custom_seq_show = seq_show; + + if (!proc_create_seq_data(name, 0, net->proc_net, &bt_seq_ops, sk_list)) + return -ENOMEM; + return 0; +} + +void bt_procfs_cleanup(struct net *net, const char *name) +{ + remove_proc_entry(name, net->proc_net); +} +#else +int bt_procfs_init(struct net *net, const char *name, + struct bt_sock_list *sk_list, + int (*seq_show)(struct seq_file *, void *)) +{ + return 0; +} + +void bt_procfs_cleanup(struct net *net, const char *name) +{ +} +#endif +EXPORT_SYMBOL(bt_procfs_init); +EXPORT_SYMBOL(bt_procfs_cleanup); + +static const struct net_proto_family bt_sock_family_ops = { + .owner = THIS_MODULE, + .family = PF_BLUETOOTH, + .create = bt_sock_create, +}; + +struct dentry *bt_debugfs; +EXPORT_SYMBOL_GPL(bt_debugfs); + +#define VERSION __stringify(BT_SUBSYS_VERSION) "." 
\ + __stringify(BT_SUBSYS_REVISION) + +static int __init bt_init(void) +{ + int err; + + sock_skb_cb_check_size(sizeof(struct bt_skb_cb)); + + BT_INFO("Core ver %s", VERSION); + + err = bt_selftest(); + if (err < 0) + return err; + + bt_debugfs = debugfs_create_dir("bluetooth", NULL); + + bt_leds_init(); + + err = bt_sysfs_init(); + if (err < 0) + goto cleanup_led; + + err = sock_register(&bt_sock_family_ops); + if (err) + goto cleanup_sysfs; + + BT_INFO("HCI device and connection manager initialized"); + + err = hci_sock_init(); + if (err) + goto unregister_socket; + + err = l2cap_init(); + if (err) + goto cleanup_socket; + + err = sco_init(); + if (err) + goto cleanup_cap; + + err = mgmt_init(); + if (err) + goto cleanup_sco; + + return 0; + +cleanup_sco: + sco_exit(); +cleanup_cap: + l2cap_exit(); +cleanup_socket: + hci_sock_cleanup(); +unregister_socket: + sock_unregister(PF_BLUETOOTH); +cleanup_sysfs: + bt_sysfs_cleanup(); +cleanup_led: + bt_leds_cleanup(); + return err; +} + +static void __exit bt_exit(void) +{ + mgmt_exit(); + + sco_exit(); + + l2cap_exit(); + + hci_sock_cleanup(); + + sock_unregister(PF_BLUETOOTH); + + bt_sysfs_cleanup(); + + bt_leds_cleanup(); + + debugfs_remove_recursive(bt_debugfs); +} + +subsys_initcall(bt_init); +module_exit(bt_exit); + +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); +MODULE_DESCRIPTION("Bluetooth Core ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS_NETPROTO(PF_BLUETOOTH); diff --git a/net/bluetooth/amp.c b/net/bluetooth/amp.c new file mode 100644 index 000000000..2134f92bd --- /dev/null +++ b/net/bluetooth/amp.c @@ -0,0 +1,591 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + Copyright (c) 2011,2012 Intel Corp. + +*/ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci.h> +#include <net/bluetooth/hci_core.h> +#include <crypto/hash.h> + +#include "hci_request.h" +#include "a2mp.h" +#include "amp.h" + +/* Remote AMP Controllers interface */ +void amp_ctrl_get(struct amp_ctrl *ctrl) +{ + BT_DBG("ctrl %p orig refcnt %d", ctrl, + kref_read(&ctrl->kref)); + + kref_get(&ctrl->kref); +} + +static void amp_ctrl_destroy(struct kref *kref) +{ + struct amp_ctrl *ctrl = container_of(kref, struct amp_ctrl, kref); + + BT_DBG("ctrl %p", ctrl); + + kfree(ctrl->assoc); + kfree(ctrl); +} + +int amp_ctrl_put(struct amp_ctrl *ctrl) +{ + BT_DBG("ctrl %p orig refcnt %d", ctrl, + kref_read(&ctrl->kref)); + + return kref_put(&ctrl->kref, &amp_ctrl_destroy); +} + +struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id) +{ + struct amp_ctrl *ctrl; + + ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL); + if (!ctrl) + return NULL; + + kref_init(&ctrl->kref); + ctrl->id = id; + + mutex_lock(&mgr->amp_ctrls_lock); + list_add(&ctrl->list, &mgr->amp_ctrls); + mutex_unlock(&mgr->amp_ctrls_lock); + + BT_DBG("mgr %p ctrl %p", mgr, ctrl); + + return ctrl; +} + +void amp_ctrl_list_flush(struct amp_mgr *mgr) +{ + struct amp_ctrl *ctrl, *n; + + BT_DBG("mgr %p", mgr); + + mutex_lock(&mgr->amp_ctrls_lock); + list_for_each_entry_safe(ctrl, n, &mgr->amp_ctrls, list) { + list_del(&ctrl->list); + amp_ctrl_put(ctrl); + } + mutex_unlock(&mgr->amp_ctrls_lock); +} + +struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id) +{ + struct amp_ctrl *ctrl; + + BT_DBG("mgr %p id %u", mgr, id); + + mutex_lock(&mgr->amp_ctrls_lock); + list_for_each_entry(ctrl, &mgr->amp_ctrls, list) { + if (ctrl->id == id) { + amp_ctrl_get(ctrl); + mutex_unlock(&mgr->amp_ctrls_lock); + return ctrl; + } + } + mutex_unlock(&mgr->amp_ctrls_lock); + + return NULL; +} + 
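The amp_ctrl functions above form a small reference-counted registry of remote AMP controllers: amp_ctrl_add() publishes an entry with an initial reference, amp_ctrl_lookup() returns a match with an extra reference taken under amp_ctrls_lock, and amp_ctrl_list_flush() drops the list's references. A minimal caller-side sketch of the required get/put pairing follows; example_read_assoc_len() is a hypothetical helper for illustration only, not part of this file:

/* Hypothetical caller: every successful amp_ctrl_lookup() must be
 * balanced by amp_ctrl_put(), because the lookup returns the entry
 * with its kref elevated and the final put frees it.
 */
static u16 example_read_assoc_len(struct amp_mgr *mgr, u8 id)
{
	struct amp_ctrl *ctrl;
	u16 len;

	ctrl = amp_ctrl_lookup(mgr, id);	/* takes a reference */
	if (!ctrl)
		return 0;

	len = ctrl->assoc_len;	/* safe while the reference is held */

	amp_ctrl_put(ctrl);	/* may free ctrl; do not touch it after this */
	return len;
}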
+/* Physical Link interface */ +static u8 __next_handle(struct amp_mgr *mgr) +{ + if (++mgr->handle == 0) + mgr->handle = 1; + + return mgr->handle; +} + +struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, + u8 remote_id, bool out) +{ + bdaddr_t *dst = &mgr->l2cap_conn->hcon->dst; + struct hci_conn *hcon; + u8 role = out ? HCI_ROLE_MASTER : HCI_ROLE_SLAVE; + + hcon = hci_conn_add(hdev, AMP_LINK, dst, role); + if (!hcon) + return NULL; + + BT_DBG("hcon %p dst %pMR", hcon, dst); + + hcon->state = BT_CONNECT; + hcon->attempt++; + hcon->handle = __next_handle(mgr); + hcon->remote_id = remote_id; + hcon->amp_mgr = amp_mgr_get(mgr); + + return hcon; +} + +/* AMP crypto key generation interface */ +static int hmac_sha256(u8 *key, u8 ksize, char *plaintext, u8 psize, u8 *output) +{ + struct crypto_shash *tfm; + struct shash_desc *shash; + int ret; + + if (!ksize) + return -EINVAL; + + tfm = crypto_alloc_shash("hmac(sha256)", 0, 0); + if (IS_ERR(tfm)) { + BT_DBG("crypto_alloc_shash failed: err %ld", PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + + ret = crypto_shash_setkey(tfm, key, ksize); + if (ret) { + BT_DBG("crypto_shash_setkey failed: err %d", ret); + goto failed; + } + + shash = kzalloc(sizeof(*shash) + crypto_shash_descsize(tfm), + GFP_KERNEL); + if (!shash) { + ret = -ENOMEM; + goto failed; + } + + shash->tfm = tfm; + + ret = crypto_shash_digest(shash, plaintext, psize, output); + + kfree(shash); + +failed: + crypto_free_shash(tfm); + return ret; +} + +int phylink_gen_key(struct hci_conn *conn, u8 *data, u8 *len, u8 *type) +{ + struct hci_dev *hdev = conn->hdev; + struct link_key *key; + u8 keybuf[HCI_AMP_LINK_KEY_SIZE]; + u8 gamp_key[HCI_AMP_LINK_KEY_SIZE]; + int err; + + if (!hci_conn_check_link_mode(conn)) + return -EACCES; + + BT_DBG("conn %p key_type %d", conn, conn->key_type); + + /* Legacy key */ + if (conn->key_type < 3) { + bt_dev_err(hdev, "legacy key type %u", conn->key_type); + return -EACCES; + } + + *type = conn->key_type; + *len = HCI_AMP_LINK_KEY_SIZE; + + key = hci_find_link_key(hdev, &conn->dst); + if (!key) { + BT_DBG("No Link key for conn %p dst %pMR", conn, &conn->dst); + return -EACCES; + } + + /* BR/EDR Link Key concatenated together with itself */ + memcpy(&keybuf[0], key->val, HCI_LINK_KEY_SIZE); + memcpy(&keybuf[HCI_LINK_KEY_SIZE], key->val, HCI_LINK_KEY_SIZE); + + /* Derive Generic AMP Link Key (gamp) */ + err = hmac_sha256(keybuf, HCI_AMP_LINK_KEY_SIZE, "gamp", 4, gamp_key); + if (err) { + bt_dev_err(hdev, "could not derive Generic AMP Key: err %d", err); + return err; + } + + if (conn->key_type == HCI_LK_DEBUG_COMBINATION) { + BT_DBG("Use Generic AMP Key (gamp)"); + memcpy(data, gamp_key, HCI_AMP_LINK_KEY_SIZE); + return err; + } + + /* Derive Dedicated AMP Link Key: "802b" is 802.11 PAL keyID */ + return hmac_sha256(gamp_key, HCI_AMP_LINK_KEY_SIZE, "802b", 4, data); +} + +static void read_local_amp_assoc_complete(struct hci_dev *hdev, u8 status, + u16 opcode, struct sk_buff *skb) +{ + struct hci_rp_read_local_amp_assoc *rp = (void *)skb->data; + struct amp_assoc *assoc = &hdev->loc_assoc; + size_t rem_len, frag_len; + + BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + + if (rp->status) + goto send_rsp; + + frag_len = skb->len - sizeof(*rp); + rem_len = __le16_to_cpu(rp->rem_len); + + if (rem_len > frag_len) { + BT_DBG("frag_len %zu rem_len %zu", frag_len, rem_len); + + memcpy(assoc->data + assoc->offset, rp->frag, frag_len); + assoc->offset += frag_len; + + /* Read other fragments */ + amp_read_loc_assoc_frag(hdev, rp->phy_handle); + + 
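/* This completion handler runs once per fragment; it keeps issuing reads until the remaining data fits in a single response. */ + 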
return; + } + + memcpy(assoc->data + assoc->offset, rp->frag, rem_len); + assoc->len = assoc->offset + rem_len; + assoc->offset = 0; + +send_rsp: + /* Send A2MP Rsp when all fragments are received */ + a2mp_send_getampassoc_rsp(hdev, rp->status); + a2mp_send_create_phy_link_req(hdev, rp->status); +} + +void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle) +{ + struct hci_cp_read_local_amp_assoc cp; + struct amp_assoc *loc_assoc = &hdev->loc_assoc; + struct hci_request req; + int err; + + BT_DBG("%s handle %u", hdev->name, phy_handle); + + cp.phy_handle = phy_handle; + cp.max_len = cpu_to_le16(hdev->amp_assoc_size); + cp.len_so_far = cpu_to_le16(loc_assoc->offset); + + hci_req_init(&req, hdev); + hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); + err = hci_req_run_skb(&req, read_local_amp_assoc_complete); + if (err < 0) + a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); +} + +void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr) +{ + struct hci_cp_read_local_amp_assoc cp; + struct hci_request req; + int err; + + memset(&hdev->loc_assoc, 0, sizeof(struct amp_assoc)); + memset(&cp, 0, sizeof(cp)); + + cp.max_len = cpu_to_le16(hdev->amp_assoc_size); + + set_bit(READ_LOC_AMP_ASSOC, &mgr->state); + hci_req_init(&req, hdev); + hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); + err = hci_req_run_skb(&req, read_local_amp_assoc_complete); + if (err < 0) + a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); +} + +void amp_read_loc_assoc_final_data(struct hci_dev *hdev, + struct hci_conn *hcon) +{ + struct hci_cp_read_local_amp_assoc cp; + struct amp_mgr *mgr = hcon->amp_mgr; + struct hci_request req; + int err; + + if (!mgr) + return; + + cp.phy_handle = hcon->handle; + cp.len_so_far = cpu_to_le16(0); + cp.max_len = cpu_to_le16(hdev->amp_assoc_size); + + set_bit(READ_LOC_AMP_ASSOC_FINAL, &mgr->state); + + /* Read Local AMP Assoc final link information data */ + hci_req_init(&req, hdev); + hci_req_add(&req, HCI_OP_READ_LOCAL_AMP_ASSOC, sizeof(cp), &cp); + err = hci_req_run_skb(&req, read_local_amp_assoc_complete); + if (err < 0) + a2mp_send_getampassoc_rsp(hdev, A2MP_STATUS_INVALID_CTRL_ID); +} + +static void write_remote_amp_assoc_complete(struct hci_dev *hdev, u8 status, + u16 opcode, struct sk_buff *skb) +{ + struct hci_rp_write_remote_amp_assoc *rp = (void *)skb->data; + + BT_DBG("%s status 0x%2.2x phy_handle 0x%2.2x", + hdev->name, rp->status, rp->phy_handle); + + if (rp->status) + return; + + amp_write_rem_assoc_continue(hdev, rp->phy_handle); +} + +/* Write AMP Assoc data fragments, returns true with last fragment written*/ +static bool amp_write_rem_assoc_frag(struct hci_dev *hdev, + struct hci_conn *hcon) +{ + struct hci_cp_write_remote_amp_assoc *cp; + struct amp_mgr *mgr = hcon->amp_mgr; + struct amp_ctrl *ctrl; + struct hci_request req; + u16 frag_len, len; + + ctrl = amp_ctrl_lookup(mgr, hcon->remote_id); + if (!ctrl) + return false; + + if (!ctrl->assoc_rem_len) { + BT_DBG("all fragments are written"); + ctrl->assoc_rem_len = ctrl->assoc_len; + ctrl->assoc_len_so_far = 0; + + amp_ctrl_put(ctrl); + return true; + } + + frag_len = min_t(u16, 248, ctrl->assoc_rem_len); + len = frag_len + sizeof(*cp); + + cp = kzalloc(len, GFP_KERNEL); + if (!cp) { + amp_ctrl_put(ctrl); + return false; + } + + BT_DBG("hcon %p ctrl %p frag_len %u assoc_len %u rem_len %u", + hcon, ctrl, frag_len, ctrl->assoc_len, ctrl->assoc_rem_len); + + cp->phy_handle = hcon->handle; + cp->len_so_far = cpu_to_le16(ctrl->assoc_len_so_far); + 
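/* rem_len reported to the controller still includes the fragment copied below; the running counters are only advanced after the copy. */ + 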
cp->rem_len = cpu_to_le16(ctrl->assoc_rem_len); + memcpy(cp->frag, ctrl->assoc, frag_len); + + ctrl->assoc_len_so_far += frag_len; + ctrl->assoc_rem_len -= frag_len; + + amp_ctrl_put(ctrl); + + hci_req_init(&req, hdev); + hci_req_add(&req, HCI_OP_WRITE_REMOTE_AMP_ASSOC, len, cp); + hci_req_run_skb(&req, write_remote_amp_assoc_complete); + + kfree(cp); + + return false; +} + +void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle) +{ + struct hci_conn *hcon; + + BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle); + + hcon = hci_conn_hash_lookup_handle(hdev, handle); + if (!hcon) + return; + + /* Send A2MP create phylink rsp when all fragments are written */ + if (amp_write_rem_assoc_frag(hdev, hcon)) + a2mp_send_create_phy_link_rsp(hdev, 0); +} + +void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle) +{ + struct hci_conn *hcon; + + BT_DBG("%s phy handle 0x%2.2x", hdev->name, handle); + + hcon = hci_conn_hash_lookup_handle(hdev, handle); + if (!hcon) + return; + + BT_DBG("%s phy handle 0x%2.2x hcon %p", hdev->name, handle, hcon); + + amp_write_rem_assoc_frag(hdev, hcon); +} + +static void create_phylink_complete(struct hci_dev *hdev, u8 status, + u16 opcode) +{ + struct hci_cp_create_phy_link *cp; + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_PHY_LINK); + if (!cp) + return; + + hci_dev_lock(hdev); + + if (status) { + struct hci_conn *hcon; + + hcon = hci_conn_hash_lookup_handle(hdev, cp->phy_handle); + if (hcon) + hci_conn_del(hcon); + } else { + amp_write_remote_assoc(hdev, cp->phy_handle); + } + + hci_dev_unlock(hdev); +} + +void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, + struct hci_conn *hcon) +{ + struct hci_cp_create_phy_link cp; + struct hci_request req; + + cp.phy_handle = hcon->handle; + + BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon, + hcon->handle); + + if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, + &cp.key_type)) { + BT_DBG("Cannot create link key"); + return; + } + + hci_req_init(&req, hdev); + hci_req_add(&req, HCI_OP_CREATE_PHY_LINK, sizeof(cp), &cp); + hci_req_run(&req, create_phylink_complete); +} + +static void accept_phylink_complete(struct hci_dev *hdev, u8 status, + u16 opcode) +{ + struct hci_cp_accept_phy_link *cp; + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_ACCEPT_PHY_LINK); + if (!cp) + return; + + amp_write_remote_assoc(hdev, cp->phy_handle); +} + +void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, + struct hci_conn *hcon) +{ + struct hci_cp_accept_phy_link cp; + struct hci_request req; + + cp.phy_handle = hcon->handle; + + BT_DBG("%s hcon %p phy handle 0x%2.2x", hdev->name, hcon, + hcon->handle); + + if (phylink_gen_key(mgr->l2cap_conn->hcon, cp.key, &cp.key_len, + &cp.key_type)) { + BT_DBG("Cannot create link key"); + return; + } + + hci_req_init(&req, hdev); + hci_req_add(&req, HCI_OP_ACCEPT_PHY_LINK, sizeof(cp), &cp); + hci_req_run(&req, accept_phylink_complete); +} + +void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon) +{ + struct hci_dev *bredr_hdev = hci_dev_hold(bredr_hcon->hdev); + struct amp_mgr *mgr = hs_hcon->amp_mgr; + struct l2cap_chan *bredr_chan; + + BT_DBG("bredr_hcon %p hs_hcon %p mgr %p", bredr_hcon, hs_hcon, mgr); + + if (!bredr_hdev || !mgr || !mgr->bredr_chan) + return; + + bredr_chan = mgr->bredr_chan; + + l2cap_chan_lock(bredr_chan); + + set_bit(FLAG_EFS_ENABLE, &bredr_chan->flags); + 
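/* Hand the BR/EDR channel over to the high-speed link: record both AMP controller IDs and switch the connection MTU to the AMP controller's block MTU. */ + 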
bredr_chan->remote_amp_id = hs_hcon->remote_id; + bredr_chan->local_amp_id = hs_hcon->hdev->id; + bredr_chan->hs_hcon = hs_hcon; + bredr_chan->conn->mtu = hs_hcon->hdev->block_mtu; + + __l2cap_physical_cfm(bredr_chan, 0); + + l2cap_chan_unlock(bredr_chan); + + hci_dev_put(bredr_hdev); +} + +void amp_create_logical_link(struct l2cap_chan *chan) +{ + struct hci_conn *hs_hcon = chan->hs_hcon; + struct hci_cp_create_accept_logical_link cp; + struct hci_dev *hdev; + + BT_DBG("chan %p hs_hcon %p dst %pMR", chan, hs_hcon, + &chan->conn->hcon->dst); + + if (!hs_hcon) + return; + + hdev = hci_dev_hold(chan->hs_hcon->hdev); + if (!hdev) + return; + + cp.phy_handle = hs_hcon->handle; + + cp.tx_flow_spec.id = chan->local_id; + cp.tx_flow_spec.stype = chan->local_stype; + cp.tx_flow_spec.msdu = cpu_to_le16(chan->local_msdu); + cp.tx_flow_spec.sdu_itime = cpu_to_le32(chan->local_sdu_itime); + cp.tx_flow_spec.acc_lat = cpu_to_le32(chan->local_acc_lat); + cp.tx_flow_spec.flush_to = cpu_to_le32(chan->local_flush_to); + + cp.rx_flow_spec.id = chan->remote_id; + cp.rx_flow_spec.stype = chan->remote_stype; + cp.rx_flow_spec.msdu = cpu_to_le16(chan->remote_msdu); + cp.rx_flow_spec.sdu_itime = cpu_to_le32(chan->remote_sdu_itime); + cp.rx_flow_spec.acc_lat = cpu_to_le32(chan->remote_acc_lat); + cp.rx_flow_spec.flush_to = cpu_to_le32(chan->remote_flush_to); + + if (hs_hcon->out) + hci_send_cmd(hdev, HCI_OP_CREATE_LOGICAL_LINK, sizeof(cp), + &cp); + else + hci_send_cmd(hdev, HCI_OP_ACCEPT_LOGICAL_LINK, sizeof(cp), + &cp); + + hci_dev_put(hdev); +} + +void amp_disconnect_logical_link(struct hci_chan *hchan) +{ + struct hci_conn *hcon = hchan->conn; + struct hci_cp_disconn_logical_link cp; + + if (hcon->state != BT_CONNECTED) { + BT_DBG("hchan %p not connected", hchan); + return; + } + + cp.log_handle = cpu_to_le16(hchan->handle); + hci_send_cmd(hcon->hdev, HCI_OP_DISCONN_LOGICAL_LINK, sizeof(cp), &cp); +} + +void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason) +{ + BT_DBG("hchan %p", hchan); + + hci_chan_del(hchan); +} diff --git a/net/bluetooth/amp.h b/net/bluetooth/amp.h new file mode 100644 index 000000000..832764dfb --- /dev/null +++ b/net/bluetooth/amp.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + Copyright (c) 2011,2012 Intel Corp. 
+ +*/ + +#ifndef __AMP_H +#define __AMP_H + +struct amp_ctrl { + struct list_head list; + struct kref kref; + __u8 id; + __u16 assoc_len_so_far; + __u16 assoc_rem_len; + __u16 assoc_len; + __u8 *assoc; +}; + +int amp_ctrl_put(struct amp_ctrl *ctrl); +void amp_ctrl_get(struct amp_ctrl *ctrl); +struct amp_ctrl *amp_ctrl_add(struct amp_mgr *mgr, u8 id); +struct amp_ctrl *amp_ctrl_lookup(struct amp_mgr *mgr, u8 id); +void amp_ctrl_list_flush(struct amp_mgr *mgr); + +struct hci_conn *phylink_add(struct hci_dev *hdev, struct amp_mgr *mgr, + u8 remote_id, bool out); + +int phylink_gen_key(struct hci_conn *hcon, u8 *data, u8 *len, u8 *type); + +void amp_read_loc_info(struct hci_dev *hdev, struct amp_mgr *mgr); +void amp_read_loc_assoc_frag(struct hci_dev *hdev, u8 phy_handle); +void amp_read_loc_assoc(struct hci_dev *hdev, struct amp_mgr *mgr); +void amp_read_loc_assoc_final_data(struct hci_dev *hdev, + struct hci_conn *hcon); +void amp_create_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, + struct hci_conn *hcon); +void amp_accept_phylink(struct hci_dev *hdev, struct amp_mgr *mgr, + struct hci_conn *hcon); + +#if IS_ENABLED(CONFIG_BT_HS) +void amp_create_logical_link(struct l2cap_chan *chan); +void amp_disconnect_logical_link(struct hci_chan *hchan); +#else +static inline void amp_create_logical_link(struct l2cap_chan *chan) +{ +} + +static inline void amp_disconnect_logical_link(struct hci_chan *hchan) +{ +} +#endif + +void amp_write_remote_assoc(struct hci_dev *hdev, u8 handle); +void amp_write_rem_assoc_continue(struct hci_dev *hdev, u8 handle); +void amp_physical_cfm(struct hci_conn *bredr_hcon, struct hci_conn *hs_hcon); +void amp_create_logical_link(struct l2cap_chan *chan); +void amp_disconnect_logical_link(struct hci_chan *hchan); +void amp_destroy_logical_link(struct hci_chan *hchan, u8 reason); + +#endif /* __AMP_H */ diff --git a/net/bluetooth/aosp.c b/net/bluetooth/aosp.c new file mode 100644 index 000000000..1d67836e9 --- /dev/null +++ b/net/bluetooth/aosp.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +#include "aosp.h" + +/* Command complete parameters of LE_Get_Vendor_Capabilities_Command + * The parameters grow over time. The base version that declares the + * version_supported field is v0.95. 
Refer to + * https://cs.android.com/android/platform/superproject/+/master:system/ + * bt/gd/hci/controller.cc;l=452?q=le_get_vendor_capabilities_handler + */ +struct aosp_rp_le_get_vendor_capa { + /* v0.95: 15 octets */ + __u8 status; + __u8 max_advt_instances; + __u8 offloaded_resolution_of_private_address; + __le16 total_scan_results_storage; + __u8 max_irk_list_sz; + __u8 filtering_support; + __u8 max_filter; + __u8 activity_energy_info_support; + __le16 version_supported; + __le16 total_num_of_advt_tracked; + __u8 extended_scan_support; + __u8 debug_logging_supported; + /* v0.96: 16 octets */ + __u8 le_address_generation_offloading_support; + /* v0.98: 21 octets */ + __le32 a2dp_source_offload_capability_mask; + __u8 bluetooth_quality_report_support; + /* v1.00: 25 octets */ + __le32 dynamic_audio_buffer_support; +} __packed; + +#define VENDOR_CAPA_BASE_SIZE 15 +#define VENDOR_CAPA_0_98_SIZE 21 + +void aosp_do_open(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct aosp_rp_le_get_vendor_capa *rp; + u16 version_supported; + + if (!hdev->aosp_capable) + return; + + bt_dev_dbg(hdev, "Initialize AOSP extension"); + + /* LE Get Vendor Capabilities Command */ + skb = __hci_cmd_sync(hdev, hci_opcode_pack(0x3f, 0x153), 0, NULL, + HCI_CMD_TIMEOUT); + if (IS_ERR_OR_NULL(skb)) { + if (!skb) + skb = ERR_PTR(-EIO); + + bt_dev_err(hdev, "AOSP get vendor capabilities (%ld)", + PTR_ERR(skb)); + return; + } + + /* A basic length check */ + if (skb->len < VENDOR_CAPA_BASE_SIZE) + goto length_error; + + rp = (struct aosp_rp_le_get_vendor_capa *)skb->data; + + version_supported = le16_to_cpu(rp->version_supported); + /* AOSP displays the version number like v0.98, v1.00, etc. */ + bt_dev_info(hdev, "AOSP extensions version v%u.%02u", + version_supported >> 8, version_supported & 0xff); + + /* Do not support very old versions. */ + if (version_supported < 95) { + bt_dev_warn(hdev, "AOSP capabilities version %u too old", + version_supported); + goto done; + } + + if (version_supported < 98) { + bt_dev_warn(hdev, "AOSP quality report is not supported"); + goto done; + } + + if (skb->len < VENDOR_CAPA_0_98_SIZE) + goto length_error; + + /* The bluetooth_quality_report_support is defined at version + * v0.98. Refer to + * https://cs.android.com/android/platform/superproject/+/ + * master:system/bt/gd/hci/controller.cc;l=477 + */ + if (rp->bluetooth_quality_report_support) { + hdev->aosp_quality_report = true; + bt_dev_info(hdev, "AOSP quality report is supported"); + } + + goto done; + +length_error: + bt_dev_err(hdev, "AOSP capabilities length %d too short", skb->len); + +done: + kfree_skb(skb); +} + +void aosp_do_close(struct hci_dev *hdev) +{ + if (!hdev->aosp_capable) + return; + + bt_dev_dbg(hdev, "Cleanup of AOSP extension"); +} + +/* BQR command */ +#define BQR_OPCODE hci_opcode_pack(0x3f, 0x015e) + +/* BQR report action */ +#define REPORT_ACTION_ADD 0x00 +#define REPORT_ACTION_DELETE 0x01 +#define REPORT_ACTION_CLEAR 0x02 + +/* BQR event masks */ +#define QUALITY_MONITORING BIT(0) +#define APPRAOCHING_LSTO BIT(1) +#define A2DP_AUDIO_CHOPPY BIT(2) +#define SCO_VOICE_CHOPPY BIT(3) + +#define DEFAULT_BQR_EVENT_MASK (QUALITY_MONITORING | APPRAOCHING_LSTO | \ + A2DP_AUDIO_CHOPPY | SCO_VOICE_CHOPPY) + +/* Reporting at milliseconds so as not to stress the controller too much. 
+ * Range: 0 ~ 65535 ms + */ +#define DEFALUT_REPORT_INTERVAL_MS 5000 + +struct aosp_bqr_cp { + __u8 report_action; + __u32 event_mask; + __u16 min_report_interval; +} __packed; + +static int enable_quality_report(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct aosp_bqr_cp cp; + + cp.report_action = REPORT_ACTION_ADD; + cp.event_mask = DEFAULT_BQR_EVENT_MASK; + cp.min_report_interval = DEFALUT_REPORT_INTERVAL_MS; + + skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR_OR_NULL(skb)) { + if (!skb) + skb = ERR_PTR(-EIO); + + bt_dev_err(hdev, "Enabling Android BQR failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + kfree_skb(skb); + return 0; +} + +static int disable_quality_report(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct aosp_bqr_cp cp = { 0 }; + + cp.report_action = REPORT_ACTION_CLEAR; + + skb = __hci_cmd_sync(hdev, BQR_OPCODE, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR_OR_NULL(skb)) { + if (!skb) + skb = ERR_PTR(-EIO); + + bt_dev_err(hdev, "Disabling Android BQR failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + kfree_skb(skb); + return 0; +} + +bool aosp_has_quality_report(struct hci_dev *hdev) +{ + return hdev->aosp_quality_report; +} + +int aosp_set_quality_report(struct hci_dev *hdev, bool enable) +{ + if (!aosp_has_quality_report(hdev)) + return -EOPNOTSUPP; + + bt_dev_dbg(hdev, "quality report enable %d", enable); + + /* Enable or disable the quality report feature. */ + if (enable) + return enable_quality_report(hdev); + else + return disable_quality_report(hdev); +} diff --git a/net/bluetooth/aosp.h b/net/bluetooth/aosp.h new file mode 100644 index 000000000..2fd8886d5 --- /dev/null +++ b/net/bluetooth/aosp.h @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2021 Intel Corporation + */ + +#if IS_ENABLED(CONFIG_BT_AOSPEXT) + +void aosp_do_open(struct hci_dev *hdev); +void aosp_do_close(struct hci_dev *hdev); + +bool aosp_has_quality_report(struct hci_dev *hdev); +int aosp_set_quality_report(struct hci_dev *hdev, bool enable); + +#else + +static inline void aosp_do_open(struct hci_dev *hdev) {} +static inline void aosp_do_close(struct hci_dev *hdev) {} + +static inline bool aosp_has_quality_report(struct hci_dev *hdev) +{ + return false; +} + +static inline int aosp_set_quality_report(struct hci_dev *hdev, bool enable) +{ + return -EOPNOTSUPP; +} + +#endif diff --git a/net/bluetooth/bnep/Kconfig b/net/bluetooth/bnep/Kconfig new file mode 100644 index 000000000..aac02b5b0 --- /dev/null +++ b/net/bluetooth/bnep/Kconfig @@ -0,0 +1,25 @@ +# SPDX-License-Identifier: GPL-2.0-only +config BT_BNEP + tristate "BNEP protocol support" + depends on BT_BREDR + select CRC32 + help + BNEP (Bluetooth Network Encapsulation Protocol) is an Ethernet + emulation layer on top of Bluetooth. BNEP is required for + Bluetooth PAN (Personal Area Network). + + Say Y here to compile BNEP support into the kernel or say M to + compile it as a module (bnep). + +config BT_BNEP_MC_FILTER + bool "Multicast filter support" + depends on BT_BNEP + help + This option enables the multicast filter support for BNEP. + +config BT_BNEP_PROTO_FILTER + bool "Protocol filter support" + depends on BT_BNEP + help + This option enables the protocol filter support for BNEP. 
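For reference, building BNEP as a module with both optional filters enabled corresponds to a kernel configuration along these lines (an illustrative .config excerpt, not part of this commit):

CONFIG_BT=m
CONFIG_BT_BREDR=y
CONFIG_BT_BNEP=m
CONFIG_BT_BNEP_MC_FILTER=y
CONFIG_BT_BNEP_PROTO_FILTER=y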
+ diff --git a/net/bluetooth/bnep/Makefile b/net/bluetooth/bnep/Makefile new file mode 100644 index 000000000..8af9d56bb --- /dev/null +++ b/net/bluetooth/bnep/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Linux Bluetooth BNEP layer. +# + +obj-$(CONFIG_BT_BNEP) += bnep.o + +bnep-objs := core.o sock.o netdev.o diff --git a/net/bluetooth/bnep/bnep.h b/net/bluetooth/bnep/bnep.h new file mode 100644 index 000000000..9680473ed --- /dev/null +++ b/net/bluetooth/bnep/bnep.h @@ -0,0 +1,173 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + BNEP protocol definition for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> + +*/ + +#ifndef _BNEP_H +#define _BNEP_H + +#include <linux/types.h> +#include <linux/crc32.h> +#include <net/bluetooth/bluetooth.h> + +/* Limits */ +#define BNEP_MAX_PROTO_FILTERS 5 +#define BNEP_MAX_MULTICAST_FILTERS 20 + +/* UUIDs */ +#define BNEP_BASE_UUID 0x0000000000001000800000805F9B34FB +#define BNEP_UUID16 0x02 +#define BNEP_UUID32 0x04 +#define BNEP_UUID128 0x16 + +#define BNEP_SVC_PANU 0x1115 +#define BNEP_SVC_NAP 0x1116 +#define BNEP_SVC_GN 0x1117 + +/* Packet types */ +#define BNEP_GENERAL 0x00 +#define BNEP_CONTROL 0x01 +#define BNEP_COMPRESSED 0x02 +#define BNEP_COMPRESSED_SRC_ONLY 0x03 +#define BNEP_COMPRESSED_DST_ONLY 0x04 + +/* Control types */ +#define BNEP_CMD_NOT_UNDERSTOOD 0x00 +#define BNEP_SETUP_CONN_REQ 0x01 +#define BNEP_SETUP_CONN_RSP 0x02 +#define BNEP_FILTER_NET_TYPE_SET 0x03 +#define BNEP_FILTER_NET_TYPE_RSP 0x04 +#define BNEP_FILTER_MULTI_ADDR_SET 0x05 +#define BNEP_FILTER_MULTI_ADDR_RSP 0x06 + +/* Extension types */ +#define BNEP_EXT_CONTROL 0x00 + +/* Response messages */ +#define BNEP_SUCCESS 0x00 + +#define BNEP_CONN_INVALID_DST 0x01 +#define BNEP_CONN_INVALID_SRC 0x02 +#define BNEP_CONN_INVALID_SVC 0x03 +#define BNEP_CONN_NOT_ALLOWED 0x04 + +#define BNEP_FILTER_UNSUPPORTED_REQ 0x01 +#define BNEP_FILTER_INVALID_RANGE 0x02 +#define BNEP_FILTER_INVALID_MCADDR 0x02 +#define BNEP_FILTER_LIMIT_REACHED 0x03 +#define BNEP_FILTER_DENIED_SECURITY 0x04 + +/* L2CAP settings */ +#define BNEP_MTU 1691 +#define BNEP_PSM 0x0f +#define BNEP_FLUSH_TO 0xffff +#define BNEP_CONNECT_TO 15 +#define BNEP_FILTER_TO 15 + +/* Headers */ +#define BNEP_TYPE_MASK 0x7f +#define BNEP_EXT_HEADER 0x80 + +struct bnep_setup_conn_req { + __u8 type; + __u8 ctrl; + __u8 uuid_size; + __u8 service[]; +} __packed; + +struct bnep_set_filter_req { + __u8 type; + __u8 ctrl; + __be16 len; + __u8 list[]; +} __packed; + +struct bnep_control_rsp { + __u8 type; + __u8 ctrl; + __be16 resp; +} __packed; + +struct bnep_ext_hdr { + __u8 type; + __u8 len; + __u8 data[]; +} __packed; + +/* BNEP ioctl defines */ +#define BNEPCONNADD _IOW('B', 200, int) +#define BNEPCONNDEL _IOW('B', 201, int) +#define BNEPGETCONNLIST _IOR('B', 210, int) +#define BNEPGETCONNINFO _IOR('B', 211, int) +#define BNEPGETSUPPFEAT _IOR('B', 212, int) + +#define BNEP_SETUP_RESPONSE 0 +#define BNEP_SETUP_RSP_SENT 10 + +struct bnep_connadd_req { + int sock; /* Connected socket */ + __u32 flags; + __u16 role; + char device[16]; /* Name of the Ethernet device */ +}; + +struct bnep_conndel_req { + __u32 flags; + __u8 dst[ETH_ALEN]; +}; + +struct bnep_conninfo { + __u32 flags; + __u16 role; + __u16 state; + __u8 dst[ETH_ALEN]; + char device[16]; +}; + +struct bnep_connlist_req { + __u32 cnum; + struct bnep_conninfo __user *ci; +}; + +struct bnep_proto_filter { + __u16 start; + __u16 end; +}; + +int bnep_add_connection(struct bnep_connadd_req *req, 
struct socket *sock); +int bnep_del_connection(struct bnep_conndel_req *req); +int bnep_get_connlist(struct bnep_connlist_req *req); +int bnep_get_conninfo(struct bnep_conninfo *ci); + +/* BNEP sessions */ +struct bnep_session { + struct list_head list; + + unsigned int role; + unsigned long state; + unsigned long flags; + atomic_t terminate; + struct task_struct *task; + + struct ethhdr eh; + struct msghdr msg; + + struct bnep_proto_filter proto_filter[BNEP_MAX_PROTO_FILTERS]; + unsigned long long mc_filter; + + struct socket *sock; + struct net_device *dev; +}; + +void bnep_net_setup(struct net_device *dev); +int bnep_sock_init(void); +void bnep_sock_cleanup(void); + +static inline int bnep_mc_hash(__u8 *addr) +{ + return crc32_be(~0, addr, ETH_ALEN) >> 26; +} + +#endif diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c new file mode 100644 index 000000000..5a6a49885 --- /dev/null +++ b/net/bluetooth/bnep/core.c @@ -0,0 +1,769 @@ +/* + BNEP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2001-2002 Inventel Systemes + Written 2001-2002 by + Clément Moreau <clement.moreau@inventel.fr> + David Libault <david.libault@inventel.fr> + + Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#include <linux/module.h> +#include <linux/kthread.h> +#include <linux/file.h> +#include <linux/etherdevice.h> +#include <asm/unaligned.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/l2cap.h> +#include <net/bluetooth/hci_core.h> + +#include "bnep.h" + +#define VERSION "1.3" + +static bool compress_src = true; +static bool compress_dst = true; + +static LIST_HEAD(bnep_session_list); +static DECLARE_RWSEM(bnep_session_sem); + +static struct bnep_session *__bnep_get_session(u8 *dst) +{ + struct bnep_session *s; + + BT_DBG(""); + + list_for_each_entry(s, &bnep_session_list, list) + if (ether_addr_equal(dst, s->eh.h_source)) + return s; + + return NULL; +} + +static void __bnep_link_session(struct bnep_session *s) +{ + list_add(&s->list, &bnep_session_list); +} + +static void __bnep_unlink_session(struct bnep_session *s) +{ + list_del(&s->list); +} + +static int bnep_send(struct bnep_session *s, void *data, size_t len) +{ + struct socket *sock = s->sock; + struct kvec iv = { data, len }; + + return kernel_sendmsg(sock, &s->msg, &iv, 1, len); +} + +static int bnep_send_rsp(struct bnep_session *s, u8 ctrl, u16 resp) +{ + struct bnep_control_rsp rsp; + rsp.type = BNEP_CONTROL; + rsp.ctrl = ctrl; + rsp.resp = htons(resp); + return bnep_send(s, &rsp, sizeof(rsp)); +} + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER +static inline void bnep_set_default_proto_filter(struct bnep_session *s) +{ + /* (IPv4, ARP) */ + s->proto_filter[0].start = ETH_P_IP; + s->proto_filter[0].end = ETH_P_ARP; + /* (RARP, AppleTalk) */ + s->proto_filter[1].start = ETH_P_RARP; + s->proto_filter[1].end = ETH_P_AARP; + /* (IPX, IPv6) */ + s->proto_filter[2].start = ETH_P_IPX; + s->proto_filter[2].end = ETH_P_IPV6; +} +#endif + +static int bnep_ctrl_set_netfilter(struct bnep_session *s, __be16 *data, int len) +{ + int n; + + if (len < 2) + return -EILSEQ; + + n = get_unaligned_be16(data); + data++; + len -= 2; + + if (len < n) + return -EILSEQ; + + BT_DBG("filter len %d", n); + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER + n /= 4; + if (n <= BNEP_MAX_PROTO_FILTERS) { + struct bnep_proto_filter *f = s->proto_filter; + int i; + + for (i = 0; i < n; i++) { + f[i].start = get_unaligned_be16(data++); + f[i].end = get_unaligned_be16(data++); + + BT_DBG("proto filter start %u end %u", + f[i].start, f[i].end); + } + + if (i < BNEP_MAX_PROTO_FILTERS) + memset(f + i, 0, sizeof(*f)); + + if (n == 0) + bnep_set_default_proto_filter(s); + + bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_SUCCESS); + } else { + bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_FILTER_LIMIT_REACHED); + } +#else + bnep_send_rsp(s, BNEP_FILTER_NET_TYPE_RSP, BNEP_FILTER_UNSUPPORTED_REQ); +#endif + return 0; +} + +static int bnep_ctrl_set_mcfilter(struct bnep_session *s, u8 *data, int len) +{ + int n; + + if (len < 2) + return -EILSEQ; + + n = get_unaligned_be16(data); + data += 2; + len -= 2; + + if (len < n) + return -EILSEQ; + + BT_DBG("filter len %d", n); + +#ifdef CONFIG_BT_BNEP_MC_FILTER + n /= (ETH_ALEN * 2); + + if (n > 0) { + int i; + + s->mc_filter = 0; + + /* Always send broadcast */ + set_bit(bnep_mc_hash(s->dev->broadcast), (ulong *) &s->mc_filter); + + /* Add address ranges to the multicast hash */ + for (; n > 0; n--) { + u8 a1[6], *a2; + + memcpy(a1, data, ETH_ALEN); + data += ETH_ALEN; + a2 = data; + data += ETH_ALEN; + + BT_DBG("mc filter %pMR -> %pMR", a1, a2); + + /* Iterate from a1 to a2 */ + set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); + while (memcmp(a1, a2, 6) < 0 && s->mc_filter != ~0LL) { + /* Increment a1 */ + i = 5; + 
while (i >= 0 && ++a1[i--] == 0) + ; + + set_bit(bnep_mc_hash(a1), (ulong *) &s->mc_filter); + } + } + } + + BT_DBG("mc filter hash 0x%llx", s->mc_filter); + + bnep_send_rsp(s, BNEP_FILTER_MULTI_ADDR_RSP, BNEP_SUCCESS); +#else + bnep_send_rsp(s, BNEP_FILTER_MULTI_ADDR_RSP, BNEP_FILTER_UNSUPPORTED_REQ); +#endif + return 0; +} + +static int bnep_rx_control(struct bnep_session *s, void *data, int len) +{ + u8 cmd = *(u8 *)data; + int err = 0; + + data++; + len--; + + switch (cmd) { + case BNEP_CMD_NOT_UNDERSTOOD: + case BNEP_SETUP_CONN_RSP: + case BNEP_FILTER_NET_TYPE_RSP: + case BNEP_FILTER_MULTI_ADDR_RSP: + /* Ignore these for now */ + break; + + case BNEP_FILTER_NET_TYPE_SET: + err = bnep_ctrl_set_netfilter(s, data, len); + break; + + case BNEP_FILTER_MULTI_ADDR_SET: + err = bnep_ctrl_set_mcfilter(s, data, len); + break; + + case BNEP_SETUP_CONN_REQ: + /* Successful response should be sent only once */ + if (test_bit(BNEP_SETUP_RESPONSE, &s->flags) && + !test_and_set_bit(BNEP_SETUP_RSP_SENT, &s->flags)) + err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, + BNEP_SUCCESS); + else + err = bnep_send_rsp(s, BNEP_SETUP_CONN_RSP, + BNEP_CONN_NOT_ALLOWED); + break; + + default: { + u8 pkt[3]; + pkt[0] = BNEP_CONTROL; + pkt[1] = BNEP_CMD_NOT_UNDERSTOOD; + pkt[2] = cmd; + err = bnep_send(s, pkt, sizeof(pkt)); + } + break; + } + + return err; +} + +static int bnep_rx_extension(struct bnep_session *s, struct sk_buff *skb) +{ + struct bnep_ext_hdr *h; + int err = 0; + + do { + h = (void *) skb->data; + if (!skb_pull(skb, sizeof(*h))) { + err = -EILSEQ; + break; + } + + BT_DBG("type 0x%x len %u", h->type, h->len); + + switch (h->type & BNEP_TYPE_MASK) { + case BNEP_EXT_CONTROL: + bnep_rx_control(s, skb->data, skb->len); + break; + + default: + /* Unknown extension, skip it. 
*/ + break; + } + + if (!skb_pull(skb, h->len)) { + err = -EILSEQ; + break; + } + } while (!err && (h->type & BNEP_EXT_HEADER)); + + return err; +} + +static u8 __bnep_rx_hlen[] = { + ETH_HLEN, /* BNEP_GENERAL */ + 0, /* BNEP_CONTROL */ + 2, /* BNEP_COMPRESSED */ + ETH_ALEN + 2, /* BNEP_COMPRESSED_SRC_ONLY */ + ETH_ALEN + 2 /* BNEP_COMPRESSED_DST_ONLY */ +}; + +static int bnep_rx_frame(struct bnep_session *s, struct sk_buff *skb) +{ + struct net_device *dev = s->dev; + struct sk_buff *nskb; + u8 type, ctrl_type; + + dev->stats.rx_bytes += skb->len; + + type = *(u8 *) skb->data; + skb_pull(skb, 1); + ctrl_type = *(u8 *)skb->data; + + if ((type & BNEP_TYPE_MASK) >= sizeof(__bnep_rx_hlen)) + goto badframe; + + if ((type & BNEP_TYPE_MASK) == BNEP_CONTROL) { + if (bnep_rx_control(s, skb->data, skb->len) < 0) { + dev->stats.tx_errors++; + kfree_skb(skb); + return 0; + } + + if (!(type & BNEP_EXT_HEADER)) { + kfree_skb(skb); + return 0; + } + + /* Verify and pull ctrl message since it's already processed */ + switch (ctrl_type) { + case BNEP_SETUP_CONN_REQ: + /* Pull: ctrl type (1 b), len (1 b), data (len bytes) */ + if (!skb_pull(skb, 2 + *(u8 *)(skb->data + 1) * 2)) + goto badframe; + break; + case BNEP_FILTER_MULTI_ADDR_SET: + case BNEP_FILTER_NET_TYPE_SET: + /* Pull: ctrl type (1 b), len (2 b), data (len bytes) */ + if (!skb_pull(skb, 3 + *(u16 *)(skb->data + 1) * 2)) + goto badframe; + break; + default: + kfree_skb(skb); + return 0; + } + } else { + skb_reset_mac_header(skb); + + /* Verify and pull out header */ + if (!skb_pull(skb, __bnep_rx_hlen[type & BNEP_TYPE_MASK])) + goto badframe; + + s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); + } + + if (type & BNEP_EXT_HEADER) { + if (bnep_rx_extension(s, skb) < 0) + goto badframe; + } + + /* Strip 802.1p header */ + if (ntohs(s->eh.h_proto) == ETH_P_8021Q) { + if (!skb_pull(skb, 4)) + goto badframe; + s->eh.h_proto = get_unaligned((__be16 *) (skb->data - 2)); + } + + /* We have to alloc new skb and copy data here :(. Because original skb + * may not be modified and because of the alignment requirements. 
*/ + nskb = alloc_skb(2 + ETH_HLEN + skb->len, GFP_KERNEL); + if (!nskb) { + dev->stats.rx_dropped++; + kfree_skb(skb); + return -ENOMEM; + } + skb_reserve(nskb, 2); + + /* Decompress header and construct ether frame */ + switch (type & BNEP_TYPE_MASK) { + case BNEP_COMPRESSED: + __skb_put_data(nskb, &s->eh, ETH_HLEN); + break; + + case BNEP_COMPRESSED_SRC_ONLY: + __skb_put_data(nskb, s->eh.h_dest, ETH_ALEN); + __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); + put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); + break; + + case BNEP_COMPRESSED_DST_ONLY: + __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN); + __skb_put_data(nskb, s->eh.h_source, ETH_ALEN + 2); + break; + + case BNEP_GENERAL: + __skb_put_data(nskb, skb_mac_header(skb), ETH_ALEN * 2); + put_unaligned(s->eh.h_proto, (__be16 *) __skb_put(nskb, 2)); + break; + } + + skb_copy_from_linear_data(skb, __skb_put(nskb, skb->len), skb->len); + kfree_skb(skb); + + dev->stats.rx_packets++; + nskb->ip_summed = CHECKSUM_NONE; + nskb->protocol = eth_type_trans(nskb, dev); + netif_rx(nskb); + return 0; + +badframe: + dev->stats.rx_errors++; + kfree_skb(skb); + return 0; +} + +static u8 __bnep_tx_types[] = { + BNEP_GENERAL, + BNEP_COMPRESSED_SRC_ONLY, + BNEP_COMPRESSED_DST_ONLY, + BNEP_COMPRESSED +}; + +static int bnep_tx_frame(struct bnep_session *s, struct sk_buff *skb) +{ + struct ethhdr *eh = (void *) skb->data; + struct socket *sock = s->sock; + struct kvec iv[3]; + int len = 0, il = 0; + u8 type = 0; + + BT_DBG("skb %p dev %p type %u", skb, skb->dev, skb->pkt_type); + + if (!skb->dev) { + /* Control frame sent by us */ + goto send; + } + + iv[il++] = (struct kvec) { &type, 1 }; + len++; + + if (compress_src && ether_addr_equal(eh->h_dest, s->eh.h_source)) + type |= 0x01; + + if (compress_dst && ether_addr_equal(eh->h_source, s->eh.h_dest)) + type |= 0x02; + + if (type) + skb_pull(skb, ETH_ALEN * 2); + + type = __bnep_tx_types[type]; + switch (type) { + case BNEP_COMPRESSED_SRC_ONLY: + iv[il++] = (struct kvec) { eh->h_source, ETH_ALEN }; + len += ETH_ALEN; + break; + + case BNEP_COMPRESSED_DST_ONLY: + iv[il++] = (struct kvec) { eh->h_dest, ETH_ALEN }; + len += ETH_ALEN; + break; + } + +send: + iv[il++] = (struct kvec) { skb->data, skb->len }; + len += skb->len; + + /* FIXME: linearize skb */ + { + len = kernel_sendmsg(sock, &s->msg, iv, il, len); + } + kfree_skb(skb); + + if (len > 0) { + s->dev->stats.tx_bytes += len; + s->dev->stats.tx_packets++; + return 0; + } + + return len; +} + +static int bnep_session(void *arg) +{ + struct bnep_session *s = arg; + struct net_device *dev = s->dev; + struct sock *sk = s->sock->sk; + struct sk_buff *skb; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG(""); + + set_user_nice(current, -15); + + add_wait_queue(sk_sleep(sk), &wait); + while (1) { + if (atomic_read(&s->terminate)) + break; + /* RX */ + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + skb_orphan(skb); + if (!skb_linearize(skb)) + bnep_rx_frame(s, skb); + else + kfree_skb(skb); + } + + if (sk->sk_state != BT_CONNECTED) + break; + + /* TX */ + while ((skb = skb_dequeue(&sk->sk_write_queue))) + if (bnep_tx_frame(s, skb)) + break; + netif_wake_queue(dev); + + /* + * wait_woken() performs the necessary memory barriers + * for us; see the header comment for this primitive. 
+ */ + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + remove_wait_queue(sk_sleep(sk), &wait); + + /* Cleanup session */ + down_write(&bnep_session_sem); + + /* Delete network device */ + unregister_netdev(dev); + + /* Wakeup user-space polling for socket errors */ + s->sock->sk->sk_err = EUNATCH; + + wake_up_interruptible(sk_sleep(s->sock->sk)); + + /* Release the socket */ + fput(s->sock->file); + + __bnep_unlink_session(s); + + up_write(&bnep_session_sem); + free_netdev(dev); + module_put_and_kthread_exit(0); + return 0; +} + +static struct device *bnep_get_device(struct bnep_session *session) +{ + struct l2cap_conn *conn = l2cap_pi(session->sock->sk)->chan->conn; + + if (!conn || !conn->hcon) + return NULL; + + return &conn->hcon->dev; +} + +static struct device_type bnep_type = { + .name = "bluetooth", +}; + +int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock) +{ + u32 valid_flags = BIT(BNEP_SETUP_RESPONSE); + struct net_device *dev; + struct bnep_session *s, *ss; + u8 dst[ETH_ALEN], src[ETH_ALEN]; + int err; + + BT_DBG(""); + + if (!l2cap_is_socket(sock)) + return -EBADFD; + + if (req->flags & ~valid_flags) + return -EINVAL; + + baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst); + baswap((void *) src, &l2cap_pi(sock->sk)->chan->src); + + /* session struct allocated as private part of net_device */ + dev = alloc_netdev(sizeof(struct bnep_session), + (*req->device) ? req->device : "bnep%d", + NET_NAME_UNKNOWN, + bnep_net_setup); + if (!dev) + return -ENOMEM; + + down_write(&bnep_session_sem); + + ss = __bnep_get_session(dst); + if (ss && ss->state == BT_CONNECTED) { + err = -EEXIST; + goto failed; + } + + s = netdev_priv(dev); + + /* This is rx header therefore addresses are swapped. + * ie. eh.h_dest is our local address. */ + memcpy(s->eh.h_dest, &src, ETH_ALEN); + memcpy(s->eh.h_source, &dst, ETH_ALEN); + eth_hw_addr_set(dev, s->eh.h_dest); + + s->dev = dev; + s->sock = sock; + s->role = req->role; + s->state = BT_CONNECTED; + s->flags = req->flags; + + s->msg.msg_flags = MSG_NOSIGNAL; + +#ifdef CONFIG_BT_BNEP_MC_FILTER + /* Set default mc filter to not filter out any mc addresses + * as defined in the BNEP specification (revision 0.95a) + * http://grouper.ieee.org/groups/802/15/Bluetooth/BNEP.pdf + */ + s->mc_filter = ~0LL; +#endif + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER + /* Set default protocol filter */ + bnep_set_default_proto_filter(s); +#endif + + SET_NETDEV_DEV(dev, bnep_get_device(s)); + SET_NETDEV_DEVTYPE(dev, &bnep_type); + + err = register_netdev(dev); + if (err) + goto failed; + + __bnep_link_session(s); + + __module_get(THIS_MODULE); + s->task = kthread_run(bnep_session, s, "kbnepd %s", dev->name); + if (IS_ERR(s->task)) { + /* Session thread start failed, gotta cleanup. 
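+ * Undo everything done since register_netdev(): drop the module
+ * reference, unregister the device and unlink the session again.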
*/ + module_put(THIS_MODULE); + unregister_netdev(dev); + __bnep_unlink_session(s); + err = PTR_ERR(s->task); + goto failed; + } + + up_write(&bnep_session_sem); + strcpy(req->device, dev->name); + return 0; + +failed: + up_write(&bnep_session_sem); + free_netdev(dev); + return err; +} + +int bnep_del_connection(struct bnep_conndel_req *req) +{ + u32 valid_flags = 0; + struct bnep_session *s; + int err = 0; + + BT_DBG(""); + + if (req->flags & ~valid_flags) + return -EINVAL; + + down_read(&bnep_session_sem); + + s = __bnep_get_session(req->dst); + if (s) { + atomic_inc(&s->terminate); + wake_up_interruptible(sk_sleep(s->sock->sk)); + } else + err = -ENOENT; + + up_read(&bnep_session_sem); + return err; +} + +static void __bnep_copy_ci(struct bnep_conninfo *ci, struct bnep_session *s) +{ + u32 valid_flags = BIT(BNEP_SETUP_RESPONSE); + + memset(ci, 0, sizeof(*ci)); + memcpy(ci->dst, s->eh.h_source, ETH_ALEN); + strcpy(ci->device, s->dev->name); + ci->flags = s->flags & valid_flags; + ci->state = s->state; + ci->role = s->role; +} + +int bnep_get_connlist(struct bnep_connlist_req *req) +{ + struct bnep_session *s; + int err = 0, n = 0; + + down_read(&bnep_session_sem); + + list_for_each_entry(s, &bnep_session_list, list) { + struct bnep_conninfo ci; + + __bnep_copy_ci(&ci, s); + + if (copy_to_user(req->ci, &ci, sizeof(ci))) { + err = -EFAULT; + break; + } + + if (++n >= req->cnum) + break; + + req->ci++; + } + req->cnum = n; + + up_read(&bnep_session_sem); + return err; +} + +int bnep_get_conninfo(struct bnep_conninfo *ci) +{ + struct bnep_session *s; + int err = 0; + + down_read(&bnep_session_sem); + + s = __bnep_get_session(ci->dst); + if (s) + __bnep_copy_ci(ci, s); + else + err = -ENOENT; + + up_read(&bnep_session_sem); + return err; +} + +static int __init bnep_init(void) +{ + char flt[50] = ""; + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER + strcat(flt, "protocol "); +#endif + +#ifdef CONFIG_BT_BNEP_MC_FILTER + strcat(flt, "multicast"); +#endif + + BT_INFO("BNEP (Ethernet Emulation) ver %s", VERSION); + if (flt[0]) + BT_INFO("BNEP filters: %s", flt); + + bnep_sock_init(); + return 0; +} + +static void __exit bnep_exit(void) +{ + bnep_sock_cleanup(); +} + +module_init(bnep_init); +module_exit(bnep_exit); + +module_param(compress_src, bool, 0644); +MODULE_PARM_DESC(compress_src, "Compress sources headers"); + +module_param(compress_dst, bool, 0644); +MODULE_PARM_DESC(compress_dst, "Compress destination headers"); + +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); +MODULE_DESCRIPTION("Bluetooth BNEP ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-4"); diff --git a/net/bluetooth/bnep/netdev.c b/net/bluetooth/bnep/netdev.c new file mode 100644 index 000000000..cc1cff631 --- /dev/null +++ b/net/bluetooth/bnep/netdev.c @@ -0,0 +1,230 @@ +/* + BNEP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2001-2002 Inventel Systemes + Written 2001-2002 by + Clément Moreau <clement.moreau@inventel.fr> + David Libault <david.libault@inventel.fr> + + Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <linux/etherdevice.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> + +#include "bnep.h" + +#define BNEP_TX_QUEUE_LEN 20 + +static int bnep_net_open(struct net_device *dev) +{ + netif_start_queue(dev); + return 0; +} + +static int bnep_net_close(struct net_device *dev) +{ + netif_stop_queue(dev); + return 0; +} + +static void bnep_net_set_mc_list(struct net_device *dev) +{ +#ifdef CONFIG_BT_BNEP_MC_FILTER + struct bnep_session *s = netdev_priv(dev); + struct sock *sk = s->sock->sk; + struct bnep_set_filter_req *r; + struct sk_buff *skb; + int size; + + BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev)); + + size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; + skb = alloc_skb(size, GFP_ATOMIC); + if (!skb) { + BT_ERR("%s Multicast list allocation failed", dev->name); + return; + } + + r = (void *) skb->data; + __skb_put(skb, sizeof(*r)); + + r->type = BNEP_CONTROL; + r->ctrl = BNEP_FILTER_MULTI_ADDR_SET; + + if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { + u8 start[ETH_ALEN] = { 0x01 }; + + /* Request all addresses */ + __skb_put_data(skb, start, ETH_ALEN); + __skb_put_data(skb, dev->broadcast, ETH_ALEN); + r->len = htons(ETH_ALEN * 2); + } else { + struct netdev_hw_addr *ha; + int i, len = skb->len; + + if (dev->flags & IFF_BROADCAST) { + __skb_put_data(skb, dev->broadcast, ETH_ALEN); + __skb_put_data(skb, dev->broadcast, ETH_ALEN); + } + + /* FIXME: We should group addresses here. */ + + i = 0; + netdev_for_each_mc_addr(ha, dev) { + if (i == BNEP_MAX_MULTICAST_FILTERS) + break; + __skb_put_data(skb, ha->addr, ETH_ALEN); + __skb_put_data(skb, ha->addr, ETH_ALEN); + + i++; + } + r->len = htons(skb->len - len); + } + + skb_queue_tail(&sk->sk_write_queue, skb); + wake_up_interruptible(sk_sleep(sk)); +#endif +} + +static int bnep_net_set_mac_addr(struct net_device *dev, void *arg) +{ + BT_DBG("%s", dev->name); + return 0; +} + +static void bnep_net_timeout(struct net_device *dev, unsigned int txqueue) +{ + BT_DBG("net_timeout"); + netif_wake_queue(dev); +} + +#ifdef CONFIG_BT_BNEP_MC_FILTER +static int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) +{ + struct ethhdr *eh = (void *) skb->data; + + if ((eh->h_dest[0] & 1) && !test_bit(bnep_mc_hash(eh->h_dest), (ulong *) &s->mc_filter)) + return 1; + return 0; +} +#endif + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER +/* Determine ether protocol. Based on eth_type_trans. 
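+ * Values of h_proto at or above ETH_P_802_3_MIN are real EtherTypes;
+ * smaller values are 802.3 frame lengths, for which a leading 0xFFFF
+ * word marks raw 802.3 and anything else 802.2 LLC.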
*/ +static u16 bnep_net_eth_proto(struct sk_buff *skb) +{ + struct ethhdr *eh = (void *) skb->data; + u16 proto = ntohs(eh->h_proto); + + if (proto >= ETH_P_802_3_MIN) + return proto; + + if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) + return ETH_P_802_3; + + return ETH_P_802_2; +} + +static int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) +{ + u16 proto = bnep_net_eth_proto(skb); + struct bnep_proto_filter *f = s->proto_filter; + int i; + + for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) { + if (proto >= f[i].start && proto <= f[i].end) + return 0; + } + + BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto); + return 1; +} +#endif + +static netdev_tx_t bnep_net_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct bnep_session *s = netdev_priv(dev); + struct sock *sk = s->sock->sk; + + BT_DBG("skb %p, dev %p", skb, dev); + +#ifdef CONFIG_BT_BNEP_MC_FILTER + if (bnep_net_mc_filter(skb, s)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } +#endif + +#ifdef CONFIG_BT_BNEP_PROTO_FILTER + if (bnep_net_proto_filter(skb, s)) { + kfree_skb(skb); + return NETDEV_TX_OK; + } +#endif + + /* + * We cannot send L2CAP packets from here as we are potentially in a bh. + * So we have to queue them and wake up session thread which is sleeping + * on the sk_sleep(sk). + */ + netif_trans_update(dev); + skb_queue_tail(&sk->sk_write_queue, skb); + wake_up_interruptible(sk_sleep(sk)); + + if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { + BT_DBG("tx queue is full"); + + /* Stop queuing. + * Session thread will do netif_wake_queue() */ + netif_stop_queue(dev); + } + + return NETDEV_TX_OK; +} + +static const struct net_device_ops bnep_netdev_ops = { + .ndo_open = bnep_net_open, + .ndo_stop = bnep_net_close, + .ndo_start_xmit = bnep_net_xmit, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_rx_mode = bnep_net_set_mc_list, + .ndo_set_mac_address = bnep_net_set_mac_addr, + .ndo_tx_timeout = bnep_net_timeout, + +}; + +void bnep_net_setup(struct net_device *dev) +{ + + eth_broadcast_addr(dev->broadcast); + dev->addr_len = ETH_ALEN; + + ether_setup(dev); + dev->min_mtu = 0; + dev->max_mtu = ETH_MAX_MTU; + dev->priv_flags &= ~IFF_TX_SKB_SHARING; + dev->netdev_ops = &bnep_netdev_ops; + + dev->watchdog_timeo = HZ * 2; +} diff --git a/net/bluetooth/bnep/sock.c b/net/bluetooth/bnep/sock.c new file mode 100644 index 000000000..57d509d77 --- /dev/null +++ b/net/bluetooth/bnep/sock.c @@ -0,0 +1,268 @@ +/* + BNEP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2001-2002 Inventel Systemes + Written 2001-2002 by + David Libault <david.libault@inventel.fr> + + Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <linux/compat.h> +#include <linux/export.h> +#include <linux/file.h> + +#include "bnep.h" + +static struct bt_sock_list bnep_sk_list = { + .lock = __RW_LOCK_UNLOCKED(bnep_sk_list.lock) +}; + +static int bnep_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!sk) + return 0; + + bt_sock_unlink(&bnep_sk_list, sk); + + sock_orphan(sk); + sock_put(sk); + return 0; +} + +static int do_bnep_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp) +{ + struct bnep_connlist_req cl; + struct bnep_connadd_req ca; + struct bnep_conndel_req cd; + struct bnep_conninfo ci; + struct socket *nsock; + __u32 supp_feat = BIT(BNEP_SETUP_RESPONSE); + int err; + + BT_DBG("cmd %x arg %p", cmd, argp); + + switch (cmd) { + case BNEPCONNADD: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&ca, argp, sizeof(ca))) + return -EFAULT; + + nsock = sockfd_lookup(ca.sock, &err); + if (!nsock) + return err; + + if (nsock->sk->sk_state != BT_CONNECTED) { + sockfd_put(nsock); + return -EBADFD; + } + ca.device[sizeof(ca.device)-1] = 0; + + err = bnep_add_connection(&ca, nsock); + if (!err) { + if (copy_to_user(argp, &ca, sizeof(ca))) + err = -EFAULT; + } else + sockfd_put(nsock); + + return err; + + case BNEPCONNDEL: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&cd, argp, sizeof(cd))) + return -EFAULT; + + return bnep_del_connection(&cd); + + case BNEPGETCONNLIST: + if (copy_from_user(&cl, argp, sizeof(cl))) + return -EFAULT; + + if (cl.cnum <= 0) + return -EINVAL; + + err = bnep_get_connlist(&cl); + if (!err && copy_to_user(argp, &cl, sizeof(cl))) + return -EFAULT; + + return err; + + case BNEPGETCONNINFO: + if (copy_from_user(&ci, argp, sizeof(ci))) + return -EFAULT; + + err = bnep_get_conninfo(&ci); + if (!err && copy_to_user(argp, &ci, sizeof(ci))) + return -EFAULT; + + return err; + + case BNEPGETSUPPFEAT: + if (copy_to_user(argp, &supp_feat, sizeof(supp_feat))) + return -EFAULT; + + return 0; + + default: + return -EINVAL; + } + + return 0; +} + +static int bnep_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + return do_bnep_sock_ioctl(sock, cmd, (void __user *)arg); +} + +#ifdef CONFIG_COMPAT +static int bnep_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + void __user *argp = compat_ptr(arg); + if (cmd == BNEPGETCONNLIST) { + struct bnep_connlist_req cl; + unsigned __user *p = argp; + u32 uci; + int err; + + if (get_user(cl.cnum, p) || get_user(uci, p + 1)) + return -EFAULT; + + cl.ci = compat_ptr(uci); + + if (cl.cnum <= 0) + return -EINVAL; + + err = bnep_get_connlist(&cl); + + if (!err && put_user(cl.cnum, p)) + err = -EFAULT; + + return err; + } + + return do_bnep_sock_ioctl(sock, cmd, argp); +} +#endif + +static const struct proto_ops bnep_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = bnep_sock_release, + .ioctl = bnep_sock_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = bnep_sock_compat_ioctl, +#endif + .bind = sock_no_bind, + .getname = sock_no_getname, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .mmap = sock_no_mmap +}; + +static struct proto bnep_proto = { + 
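+ /* No protocol-private state is needed; the control socket only
+ * carries ioctls, so the plain bt_sock is large enough. */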
.name = "BNEP", + .owner = THIS_MODULE, + .obj_size = sizeof(struct bt_sock) +}; + +static int bnep_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + if (sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &bnep_proto, kern); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + + sock->ops = &bnep_sock_ops; + + sock->state = SS_UNCONNECTED; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = protocol; + sk->sk_state = BT_OPEN; + + bt_sock_link(&bnep_sk_list, sk); + return 0; +} + +static const struct net_proto_family bnep_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = bnep_sock_create +}; + +int __init bnep_sock_init(void) +{ + int err; + + err = proto_register(&bnep_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_BNEP, &bnep_sock_family_ops); + if (err < 0) { + BT_ERR("Can't register BNEP socket"); + goto error; + } + + err = bt_procfs_init(&init_net, "bnep", &bnep_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create BNEP proc file"); + bt_sock_unregister(BTPROTO_BNEP); + goto error; + } + + BT_INFO("BNEP socket layer initialized"); + + return 0; + +error: + proto_unregister(&bnep_proto); + return err; +} + +void __exit bnep_sock_cleanup(void) +{ + bt_procfs_cleanup(&init_net, "bnep"); + bt_sock_unregister(BTPROTO_BNEP); + proto_unregister(&bnep_proto); +} diff --git a/net/bluetooth/cmtp/Kconfig b/net/bluetooth/cmtp/Kconfig new file mode 100644 index 000000000..c8337786d --- /dev/null +++ b/net/bluetooth/cmtp/Kconfig @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0-only +config BT_CMTP + tristate "CMTP protocol support" + depends on BT_BREDR && ISDN_CAPI + help + CMTP (CAPI Message Transport Protocol) is a transport layer + for CAPI messages. CMTP is required for the Bluetooth Common + ISDN Access Profile. + + Say Y here to compile CMTP support into the kernel or say M to + compile it as module (cmtp). + diff --git a/net/bluetooth/cmtp/Makefile b/net/bluetooth/cmtp/Makefile new file mode 100644 index 000000000..b2262ca97 --- /dev/null +++ b/net/bluetooth/cmtp/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Linux Bluetooth CMTP layer +# + +obj-$(CONFIG_BT_CMTP) += cmtp.o + +cmtp-objs := core.o sock.o capi.o diff --git a/net/bluetooth/cmtp/capi.c b/net/bluetooth/cmtp/capi.c new file mode 100644 index 000000000..f3bedc3b6 --- /dev/null +++ b/net/bluetooth/cmtp/capi.c @@ -0,0 +1,595 @@ +/* + CMTP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <linux/export.h> +#include <linux/proc_fs.h> +#include <linux/seq_file.h> +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/sched/signal.h> +#include <linux/slab.h> +#include <linux/poll.h> +#include <linux/fcntl.h> +#include <linux/skbuff.h> +#include <linux/socket.h> +#include <linux/ioctl.h> +#include <linux/file.h> +#include <linux/wait.h> +#include <linux/kthread.h> +#include <net/sock.h> + +#include <linux/isdn/capilli.h> +#include <linux/isdn/capicmd.h> +#include <linux/isdn/capiutil.h> + +#include "cmtp.h" + +#define CAPI_INTEROPERABILITY 0x20 + +#define CAPI_INTEROPERABILITY_REQ CAPICMD(CAPI_INTEROPERABILITY, CAPI_REQ) +#define CAPI_INTEROPERABILITY_CONF CAPICMD(CAPI_INTEROPERABILITY, CAPI_CONF) +#define CAPI_INTEROPERABILITY_IND CAPICMD(CAPI_INTEROPERABILITY, CAPI_IND) +#define CAPI_INTEROPERABILITY_RESP CAPICMD(CAPI_INTEROPERABILITY, CAPI_RESP) + +#define CAPI_INTEROPERABILITY_REQ_LEN (CAPI_MSG_BASELEN + 2) +#define CAPI_INTEROPERABILITY_CONF_LEN (CAPI_MSG_BASELEN + 4) +#define CAPI_INTEROPERABILITY_IND_LEN (CAPI_MSG_BASELEN + 2) +#define CAPI_INTEROPERABILITY_RESP_LEN (CAPI_MSG_BASELEN + 2) + +#define CAPI_FUNCTION_REGISTER 0 +#define CAPI_FUNCTION_RELEASE 1 +#define CAPI_FUNCTION_GET_PROFILE 2 +#define CAPI_FUNCTION_GET_MANUFACTURER 3 +#define CAPI_FUNCTION_GET_VERSION 4 +#define CAPI_FUNCTION_GET_SERIAL_NUMBER 5 +#define CAPI_FUNCTION_MANUFACTURER 6 +#define CAPI_FUNCTION_LOOPBACK 7 + + +#define CMTP_MSGNUM 1 +#define CMTP_APPLID 2 +#define CMTP_MAPPING 3 + +static struct cmtp_application *cmtp_application_add(struct cmtp_session *session, __u16 appl) +{ + struct cmtp_application *app = kzalloc(sizeof(*app), GFP_KERNEL); + + BT_DBG("session %p application %p appl %u", session, app, appl); + + if (!app) + return NULL; + + app->state = BT_OPEN; + app->appl = appl; + + list_add_tail(&app->list, &session->applications); + + return app; +} + +static void cmtp_application_del(struct cmtp_session *session, struct cmtp_application *app) +{ + BT_DBG("session %p application %p", session, app); + + if (app) { + list_del(&app->list); + kfree(app); + } +} + +static struct cmtp_application *cmtp_application_get(struct cmtp_session *session, int pattern, __u16 value) +{ + struct cmtp_application *app; + + list_for_each_entry(app, &session->applications, list) { + switch (pattern) { + case CMTP_MSGNUM: + if (app->msgnum == value) + return app; + break; + case CMTP_APPLID: + if (app->appl == value) + return app; + break; + case CMTP_MAPPING: + if (app->mapping == value) + return app; + break; + } + } + + return NULL; +} + +static int cmtp_msgnum_get(struct cmtp_session *session) +{ + session->msgnum++; + + if ((session->msgnum & 0xff) > 200) + session->msgnum = CMTP_INITIAL_MSGNUM + 1; + + return session->msgnum; +} + +static void cmtp_send_capimsg(struct cmtp_session *session, struct sk_buff *skb) +{ + struct cmtp_scb *scb = (void *) skb->cb; + + BT_DBG("session %p skb %p len %u", session, skb, skb->len); + + scb->id = -1; + scb->data = (CAPIMSG_COMMAND(skb->data) == CAPI_DATA_B3); + + skb_queue_tail(&session->transmit, skb); + + wake_up_interruptible(sk_sleep(session->sock->sk)); +} + +static void cmtp_send_interopmsg(struct cmtp_session *session, + __u8 subcmd, __u16 appl, __u16 msgnum, + __u16 function, unsigned char *buf, int len) +{ + struct sk_buff *skb; + 
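+ /* Build a CAPI_INTEROPERABILITY message by hand: base header,
+ * interoperability selector 0x0001 (Bluetooth Device Management),
+ * the function word and an optional parameter block of len bytes. */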
unsigned char *s; + + BT_DBG("session %p subcmd 0x%02x appl %u msgnum %u", session, subcmd, appl, msgnum); + + skb = alloc_skb(CAPI_MSG_BASELEN + 6 + len, GFP_ATOMIC); + if (!skb) { + BT_ERR("Can't allocate memory for interoperability packet"); + return; + } + + s = skb_put(skb, CAPI_MSG_BASELEN + 6 + len); + + capimsg_setu16(s, 0, CAPI_MSG_BASELEN + 6 + len); + capimsg_setu16(s, 2, appl); + capimsg_setu8 (s, 4, CAPI_INTEROPERABILITY); + capimsg_setu8 (s, 5, subcmd); + capimsg_setu16(s, 6, msgnum); + + /* Interoperability selector (Bluetooth Device Management) */ + capimsg_setu16(s, 8, 0x0001); + + capimsg_setu8 (s, 10, 3 + len); + capimsg_setu16(s, 11, function); + capimsg_setu8 (s, 13, len); + + if (len > 0) + memcpy(s + 14, buf, len); + + cmtp_send_capimsg(session, skb); +} + +static void cmtp_recv_interopmsg(struct cmtp_session *session, struct sk_buff *skb) +{ + struct capi_ctr *ctrl = &session->ctrl; + struct cmtp_application *application; + __u16 appl, msgnum, func, info; + __u32 controller; + + BT_DBG("session %p skb %p len %u", session, skb, skb->len); + + switch (CAPIMSG_SUBCOMMAND(skb->data)) { + case CAPI_CONF: + if (skb->len < CAPI_MSG_BASELEN + 10) + break; + + func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 5); + info = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 8); + + switch (func) { + case CAPI_FUNCTION_REGISTER: + msgnum = CAPIMSG_MSGID(skb->data); + + application = cmtp_application_get(session, CMTP_MSGNUM, msgnum); + if (application) { + application->state = BT_CONNECTED; + application->msgnum = 0; + application->mapping = CAPIMSG_APPID(skb->data); + wake_up_interruptible(&session->wait); + } + + break; + + case CAPI_FUNCTION_RELEASE: + appl = CAPIMSG_APPID(skb->data); + + application = cmtp_application_get(session, CMTP_MAPPING, appl); + if (application) { + application->state = BT_CLOSED; + application->msgnum = 0; + wake_up_interruptible(&session->wait); + } + + break; + + case CAPI_FUNCTION_GET_PROFILE: + if (skb->len < CAPI_MSG_BASELEN + 11 + sizeof(capi_profile)) + break; + + controller = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 11); + msgnum = CAPIMSG_MSGID(skb->data); + + if (!info && (msgnum == CMTP_INITIAL_MSGNUM)) { + session->ncontroller = controller; + wake_up_interruptible(&session->wait); + break; + } + + if (!info && ctrl) { + memcpy(&ctrl->profile, + skb->data + CAPI_MSG_BASELEN + 11, + sizeof(capi_profile)); + session->state = BT_CONNECTED; + capi_ctr_ready(ctrl); + } + + break; + + case CAPI_FUNCTION_GET_MANUFACTURER: + if (skb->len < CAPI_MSG_BASELEN + 15) + break; + + if (!info && ctrl) { + int len = min_t(uint, CAPI_MANUFACTURER_LEN, + skb->data[CAPI_MSG_BASELEN + 14]); + + memset(ctrl->manu, 0, CAPI_MANUFACTURER_LEN); + strncpy(ctrl->manu, + skb->data + CAPI_MSG_BASELEN + 15, len); + } + + break; + + case CAPI_FUNCTION_GET_VERSION: + if (skb->len < CAPI_MSG_BASELEN + 32) + break; + + if (!info && ctrl) { + ctrl->version.majorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 16); + ctrl->version.minorversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 20); + ctrl->version.majormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 24); + ctrl->version.minormanuversion = CAPIMSG_U32(skb->data, CAPI_MSG_BASELEN + 28); + } + + break; + + case CAPI_FUNCTION_GET_SERIAL_NUMBER: + if (skb->len < CAPI_MSG_BASELEN + 17) + break; + + if (!info && ctrl) { + int len = min_t(uint, CAPI_SERIAL_LEN, + skb->data[CAPI_MSG_BASELEN + 16]); + + memset(ctrl->serial, 0, CAPI_SERIAL_LEN); + strncpy(ctrl->serial, + skb->data + CAPI_MSG_BASELEN + 17, len); + } + + 
break; + } + + break; + + case CAPI_IND: + if (skb->len < CAPI_MSG_BASELEN + 6) + break; + + func = CAPIMSG_U16(skb->data, CAPI_MSG_BASELEN + 3); + + if (func == CAPI_FUNCTION_LOOPBACK) { + int len = min_t(uint, skb->len - CAPI_MSG_BASELEN - 6, + skb->data[CAPI_MSG_BASELEN + 5]); + appl = CAPIMSG_APPID(skb->data); + msgnum = CAPIMSG_MSGID(skb->data); + cmtp_send_interopmsg(session, CAPI_RESP, appl, msgnum, func, + skb->data + CAPI_MSG_BASELEN + 6, len); + } + + break; + } + + kfree_skb(skb); +} + +void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb) +{ + struct capi_ctr *ctrl = &session->ctrl; + struct cmtp_application *application; + __u16 appl; + __u32 contr; + + BT_DBG("session %p skb %p len %u", session, skb, skb->len); + + if (skb->len < CAPI_MSG_BASELEN) + return; + + if (CAPIMSG_COMMAND(skb->data) == CAPI_INTEROPERABILITY) { + cmtp_recv_interopmsg(session, skb); + return; + } + + if (session->flags & BIT(CMTP_LOOPBACK)) { + kfree_skb(skb); + return; + } + + appl = CAPIMSG_APPID(skb->data); + contr = CAPIMSG_CONTROL(skb->data); + + application = cmtp_application_get(session, CMTP_MAPPING, appl); + if (application) { + appl = application->appl; + CAPIMSG_SETAPPID(skb->data, appl); + } else { + BT_ERR("Can't find application with id %u", appl); + kfree_skb(skb); + return; + } + + if ((contr & 0x7f) == 0x01) { + contr = (contr & 0xffffff80) | session->num; + CAPIMSG_SETCONTROL(skb->data, contr); + } + + capi_ctr_handle_message(ctrl, appl, skb); +} + +static int cmtp_load_firmware(struct capi_ctr *ctrl, capiloaddata *data) +{ + BT_DBG("ctrl %p data %p", ctrl, data); + + return 0; +} + +static void cmtp_reset_ctr(struct capi_ctr *ctrl) +{ + struct cmtp_session *session = ctrl->driverdata; + + BT_DBG("ctrl %p", ctrl); + + capi_ctr_down(ctrl); + + atomic_inc(&session->terminate); + wake_up_process(session->task); +} + +static void cmtp_register_appl(struct capi_ctr *ctrl, __u16 appl, capi_register_params *rp) +{ + DECLARE_WAITQUEUE(wait, current); + struct cmtp_session *session = ctrl->driverdata; + struct cmtp_application *application; + unsigned long timeo = CMTP_INTEROP_TIMEOUT; + unsigned char buf[8]; + int err = 0, nconn, want = rp->level3cnt; + + BT_DBG("ctrl %p appl %u level3cnt %u datablkcnt %u datablklen %u", + ctrl, appl, rp->level3cnt, rp->datablkcnt, rp->datablklen); + + application = cmtp_application_add(session, appl); + if (!application) { + BT_ERR("Can't allocate memory for new application"); + return; + } + + if (want < 0) + nconn = ctrl->profile.nbchannel * -want; + else + nconn = want; + + if (nconn == 0) + nconn = ctrl->profile.nbchannel; + + capimsg_setu16(buf, 0, nconn); + capimsg_setu16(buf, 2, rp->datablkcnt); + capimsg_setu16(buf, 4, rp->datablklen); + + application->state = BT_CONFIG; + application->msgnum = cmtp_msgnum_get(session); + + cmtp_send_interopmsg(session, CAPI_REQ, 0x0000, application->msgnum, + CAPI_FUNCTION_REGISTER, buf, 6); + + add_wait_queue(&session->wait, &wait); + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + + if (!timeo) { + err = -EAGAIN; + break; + } + + if (application->state == BT_CLOSED) { + err = -application->err; + break; + } + + if (application->state == BT_CONNECTED) + break; + + if (signal_pending(current)) { + err = -EINTR; + break; + } + + timeo = schedule_timeout(timeo); + } + set_current_state(TASK_RUNNING); + remove_wait_queue(&session->wait, &wait); + + if (err) { + cmtp_application_del(session, application); + return; + } +} + +static void cmtp_release_appl(struct capi_ctr *ctrl, __u16 appl) +{ + 
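+ /* Ask the remote CAPI stack to release the mapped application and
+ * wait, up to CMTP_INTEROP_TIMEOUT, for cmtp_recv_interopmsg() to
+ * move it to BT_CLOSED before deleting it locally. */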
struct cmtp_session *session = ctrl->driverdata; + struct cmtp_application *application; + + BT_DBG("ctrl %p appl %u", ctrl, appl); + + application = cmtp_application_get(session, CMTP_APPLID, appl); + if (!application) { + BT_ERR("Can't find application"); + return; + } + + application->msgnum = cmtp_msgnum_get(session); + + cmtp_send_interopmsg(session, CAPI_REQ, application->mapping, application->msgnum, + CAPI_FUNCTION_RELEASE, NULL, 0); + + wait_event_interruptible_timeout(session->wait, + (application->state == BT_CLOSED), CMTP_INTEROP_TIMEOUT); + + cmtp_application_del(session, application); +} + +static u16 cmtp_send_message(struct capi_ctr *ctrl, struct sk_buff *skb) +{ + struct cmtp_session *session = ctrl->driverdata; + struct cmtp_application *application; + __u16 appl; + __u32 contr; + + BT_DBG("ctrl %p skb %p", ctrl, skb); + + appl = CAPIMSG_APPID(skb->data); + contr = CAPIMSG_CONTROL(skb->data); + + application = cmtp_application_get(session, CMTP_APPLID, appl); + if ((!application) || (application->state != BT_CONNECTED)) { + BT_ERR("Can't find application with id %u", appl); + return CAPI_ILLAPPNR; + } + + CAPIMSG_SETAPPID(skb->data, application->mapping); + + if ((contr & 0x7f) == session->num) { + contr = (contr & 0xffffff80) | 0x01; + CAPIMSG_SETCONTROL(skb->data, contr); + } + + cmtp_send_capimsg(session, skb); + + return CAPI_NOERROR; +} + +static char *cmtp_procinfo(struct capi_ctr *ctrl) +{ + return "CAPI Message Transport Protocol"; +} + +static int cmtp_proc_show(struct seq_file *m, void *v) +{ + struct capi_ctr *ctrl = m->private; + struct cmtp_session *session = ctrl->driverdata; + struct cmtp_application *app; + + seq_printf(m, "%s\n\n", cmtp_procinfo(ctrl)); + seq_printf(m, "addr %s\n", session->name); + seq_printf(m, "ctrl %d\n", session->num); + + list_for_each_entry(app, &session->applications, list) { + seq_printf(m, "appl %u -> %u\n", app->appl, app->mapping); + } + + return 0; +} + +int cmtp_attach_device(struct cmtp_session *session) +{ + unsigned char buf[4]; + long ret; + + BT_DBG("session %p", session); + + capimsg_setu32(buf, 0, 0); + + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, CMTP_INITIAL_MSGNUM, + CAPI_FUNCTION_GET_PROFILE, buf, 4); + + ret = wait_event_interruptible_timeout(session->wait, + session->ncontroller, CMTP_INTEROP_TIMEOUT); + + BT_INFO("Found %d CAPI controller(s) on device %s", session->ncontroller, session->name); + + if (!ret) + return -ETIMEDOUT; + + if (!session->ncontroller) + return -ENODEV; + + if (session->ncontroller > 1) + BT_INFO("Setting up only CAPI controller 1"); + + session->ctrl.owner = THIS_MODULE; + session->ctrl.driverdata = session; + strcpy(session->ctrl.name, session->name); + + session->ctrl.driver_name = "cmtp"; + session->ctrl.load_firmware = cmtp_load_firmware; + session->ctrl.reset_ctr = cmtp_reset_ctr; + session->ctrl.register_appl = cmtp_register_appl; + session->ctrl.release_appl = cmtp_release_appl; + session->ctrl.send_message = cmtp_send_message; + + session->ctrl.procinfo = cmtp_procinfo; + session->ctrl.proc_show = cmtp_proc_show; + + if (attach_capi_ctr(&session->ctrl) < 0) { + BT_ERR("Can't attach new controller"); + return -EBUSY; + } + + session->num = session->ctrl.cnr; + + BT_DBG("session %p num %d", session, session->num); + + capimsg_setu32(buf, 0, 1); + + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), + CAPI_FUNCTION_GET_MANUFACTURER, buf, 4); + + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), + CAPI_FUNCTION_GET_VERSION, buf, 4); 
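+ /* The confirmations to these queries arrive asynchronously and are
+ * parsed in cmtp_recv_interopmsg(), which fills in the remaining
+ * capi_ctr fields. */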
+ + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), + CAPI_FUNCTION_GET_SERIAL_NUMBER, buf, 4); + + cmtp_send_interopmsg(session, CAPI_REQ, 0xffff, cmtp_msgnum_get(session), + CAPI_FUNCTION_GET_PROFILE, buf, 4); + + return 0; +} + +void cmtp_detach_device(struct cmtp_session *session) +{ + BT_DBG("session %p", session); + + detach_capi_ctr(&session->ctrl); +} diff --git a/net/bluetooth/cmtp/cmtp.h b/net/bluetooth/cmtp/cmtp.h new file mode 100644 index 000000000..f6b9dc4e4 --- /dev/null +++ b/net/bluetooth/cmtp/cmtp.h @@ -0,0 +1,129 @@ +/* + CMTP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#ifndef __CMTP_H +#define __CMTP_H + +#include <linux/types.h> +#include <net/bluetooth/bluetooth.h> + +#define BTNAMSIZ 21 + +/* CMTP ioctl defines */ +#define CMTPCONNADD _IOW('C', 200, int) +#define CMTPCONNDEL _IOW('C', 201, int) +#define CMTPGETCONNLIST _IOR('C', 210, int) +#define CMTPGETCONNINFO _IOR('C', 211, int) + +#define CMTP_LOOPBACK 0 + +struct cmtp_connadd_req { + int sock; /* Connected socket */ + __u32 flags; +}; + +struct cmtp_conndel_req { + bdaddr_t bdaddr; + __u32 flags; +}; + +struct cmtp_conninfo { + bdaddr_t bdaddr; + __u32 flags; + __u16 state; + int num; +}; + +struct cmtp_connlist_req { + __u32 cnum; + struct cmtp_conninfo __user *ci; +}; + +int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock); +int cmtp_del_connection(struct cmtp_conndel_req *req); +int cmtp_get_connlist(struct cmtp_connlist_req *req); +int cmtp_get_conninfo(struct cmtp_conninfo *ci); + +/* CMTP session defines */ +#define CMTP_INTEROP_TIMEOUT (HZ * 5) +#define CMTP_INITIAL_MSGNUM 0xff00 + +struct cmtp_session { + struct list_head list; + + struct socket *sock; + + bdaddr_t bdaddr; + + unsigned long state; + unsigned long flags; + + uint mtu; + + char name[BTNAMSIZ]; + + atomic_t terminate; + struct task_struct *task; + + wait_queue_head_t wait; + + int ncontroller; + int num; + struct capi_ctr ctrl; + + struct list_head applications; + + unsigned long blockids; + int msgnum; + + struct sk_buff_head transmit; + + struct sk_buff *reassembly[16]; +}; + +struct cmtp_application { + struct list_head list; + + unsigned long state; + int err; + + __u16 appl; + __u16 mapping; + + __u16 msgnum; +}; + +struct cmtp_scb { + int id; + int data; +}; + +int cmtp_attach_device(struct cmtp_session *session); +void cmtp_detach_device(struct cmtp_session *session); + +void cmtp_recv_capimsg(struct cmtp_session *session, struct sk_buff *skb); + +/* CMTP init defines */ +int 
cmtp_init_sockets(void); +void cmtp_cleanup_sockets(void); + +#endif /* __CMTP_H */ diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c new file mode 100644 index 000000000..90d130588 --- /dev/null +++ b/net/bluetooth/cmtp/core.c @@ -0,0 +1,519 @@ +/* + CMTP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <linux/module.h> + +#include <linux/types.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/poll.h> +#include <linux/fcntl.h> +#include <linux/freezer.h> +#include <linux/skbuff.h> +#include <linux/socket.h> +#include <linux/ioctl.h> +#include <linux/file.h> +#include <linux/init.h> +#include <linux/kthread.h> +#include <net/sock.h> + +#include <linux/isdn/capilli.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/l2cap.h> + +#include "cmtp.h" + +#define VERSION "1.0" + +static DECLARE_RWSEM(cmtp_session_sem); +static LIST_HEAD(cmtp_session_list); + +static struct cmtp_session *__cmtp_get_session(bdaddr_t *bdaddr) +{ + struct cmtp_session *session; + + BT_DBG(""); + + list_for_each_entry(session, &cmtp_session_list, list) + if (!bacmp(bdaddr, &session->bdaddr)) + return session; + + return NULL; +} + +static void __cmtp_link_session(struct cmtp_session *session) +{ + list_add(&session->list, &cmtp_session_list); +} + +static void __cmtp_unlink_session(struct cmtp_session *session) +{ + list_del(&session->list); +} + +static void __cmtp_copy_session(struct cmtp_session *session, struct cmtp_conninfo *ci) +{ + u32 valid_flags = BIT(CMTP_LOOPBACK); + memset(ci, 0, sizeof(*ci)); + bacpy(&ci->bdaddr, &session->bdaddr); + + ci->flags = session->flags & valid_flags; + ci->state = session->state; + + ci->num = session->num; +} + + +static inline int cmtp_alloc_block_id(struct cmtp_session *session) +{ + int i, id = -1; + + for (i = 0; i < 16; i++) + if (!test_and_set_bit(i, &session->blockids)) { + id = i; + break; + } + + return id; +} + +static inline void cmtp_free_block_id(struct cmtp_session *session, int id) +{ + clear_bit(id, &session->blockids); +} + +static inline void cmtp_add_msgpart(struct cmtp_session *session, int id, const unsigned char *buf, int count) +{ + struct sk_buff *skb = session->reassembly[id], *nskb; + int size; + + BT_DBG("session %p buf %p count %d", session, buf, count); + + size = (skb) ? 
skb->len + count : count; + + nskb = alloc_skb(size, GFP_ATOMIC); + if (!nskb) { + BT_ERR("Can't allocate memory for CAPI message"); + return; + } + + if (skb && (skb->len > 0)) + skb_copy_from_linear_data(skb, skb_put(nskb, skb->len), skb->len); + + skb_put_data(nskb, buf, count); + + session->reassembly[id] = nskb; + + kfree_skb(skb); +} + +static inline int cmtp_recv_frame(struct cmtp_session *session, struct sk_buff *skb) +{ + __u8 hdr, hdrlen, id; + __u16 len; + + BT_DBG("session %p skb %p len %d", session, skb, skb->len); + + while (skb->len > 0) { + hdr = skb->data[0]; + + switch (hdr & 0xc0) { + case 0x40: + hdrlen = 2; + len = skb->data[1]; + break; + case 0x80: + hdrlen = 3; + len = skb->data[1] | (skb->data[2] << 8); + break; + default: + hdrlen = 1; + len = 0; + break; + } + + id = (hdr & 0x3c) >> 2; + + BT_DBG("hdr 0x%02x hdrlen %d len %d id %d", hdr, hdrlen, len, id); + + if (hdrlen + len > skb->len) { + BT_ERR("Wrong size or header information in CMTP frame"); + break; + } + + if (len == 0) { + skb_pull(skb, hdrlen); + continue; + } + + switch (hdr & 0x03) { + case 0x00: + cmtp_add_msgpart(session, id, skb->data + hdrlen, len); + cmtp_recv_capimsg(session, session->reassembly[id]); + session->reassembly[id] = NULL; + break; + case 0x01: + cmtp_add_msgpart(session, id, skb->data + hdrlen, len); + break; + default: + kfree_skb(session->reassembly[id]); + session->reassembly[id] = NULL; + break; + } + + skb_pull(skb, hdrlen + len); + } + + kfree_skb(skb); + return 0; +} + +static int cmtp_send_frame(struct cmtp_session *session, unsigned char *data, int len) +{ + struct socket *sock = session->sock; + struct kvec iv = { data, len }; + struct msghdr msg; + + BT_DBG("session %p data %p len %d", session, data, len); + + if (!len) + return 0; + + memset(&msg, 0, sizeof(msg)); + + return kernel_sendmsg(sock, &msg, &iv, 1, len); +} + +static void cmtp_process_transmit(struct cmtp_session *session) +{ + struct sk_buff *skb, *nskb; + unsigned char *hdr; + unsigned int size, tail; + + BT_DBG("session %p", session); + + nskb = alloc_skb(session->mtu, GFP_ATOMIC); + if (!nskb) { + BT_ERR("Can't allocate memory for new frame"); + return; + } + + while ((skb = skb_dequeue(&session->transmit))) { + struct cmtp_scb *scb = (void *) skb->cb; + + tail = session->mtu - nskb->len; + if (tail < 5) { + cmtp_send_frame(session, nskb->data, nskb->len); + skb_trim(nskb, 0); + tail = session->mtu; + } + + size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len); + + if (scb->id < 0) { + scb->id = cmtp_alloc_block_id(session); + if (scb->id < 0) { + skb_queue_head(&session->transmit, skb); + break; + } + } + + if (size < 256) { + hdr = skb_put(nskb, 2); + hdr[0] = 0x40 + | ((scb->id << 2) & 0x3c) + | ((skb->len == size) ? 0x00 : 0x01); + hdr[1] = size; + } else { + hdr = skb_put(nskb, 3); + hdr[0] = 0x80 + | ((scb->id << 2) & 0x3c) + | ((skb->len == size) ? 
0x00 : 0x01); + hdr[1] = size & 0xff; + hdr[2] = size >> 8; + } + + skb_copy_from_linear_data(skb, skb_put(nskb, size), size); + skb_pull(skb, size); + + if (skb->len > 0) { + skb_queue_head(&session->transmit, skb); + } else { + cmtp_free_block_id(session, scb->id); + if (scb->data) { + cmtp_send_frame(session, nskb->data, nskb->len); + skb_trim(nskb, 0); + } + kfree_skb(skb); + } + } + + cmtp_send_frame(session, nskb->data, nskb->len); + + kfree_skb(nskb); +} + +static int cmtp_session(void *arg) +{ + struct cmtp_session *session = arg; + struct sock *sk = session->sock->sk; + struct sk_buff *skb; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + + BT_DBG("session %p", session); + + set_user_nice(current, -15); + + add_wait_queue(sk_sleep(sk), &wait); + while (1) { + if (atomic_read(&session->terminate)) + break; + if (sk->sk_state != BT_CONNECTED) + break; + + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + skb_orphan(skb); + if (!skb_linearize(skb)) + cmtp_recv_frame(session, skb); + else + kfree_skb(skb); + } + + cmtp_process_transmit(session); + + /* + * wait_woken() performs the necessary memory barriers + * for us; see the header comment for this primitive. + */ + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + remove_wait_queue(sk_sleep(sk), &wait); + + down_write(&cmtp_session_sem); + + if (!(session->flags & BIT(CMTP_LOOPBACK))) + cmtp_detach_device(session); + + fput(session->sock->file); + + __cmtp_unlink_session(session); + + up_write(&cmtp_session_sem); + + kfree(session); + module_put_and_kthread_exit(0); + return 0; +} + +int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock) +{ + u32 valid_flags = BIT(CMTP_LOOPBACK); + struct cmtp_session *session, *s; + int i, err; + + BT_DBG(""); + + if (!l2cap_is_socket(sock)) + return -EBADFD; + + if (req->flags & ~valid_flags) + return -EINVAL; + + session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL); + if (!session) + return -ENOMEM; + + down_write(&cmtp_session_sem); + + s = __cmtp_get_session(&l2cap_pi(sock->sk)->chan->dst); + if (s && s->state == BT_CONNECTED) { + err = -EEXIST; + goto failed; + } + + bacpy(&session->bdaddr, &l2cap_pi(sock->sk)->chan->dst); + + session->mtu = min_t(uint, l2cap_pi(sock->sk)->chan->omtu, + l2cap_pi(sock->sk)->chan->imtu); + + BT_DBG("mtu %d", session->mtu); + + sprintf(session->name, "%pMR", &session->bdaddr); + + session->sock = sock; + session->state = BT_CONFIG; + + init_waitqueue_head(&session->wait); + + session->msgnum = CMTP_INITIAL_MSGNUM; + + INIT_LIST_HEAD(&session->applications); + + skb_queue_head_init(&session->transmit); + + for (i = 0; i < 16; i++) + session->reassembly[i] = NULL; + + session->flags = req->flags; + + __cmtp_link_session(session); + + __module_get(THIS_MODULE); + session->task = kthread_run(cmtp_session, session, "kcmtpd_ctr_%d", + session->num); + if (IS_ERR(session->task)) { + module_put(THIS_MODULE); + err = PTR_ERR(session->task); + goto unlink; + } + + if (!(session->flags & BIT(CMTP_LOOPBACK))) { + err = cmtp_attach_device(session); + if (err < 0) { + /* Caller will call fput in case of failure, and so + * will cmtp_session kthread. 
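+ * The extra reference taken below keeps the file alive until both
+ * of those fput() calls have run.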
+ */ + get_file(session->sock->file); + + atomic_inc(&session->terminate); + wake_up_interruptible(sk_sleep(session->sock->sk)); + up_write(&cmtp_session_sem); + return err; + } + } + + up_write(&cmtp_session_sem); + return 0; + +unlink: + __cmtp_unlink_session(session); + +failed: + up_write(&cmtp_session_sem); + kfree(session); + return err; +} + +int cmtp_del_connection(struct cmtp_conndel_req *req) +{ + u32 valid_flags = 0; + struct cmtp_session *session; + int err = 0; + + BT_DBG(""); + + if (req->flags & ~valid_flags) + return -EINVAL; + + down_read(&cmtp_session_sem); + + session = __cmtp_get_session(&req->bdaddr); + if (session) { + /* Flush the transmit queue */ + skb_queue_purge(&session->transmit); + + /* Stop session thread */ + atomic_inc(&session->terminate); + + /* + * See the comment preceding the call to wait_woken() + * in cmtp_session(). + */ + wake_up_interruptible(sk_sleep(session->sock->sk)); + } else + err = -ENOENT; + + up_read(&cmtp_session_sem); + return err; +} + +int cmtp_get_connlist(struct cmtp_connlist_req *req) +{ + struct cmtp_session *session; + int err = 0, n = 0; + + BT_DBG(""); + + down_read(&cmtp_session_sem); + + list_for_each_entry(session, &cmtp_session_list, list) { + struct cmtp_conninfo ci; + + __cmtp_copy_session(session, &ci); + + if (copy_to_user(req->ci, &ci, sizeof(ci))) { + err = -EFAULT; + break; + } + + if (++n >= req->cnum) + break; + + req->ci++; + } + req->cnum = n; + + up_read(&cmtp_session_sem); + return err; +} + +int cmtp_get_conninfo(struct cmtp_conninfo *ci) +{ + struct cmtp_session *session; + int err = 0; + + down_read(&cmtp_session_sem); + + session = __cmtp_get_session(&ci->bdaddr); + if (session) + __cmtp_copy_session(session, ci); + else + err = -ENOENT; + + up_read(&cmtp_session_sem); + return err; +} + + +static int __init cmtp_init(void) +{ + BT_INFO("CMTP (CAPI Emulation) ver %s", VERSION); + + return cmtp_init_sockets(); +} + +static void __exit cmtp_exit(void) +{ + cmtp_cleanup_sockets(); +} + +module_init(cmtp_init); +module_exit(cmtp_exit); + +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); +MODULE_DESCRIPTION("Bluetooth CMTP ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-5"); diff --git a/net/bluetooth/cmtp/sock.c b/net/bluetooth/cmtp/sock.c new file mode 100644 index 000000000..96d49d9fa --- /dev/null +++ b/net/bluetooth/cmtp/sock.c @@ -0,0 +1,271 @@ +/* + CMTP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002-2003 Marcel Holtmann <marcel@holtmann.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#include <linux/export.h> + +#include <linux/types.h> +#include <linux/capability.h> +#include <linux/errno.h> +#include <linux/kernel.h> +#include <linux/poll.h> +#include <linux/fcntl.h> +#include <linux/skbuff.h> +#include <linux/socket.h> +#include <linux/ioctl.h> +#include <linux/file.h> +#include <linux/compat.h> +#include <linux/gfp.h> +#include <linux/uaccess.h> +#include <net/sock.h> + +#include <linux/isdn/capilli.h> + + +#include "cmtp.h" + +static struct bt_sock_list cmtp_sk_list = { + .lock = __RW_LOCK_UNLOCKED(cmtp_sk_list.lock) +}; + +static int cmtp_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!sk) + return 0; + + bt_sock_unlink(&cmtp_sk_list, sk); + + sock_orphan(sk); + sock_put(sk); + + return 0; +} + +static int do_cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp) +{ + struct cmtp_connadd_req ca; + struct cmtp_conndel_req cd; + struct cmtp_connlist_req cl; + struct cmtp_conninfo ci; + struct socket *nsock; + int err; + + BT_DBG("cmd %x arg %p", cmd, argp); + + switch (cmd) { + case CMTPCONNADD: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&ca, argp, sizeof(ca))) + return -EFAULT; + + nsock = sockfd_lookup(ca.sock, &err); + if (!nsock) + return err; + + if (nsock->sk->sk_state != BT_CONNECTED) { + sockfd_put(nsock); + return -EBADFD; + } + + err = cmtp_add_connection(&ca, nsock); + if (!err) { + if (copy_to_user(argp, &ca, sizeof(ca))) + err = -EFAULT; + } else + sockfd_put(nsock); + + return err; + + case CMTPCONNDEL: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&cd, argp, sizeof(cd))) + return -EFAULT; + + return cmtp_del_connection(&cd); + + case CMTPGETCONNLIST: + if (copy_from_user(&cl, argp, sizeof(cl))) + return -EFAULT; + + if (cl.cnum <= 0) + return -EINVAL; + + err = cmtp_get_connlist(&cl); + if (!err && copy_to_user(argp, &cl, sizeof(cl))) + return -EFAULT; + + return err; + + case CMTPGETCONNINFO: + if (copy_from_user(&ci, argp, sizeof(ci))) + return -EFAULT; + + err = cmtp_get_conninfo(&ci); + if (!err && copy_to_user(argp, &ci, sizeof(ci))) + return -EFAULT; + + return err; + } + + return -EINVAL; +} + +static int cmtp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + return do_cmtp_sock_ioctl(sock, cmd, (void __user *)arg); +} + +#ifdef CONFIG_COMPAT +static int cmtp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + void __user *argp = compat_ptr(arg); + if (cmd == CMTPGETCONNLIST) { + struct cmtp_connlist_req cl; + u32 __user *p = argp; + u32 uci; + int err; + + if (get_user(cl.cnum, p) || get_user(uci, p + 1)) + return -EFAULT; + + cl.ci = compat_ptr(uci); + + if (cl.cnum <= 0) + return -EINVAL; + + err = cmtp_get_connlist(&cl); + + if (!err && put_user(cl.cnum, p)) + err = -EFAULT; + + return err; + } + + return do_cmtp_sock_ioctl(sock, cmd, argp); +} +#endif + +static const struct proto_ops cmtp_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = cmtp_sock_release, + .ioctl = cmtp_sock_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = cmtp_sock_compat_ioctl, +#endif + .bind = sock_no_bind, + .getname = sock_no_getname, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .mmap = sock_no_mmap +}; + +static struct proto cmtp_proto = { + .name = "CMTP", + .owner = 
THIS_MODULE, + .obj_size = sizeof(struct bt_sock) +}; + +static int cmtp_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + if (sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &cmtp_proto, kern); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + + sock->ops = &cmtp_sock_ops; + + sock->state = SS_UNCONNECTED; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = protocol; + sk->sk_state = BT_OPEN; + + bt_sock_link(&cmtp_sk_list, sk); + + return 0; +} + +static const struct net_proto_family cmtp_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = cmtp_sock_create +}; + +int cmtp_init_sockets(void) +{ + int err; + + err = proto_register(&cmtp_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_CMTP, &cmtp_sock_family_ops); + if (err < 0) { + BT_ERR("Can't register CMTP socket"); + goto error; + } + + err = bt_procfs_init(&init_net, "cmtp", &cmtp_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create CMTP proc file"); + bt_sock_unregister(BTPROTO_HIDP); + goto error; + } + + BT_INFO("CMTP socket layer initialized"); + + return 0; + +error: + proto_unregister(&cmtp_proto); + return err; +} + +void cmtp_cleanup_sockets(void) +{ + bt_procfs_cleanup(&init_net, "cmtp"); + bt_sock_unregister(BTPROTO_CMTP); + proto_unregister(&cmtp_proto); +} diff --git a/net/bluetooth/ecdh_helper.c b/net/bluetooth/ecdh_helper.c new file mode 100644 index 000000000..989401f11 --- /dev/null +++ b/net/bluetooth/ecdh_helper.c @@ -0,0 +1,228 @@ +/* + * ECDH helper functions - KPP wrappings + * + * Copyright (C) 2017 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + * CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + * COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + * SOFTWARE IS DISCLAIMED. + */ +#include "ecdh_helper.h" + +#include <linux/scatterlist.h> +#include <crypto/ecdh.h> + +struct ecdh_completion { + struct completion completion; + int err; +}; + +static void ecdh_complete(struct crypto_async_request *req, int err) +{ + struct ecdh_completion *res = req->data; + + if (err == -EINPROGRESS) + return; + + res->err = err; + complete(&res->completion); +} + +static inline void swap_digits(u64 *in, u64 *out, unsigned int ndigits) +{ + int i; + + for (i = 0; i < ndigits; i++) + out[i] = __swab64(in[ndigits - 1 - i]); +} + +/* compute_ecdh_secret() - function assumes that the private key was + * already set. + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp(). + * @public_key: pair's ecc public key. + * secret: memory where the ecdh computed shared secret will be saved. 
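+ * Both buffers use the little-endian coordinate format of the
+ * Security Manager Protocol; the byte order is swapped around the
+ * underlying crypto calls.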
+ * + * Return: zero on success; error code in case of error. + */ +int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 public_key[64], + u8 secret[32]) +{ + struct kpp_request *req; + u8 *tmp; + struct ecdh_completion result; + struct scatterlist src, dst; + int err; + + tmp = kmalloc(64, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + req = kpp_request_alloc(tfm, GFP_KERNEL); + if (!req) { + err = -ENOMEM; + goto free_tmp; + } + + init_completion(&result.completion); + + swap_digits((u64 *)public_key, (u64 *)tmp, 4); /* x */ + swap_digits((u64 *)&public_key[32], (u64 *)&tmp[32], 4); /* y */ + + sg_init_one(&src, tmp, 64); + sg_init_one(&dst, secret, 32); + kpp_request_set_input(req, &src, 64); + kpp_request_set_output(req, &dst, 32); + kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + ecdh_complete, &result); + err = crypto_kpp_compute_shared_secret(req); + if (err == -EINPROGRESS) { + wait_for_completion(&result.completion); + err = result.err; + } + if (err < 0) { + pr_err("alg: ecdh: compute shared secret failed. err %d\n", + err); + goto free_all; + } + + swap_digits((u64 *)secret, (u64 *)tmp, 4); + memcpy(secret, tmp, 32); + +free_all: + kpp_request_free(req); +free_tmp: + kfree_sensitive(tmp); + return err; +} + +/* set_ecdh_privkey() - set or generate ecc private key. + * + * Function generates an ecc private key in the crypto subsystem when receiving + * a NULL private key or sets the received key when not NULL. + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp(). + * @private_key: user's ecc private key. When not NULL, the key is expected + * in little endian format. + * + * Return: zero on success; error code in case of error. + */ +int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]) +{ + u8 *buf, *tmp = NULL; + unsigned int buf_len; + int err; + struct ecdh p = {0}; + + if (private_key) { + tmp = kmalloc(32, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + swap_digits((u64 *)private_key, (u64 *)tmp, 4); + p.key = tmp; + p.key_size = 32; + } + + buf_len = crypto_ecdh_key_len(&p); + buf = kmalloc(buf_len, GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto free_tmp; + } + + err = crypto_ecdh_encode_key(buf, buf_len, &p); + if (err) + goto free_all; + + err = crypto_kpp_set_secret(tfm, buf, buf_len); + /* fall through */ +free_all: + kfree_sensitive(buf); +free_tmp: + kfree_sensitive(tmp); + return err; +} + +/* generate_ecdh_public_key() - function assumes that the private key was + * already set. + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp(). + * @public_key: memory where the computed ecc public key will be saved. + * + * Return: zero on success; error code in case of error. 
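+ * + * When set_ecdh_privkey() was called with a NULL private key, the crypto + * subsystem generates the key pair itself; generate_ecdh_keys() below wraps + * exactly that two-step sequence.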
+ */ +int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]) +{ + struct kpp_request *req; + u8 *tmp; + struct ecdh_completion result; + struct scatterlist dst; + int err; + + tmp = kmalloc(64, GFP_KERNEL); + if (!tmp) + return -ENOMEM; + + req = kpp_request_alloc(tfm, GFP_KERNEL); + if (!req) { + err = -ENOMEM; + goto free_tmp; + } + + init_completion(&result.completion); + sg_init_one(&dst, tmp, 64); + kpp_request_set_input(req, NULL, 0); + kpp_request_set_output(req, &dst, 64); + kpp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, + ecdh_complete, &result); + + err = crypto_kpp_generate_public_key(req); + if (err == -EINPROGRESS) { + wait_for_completion(&result.completion); + err = result.err; + } + if (err < 0) + goto free_all; + + /* The public key is handed back in little endian as expected by + * the Security Manager Protocol. + */ + swap_digits((u64 *)tmp, (u64 *)public_key, 4); /* x */ + swap_digits((u64 *)&tmp[32], (u64 *)&public_key[32], 4); /* y */ + +free_all: + kpp_request_free(req); +free_tmp: + kfree(tmp); + return err; +} + +/* generate_ecdh_keys() - generate ecc key pair. + * + * @tfm: KPP tfm handle allocated with crypto_alloc_kpp(). + * @public_key: memory where the computed ecc public key will be saved. + * + * Return: zero on success; error code in case of error. + */ +int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]) +{ + int err; + + err = set_ecdh_privkey(tfm, NULL); + if (err) + return err; + + return generate_ecdh_public_key(tfm, public_key); +} diff --git a/net/bluetooth/ecdh_helper.h b/net/bluetooth/ecdh_helper.h new file mode 100644 index 000000000..830723971 --- /dev/null +++ b/net/bluetooth/ecdh_helper.h @@ -0,0 +1,30 @@ +/* + * ECDH helper functions - KPP wrappings + * + * Copyright (C) 2017 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation; + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + * CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * + * ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + * COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + * SOFTWARE IS DISCLAIMED. 
+ */ +#include <crypto/kpp.h> +#include <linux/types.h> + +int compute_ecdh_secret(struct crypto_kpp *tfm, const u8 pair_public_key[64], + u8 secret[32]); +int set_ecdh_privkey(struct crypto_kpp *tfm, const u8 private_key[32]); +int generate_ecdh_public_key(struct crypto_kpp *tfm, u8 public_key[64]); +int generate_ecdh_keys(struct crypto_kpp *tfm, u8 public_key[64]); diff --git a/net/bluetooth/eir.c b/net/bluetooth/eir.c new file mode 100644 index 000000000..8a85f6cdf --- /dev/null +++ b/net/bluetooth/eir.c @@ -0,0 +1,398 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2021 Intel Corporation + */ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/mgmt.h> + +#include "eir.h" + +#define PNP_INFO_SVCLASS_ID 0x1200 + +static u8 eir_append_name(u8 *eir, u16 eir_len, u8 type, u8 *data, u8 data_len) +{ + u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1]; + + /* If data is already NULL terminated just pass it directly */ + if (data[data_len - 1] == '\0') + return eir_append_data(eir, eir_len, type, data, data_len); + + memcpy(name, data, HCI_MAX_SHORT_NAME_LENGTH); + name[HCI_MAX_SHORT_NAME_LENGTH] = '\0'; + + return eir_append_data(eir, eir_len, type, name, sizeof(name)); +} + +u8 eir_append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len) +{ + size_t short_len; + size_t complete_len; + + /* no space left for name (+ NULL + type + len) */ + if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3) + return ad_len; + + /* use complete name if present and fits */ + complete_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); + if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH) + return eir_append_name(ptr, ad_len, EIR_NAME_COMPLETE, + hdev->dev_name, complete_len + 1); + + /* use short name if present */ + short_len = strnlen(hdev->short_name, sizeof(hdev->short_name)); + if (short_len) + return eir_append_name(ptr, ad_len, EIR_NAME_SHORT, + hdev->short_name, + short_len == HCI_MAX_SHORT_NAME_LENGTH ? 
short_len : short_len + 1); + + /* use shortened full name if present, we already know that the name + * is longer than HCI_MAX_SHORT_NAME_LENGTH + */ + if (complete_len) + return eir_append_name(ptr, ad_len, EIR_NAME_SHORT, + hdev->dev_name, + HCI_MAX_SHORT_NAME_LENGTH); + + return ad_len; +} + +u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len) +{ + return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance); +} + +u8 eir_append_service_data(u8 *eir, u16 eir_len, u16 uuid, u8 *data, + u8 data_len) +{ + eir[eir_len++] = sizeof(u8) + sizeof(uuid) + data_len; + eir[eir_len++] = EIR_SERVICE_DATA; + put_unaligned_le16(uuid, &eir[eir_len]); + eir_len += sizeof(uuid); + memcpy(&eir[eir_len], data, data_len); + eir_len += data_len; + + return eir_len; +} + +static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) +{ + u8 *ptr = data, *uuids_start = NULL; + struct bt_uuid *uuid; + + if (len < 4) + return ptr; + + list_for_each_entry(uuid, &hdev->uuids, list) { + u16 uuid16; + + if (uuid->size != 16) + continue; + + uuid16 = get_unaligned_le16(&uuid->uuid[12]); + if (uuid16 < 0x1100) + continue; + + if (uuid16 == PNP_INFO_SVCLASS_ID) + continue; + + if (!uuids_start) { + uuids_start = ptr; + uuids_start[0] = 1; + uuids_start[1] = EIR_UUID16_ALL; + ptr += 2; + } + + /* Stop if not enough space to put next UUID */ + if ((ptr - data) + sizeof(u16) > len) { + uuids_start[1] = EIR_UUID16_SOME; + break; + } + + *ptr++ = (uuid16 & 0x00ff); + *ptr++ = (uuid16 & 0xff00) >> 8; + uuids_start[0] += sizeof(uuid16); + } + + return ptr; +} + +static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) +{ + u8 *ptr = data, *uuids_start = NULL; + struct bt_uuid *uuid; + + if (len < 6) + return ptr; + + list_for_each_entry(uuid, &hdev->uuids, list) { + if (uuid->size != 32) + continue; + + if (!uuids_start) { + uuids_start = ptr; + uuids_start[0] = 1; + uuids_start[1] = EIR_UUID32_ALL; + ptr += 2; + } + + /* Stop if not enough space to put next UUID */ + if ((ptr - data) + sizeof(u32) > len) { + uuids_start[1] = EIR_UUID32_SOME; + break; + } + + memcpy(ptr, &uuid->uuid[12], sizeof(u32)); + ptr += sizeof(u32); + uuids_start[0] += sizeof(u32); + } + + return ptr; +} + +static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len) +{ + u8 *ptr = data, *uuids_start = NULL; + struct bt_uuid *uuid; + + if (len < 18) + return ptr; + + list_for_each_entry(uuid, &hdev->uuids, list) { + if (uuid->size != 128) + continue; + + if (!uuids_start) { + uuids_start = ptr; + uuids_start[0] = 1; + uuids_start[1] = EIR_UUID128_ALL; + ptr += 2; + } + + /* Stop if not enough space to put next UUID */ + if ((ptr - data) + 16 > len) { + uuids_start[1] = EIR_UUID128_SOME; + break; + } + + memcpy(ptr, uuid->uuid, 16); + ptr += 16; + uuids_start[0] += 16; + } + + return ptr; +} + +void eir_create(struct hci_dev *hdev, u8 *data) +{ + u8 *ptr = data; + size_t name_len; + + name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); + + if (name_len > 0) { + /* EIR Data type */ + if (name_len > 48) { + name_len = 48; + ptr[1] = EIR_NAME_SHORT; + } else { + ptr[1] = EIR_NAME_COMPLETE; + } + + /* EIR Data length */ + ptr[0] = name_len + 1; + + memcpy(ptr + 2, hdev->dev_name, name_len); + + ptr += (name_len + 2); + } + + if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) { + ptr[0] = 2; + ptr[1] = EIR_TX_POWER; + ptr[2] = (u8)hdev->inq_tx_power; + + ptr += 3; + } + + if (hdev->devid_source > 0) { + ptr[0] = 9; + ptr[1] = EIR_DEVICE_ID; + + 
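/* 8 octets of Device ID profile data: source, vendor, product and + * version, each a little endian u16. + */ +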
put_unaligned_le16(hdev->devid_source, ptr + 2); + put_unaligned_le16(hdev->devid_vendor, ptr + 4); + put_unaligned_le16(hdev->devid_product, ptr + 6); + put_unaligned_le16(hdev->devid_version, ptr + 8); + + ptr += 10; + } + + ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); + ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); + ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data)); +} + +u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) +{ + struct adv_info *adv = NULL; + u8 ad_len = 0; + + /* Return 0 when the current instance identifier is invalid. */ + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return 0; + } + + if (adv) { + memcpy(ptr, adv->per_adv_data, adv->per_adv_data_len); + ad_len += adv->per_adv_data_len; + ptr += adv->per_adv_data_len; + } + + return ad_len; +} + +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr) +{ + struct adv_info *adv = NULL; + u8 ad_len = 0, flags = 0; + u32 instance_flags; + + /* Return 0 when the current instance identifier is invalid. */ + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return 0; + } + + instance_flags = hci_adv_instance_flags(hdev, instance); + + /* If the instance already has the flags set, skip adding them + * again. + */ + if (adv && eir_get_data(adv->adv_data, adv->adv_data_len, EIR_FLAGS, + NULL)) + goto skip_flags; + + /* The Add Advertising command allows userspace to set both the general + * and limited discoverable flags. + */ + if (instance_flags & MGMT_ADV_FLAG_DISCOV) + flags |= LE_AD_GENERAL; + + if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) + flags |= LE_AD_LIMITED; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + flags |= LE_AD_NO_BREDR; + + if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { + /* If a discovery flag wasn't provided, simply use the global + * settings. + */ + if (!flags) + flags |= mgmt_get_adv_discov_flags(hdev); + + /* If flags would still be empty, then there is no need to + * include the "Flags" AD field. 
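+ * For example, a general discoverable instance on an LE-only + * controller ends up with flags 0x06 (LE General Discoverable Mode | + * BR/EDR Not Supported).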
+ */ + if (flags) { + ptr[0] = 0x02; + ptr[1] = EIR_FLAGS; + ptr[2] = flags; + + ad_len += 3; + ptr += 3; + } + } + +skip_flags: + if (adv) { + memcpy(ptr, adv->adv_data, adv->adv_data_len); + ad_len += adv->adv_data_len; + ptr += adv->adv_data_len; + } + + if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { + s8 adv_tx_power; + + if (ext_adv_capable(hdev)) { + if (adv) + adv_tx_power = adv->tx_power; + else + adv_tx_power = hdev->adv_tx_power; + } else { + adv_tx_power = hdev->adv_tx_power; + } + + /* Provide Tx Power only if we can provide a valid value for it */ + if (adv_tx_power != HCI_TX_POWER_INVALID) { + ptr[0] = 0x02; + ptr[1] = EIR_TX_POWER; + ptr[2] = (u8)adv_tx_power; + + ad_len += 3; + ptr += 3; + } + } + + return ad_len; +} + +static u8 create_default_scan_rsp(struct hci_dev *hdev, u8 *ptr) +{ + u8 scan_rsp_len = 0; + + if (hdev->appearance) + scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len); + + return eir_append_local_name(hdev, ptr, scan_rsp_len); +} + +u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr) +{ + struct adv_info *adv; + u8 scan_rsp_len = 0; + + if (!instance) + return create_default_scan_rsp(hdev, ptr); + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return 0; + + if ((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) + scan_rsp_len = eir_append_appearance(hdev, ptr, scan_rsp_len); + + memcpy(&ptr[scan_rsp_len], adv->scan_rsp_data, adv->scan_rsp_len); + + scan_rsp_len += adv->scan_rsp_len; + + if (adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + scan_rsp_len = eir_append_local_name(hdev, ptr, scan_rsp_len); + + return scan_rsp_len; +} + +void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len) +{ + while ((eir = eir_get_data(eir, eir_len, EIR_SERVICE_DATA, len))) { + u16 value = get_unaligned_le16(eir); + + if (uuid == value) { + if (len) + *len -= 2; + return &eir[2]; + } + + eir += *len; + eir_len -= *len; + } + + return NULL; +} diff --git a/net/bluetooth/eir.h b/net/bluetooth/eir.h new file mode 100644 index 000000000..0df19f2f4 --- /dev/null +++ b/net/bluetooth/eir.h @@ -0,0 +1,99 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2021 Intel Corporation + */ + +#include <asm/unaligned.h> + +void eir_create(struct hci_dev *hdev, u8 *data); + +u8 eir_create_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); +u8 eir_create_scan_rsp(struct hci_dev *hdev, u8 instance, u8 *ptr); +u8 eir_create_per_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr); + +u8 eir_append_local_name(struct hci_dev *hdev, u8 *eir, u8 ad_len); +u8 eir_append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len); +u8 eir_append_service_data(u8 *eir, u16 eir_len, u16 uuid, u8 *data, + u8 data_len); + +static inline u16 eir_precalc_len(u8 data_len) +{ + return sizeof(u8) * 2 + data_len; +} + +static inline u16 eir_append_data(u8 *eir, u16 eir_len, u8 type, + u8 *data, u8 data_len) +{ + eir[eir_len++] = sizeof(type) + data_len; + eir[eir_len++] = type; + memcpy(&eir[eir_len], data, data_len); + eir_len += data_len; + + return eir_len; +} + +static inline u16 eir_append_le16(u8 *eir, u16 eir_len, u8 type, u16 data) +{ + eir[eir_len++] = sizeof(type) + sizeof(data); + eir[eir_len++] = type; + put_unaligned_le16(data, &eir[eir_len]); + eir_len += sizeof(data); + + return eir_len; +} + +static inline u16 eir_skb_put_data(struct sk_buff *skb, u8 type, u8 *data, u8 data_len) +{ + u8 *eir; + u16 eir_len; + + eir_len = eir_precalc_len(data_len); + eir = skb_put(skb, 
eir_len); + WARN_ON(sizeof(type) + data_len > U8_MAX); + eir[0] = sizeof(type) + data_len; + eir[1] = type; + memcpy(&eir[2], data, data_len); + + return eir_len; +} + +static inline void *eir_get_data(u8 *eir, size_t eir_len, u8 type, + size_t *data_len) +{ + size_t parsed = 0; + + if (eir_len < 2) + return NULL; + + while (parsed < eir_len - 1) { + u8 field_len = eir[0]; + + if (field_len == 0) + break; + + parsed += field_len + 1; + + if (parsed > eir_len) + break; + + if (eir[1] != type) { + eir += field_len + 1; + continue; + } + + /* Zero length data */ + if (field_len == 1) + return NULL; + + if (data_len) + *data_len = field_len - 1; + + return &eir[2]; + } + + return NULL; +} + +void *eir_get_service_data(u8 *eir, size_t eir_len, u16 uuid, size_t *len); diff --git a/net/bluetooth/hci_codec.c b/net/bluetooth/hci_codec.c new file mode 100644 index 000000000..3cc135bb1 --- /dev/null +++ b/net/bluetooth/hci_codec.c @@ -0,0 +1,253 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* Copyright (C) 2021 Intel Corporation */ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include "hci_codec.h" + +static int hci_codec_list_add(struct list_head *list, + struct hci_op_read_local_codec_caps *sent, + struct hci_rp_read_local_codec_caps *rp, + void *caps, + __u32 len) +{ + struct codec_list *entry; + + entry = kzalloc(sizeof(*entry) + len, GFP_KERNEL); + if (!entry) + return -ENOMEM; + + entry->id = sent->id; + if (sent->id == 0xFF) { + entry->cid = __le16_to_cpu(sent->cid); + entry->vid = __le16_to_cpu(sent->vid); + } + entry->transport = sent->transport; + entry->len = len; + entry->num_caps = 0; + if (rp) { + entry->num_caps = rp->num_caps; + memcpy(entry->caps, caps, len); + } + list_add(&entry->list, list); + + return 0; +} + +void hci_codec_list_clear(struct list_head *codec_list) +{ + struct codec_list *c, *n; + + list_for_each_entry_safe(c, n, codec_list, list) { + list_del(&c->list); + kfree(c); + } +} + +static void hci_read_codec_capabilities(struct hci_dev *hdev, __u8 transport, + struct hci_op_read_local_codec_caps + *cmd) +{ + __u8 i; + + for (i = 0; i < TRANSPORT_TYPE_MAX; i++) { + if (transport & BIT(i)) { + struct hci_rp_read_local_codec_caps *rp; + struct hci_codec_caps *caps; + struct sk_buff *skb; + __u8 j; + __u32 len; + + cmd->transport = i; + + /* If Read_Codec_Capabilities command is not supported + * then just add codec to the list without caps + */ + if (!(hdev->commands[45] & 0x08)) { + hci_dev_lock(hdev); + hci_codec_list_add(&hdev->local_codecs, cmd, + NULL, NULL, 0); + hci_dev_unlock(hdev); + continue; + } + + skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODEC_CAPS, + sizeof(*cmd), cmd, 0, HCI_CMD_TIMEOUT, NULL); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to read codec capabilities (%ld)", + PTR_ERR(skb)); + continue; + } + + if (skb->len < sizeof(*rp)) + goto error; + + rp = (void *)skb->data; + + if (rp->status) + goto error; + + if (!rp->num_caps) { + len = 0; + /* this codec doesn't have capabilities */ + goto skip_caps_parse; + } + + skb_pull(skb, sizeof(*rp)); + + for (j = 0, len = 0; j < rp->num_caps; j++) { + caps = (void *)skb->data; + if (skb->len < sizeof(*caps)) + goto error; + if (skb->len < caps->len) + goto error; + len += sizeof(caps->len) + caps->len; + skb_pull(skb, sizeof(caps->len) + caps->len); + } + +skip_caps_parse: + hci_dev_lock(hdev); + hci_codec_list_add(&hdev->local_codecs, cmd, rp, + (__u8 *)rp + sizeof(*rp), len); + hci_dev_unlock(hdev); +error: + kfree_skb(skb); + } + } +} + +void 
hci_read_supported_codecs(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct hci_rp_read_local_supported_codecs *rp; + struct hci_std_codecs *std_codecs; + struct hci_vnd_codecs *vnd_codecs; + struct hci_op_read_local_codec_caps caps; + __u8 i; + + skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS, 0, NULL, + 0, HCI_CMD_TIMEOUT, NULL); + + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to read local supported codecs (%ld)", + PTR_ERR(skb)); + return; + } + + if (skb->len < sizeof(*rp)) + goto error; + + rp = (void *)skb->data; + + if (rp->status) + goto error; + + skb_pull(skb, sizeof(rp->status)); + + std_codecs = (void *)skb->data; + + /* validate codecs length before accessing */ + if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num) + + sizeof(std_codecs->num)) + goto error; + + /* enumerate codec capabilities of standard codecs */ + memset(&caps, 0, sizeof(caps)); + for (i = 0; i < std_codecs->num; i++) { + caps.id = std_codecs->codec[i]; + caps.direction = 0x00; + hci_read_codec_capabilities(hdev, + LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK, &caps); + } + + skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num) + + sizeof(std_codecs->num)); + + vnd_codecs = (void *)skb->data; + + /* validate vendor codecs length before accessing */ + if (skb->len < + flex_array_size(vnd_codecs, codec, vnd_codecs->num) + + sizeof(vnd_codecs->num)) + goto error; + + /* enumerate vendor codec capabilities */ + for (i = 0; i < vnd_codecs->num; i++) { + caps.id = 0xFF; + caps.cid = vnd_codecs->codec[i].cid; + caps.vid = vnd_codecs->codec[i].vid; + caps.direction = 0x00; + hci_read_codec_capabilities(hdev, + LOCAL_CODEC_ACL_MASK | LOCAL_CODEC_SCO_MASK, &caps); + } + +error: + kfree_skb(skb); +} + +void hci_read_supported_codecs_v2(struct hci_dev *hdev) +{ + struct sk_buff *skb; + struct hci_rp_read_local_supported_codecs_v2 *rp; + struct hci_std_codecs_v2 *std_codecs; + struct hci_vnd_codecs_v2 *vnd_codecs; + struct hci_op_read_local_codec_caps caps; + __u8 i; + + skb = __hci_cmd_sync_sk(hdev, HCI_OP_READ_LOCAL_CODECS_V2, 0, NULL, + 0, HCI_CMD_TIMEOUT, NULL); + + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Failed to read local supported codecs (%ld)", + PTR_ERR(skb)); + return; + } + + if (skb->len < sizeof(*rp)) + goto error; + + rp = (void *)skb->data; + + if (rp->status) + goto error; + + skb_pull(skb, sizeof(rp->status)); + + std_codecs = (void *)skb->data; + + /* check for payload data length before accessing */ + if (skb->len < flex_array_size(std_codecs, codec, std_codecs->num) + + sizeof(std_codecs->num)) + goto error; + + memset(&caps, 0, sizeof(caps)); + + for (i = 0; i < std_codecs->num; i++) { + caps.id = std_codecs->codec[i].id; + hci_read_codec_capabilities(hdev, std_codecs->codec[i].transport, + &caps); + } + + skb_pull(skb, flex_array_size(std_codecs, codec, std_codecs->num) + + sizeof(std_codecs->num)); + + vnd_codecs = (void *)skb->data; + + /* check for payload data length before accessing */ + if (skb->len < + flex_array_size(vnd_codecs, codec, vnd_codecs->num) + + sizeof(vnd_codecs->num)) + goto error; + + for (i = 0; i < vnd_codecs->num; i++) { + caps.id = 0xFF; + caps.cid = vnd_codecs->codec[i].cid; + caps.vid = vnd_codecs->codec[i].vid; + hci_read_codec_capabilities(hdev, vnd_codecs->codec[i].transport, + &caps); + } + +error: + kfree_skb(skb); +} diff --git a/net/bluetooth/hci_codec.h b/net/bluetooth/hci_codec.h new file mode 100644 index 000000000..a2751930f --- /dev/null +++ b/net/bluetooth/hci_codec.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: 
GPL-2.0 */ + +/* Copyright (C) 2014 Intel Corporation */ + +void hci_read_supported_codecs(struct hci_dev *hdev); +void hci_read_supported_codecs_v2(struct hci_dev *hdev); +void hci_codec_list_clear(struct list_head *codec_list); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c new file mode 100644 index 000000000..12d368753 --- /dev/null +++ b/net/bluetooth/hci_conn.c @@ -0,0 +1,2917 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. + + Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth HCI connection handling. */ + +#include <linux/export.h> +#include <linux/debugfs.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> +#include <net/bluetooth/iso.h> +#include <net/bluetooth/mgmt.h> + +#include "hci_request.h" +#include "smp.h" +#include "a2mp.h" +#include "eir.h" + +struct sco_param { + u16 pkt_type; + u16 max_latency; + u8 retrans_effort; +}; + +struct conn_handle_t { + struct hci_conn *conn; + __u16 handle; +}; + +static const struct sco_param esco_param_cvsd[] = { + { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a, 0x01 }, /* S3 */ + { EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007, 0x01 }, /* S2 */ + { EDR_ESCO_MASK | ESCO_EV3, 0x0007, 0x01 }, /* S1 */ + { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0x01 }, /* D1 */ + { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0x01 }, /* D0 */ +}; + +static const struct sco_param sco_param_cvsd[] = { + { EDR_ESCO_MASK | ESCO_HV3, 0xffff, 0xff }, /* D1 */ + { EDR_ESCO_MASK | ESCO_HV1, 0xffff, 0xff }, /* D0 */ +}; + +static const struct sco_param esco_param_msbc[] = { + { EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d, 0x02 }, /* T2 */ + { EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */ +}; + +/* This function requires the caller holds hdev->lock */ +static void hci_connect_le_scan_cleanup(struct hci_conn *conn, u8 status) +{ + struct hci_conn_params *params; + struct hci_dev *hdev = conn->hdev; + struct smp_irk *irk; + bdaddr_t *bdaddr; + u8 bdaddr_type; + + bdaddr = &conn->dst; + bdaddr_type = conn->dst_type; + + /* Check if we need to convert to identity address */ + irk = hci_get_irk(hdev, bdaddr, bdaddr_type); + if (irk) { + bdaddr = &irk->bdaddr; + bdaddr_type = irk->addr_type; + } + + params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr, + bdaddr_type); + if (!params) + return; + + if (params->conn) { + hci_conn_drop(params->conn); + hci_conn_put(params->conn); + params->conn = NULL; + } + + if (!params->explicit_connect) + return; + + /* If the status indicates successful 
cancellation of + * the attempt (i.e. Unknown Connection Id) there's no point in + * notifying failure since we'll go back to keep trying to + * connect. The only exception is explicit connect requests + * where a timeout + cancel does indicate an actual failure. + */ + if (status && status != HCI_ERROR_UNKNOWN_CONN_ID) + mgmt_connect_failed(hdev, &conn->dst, conn->type, + conn->dst_type, status); + + /* The connection attempt was doing a scan for a new RPA and is + * in the scan phase. If params are not associated with any other + * autoconnect action, remove them completely. If they are, just unmark + * them as waiting for connection, by clearing the explicit_connect field. + */ + params->explicit_connect = false; + + hci_pend_le_list_del_init(params); + + switch (params->auto_connect) { + case HCI_AUTO_CONN_EXPLICIT: + hci_conn_params_del(hdev, bdaddr, bdaddr_type); + /* return instead of break to avoid duplicate scan update */ + return; + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + hci_pend_le_list_add(params, &hdev->pend_le_conns); + break; + case HCI_AUTO_CONN_REPORT: + hci_pend_le_list_add(params, &hdev->pend_le_reports); + break; + default: + break; + } + + hci_update_passive_scan(hdev); +} + +static void hci_conn_cleanup(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags)) + hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type); + + if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + hci_remove_link_key(hdev, &conn->dst); + + hci_chan_list_flush(conn); + + hci_conn_hash_del(hdev, conn); + + if (conn->cleanup) + conn->cleanup(conn); + + if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { + switch (conn->setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_CVSD: + case SCO_AIRMODE_TRANSP: + if (hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_DISABLE_SCO); + break; + } + } else { + if (hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_CONN_DEL); + } + + debugfs_remove_recursive(conn->debugfs); + + hci_conn_del_sysfs(conn); + + hci_dev_put(hdev); +} + +static void le_scan_cleanup(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, + le_scan_cleanup); + struct hci_dev *hdev = conn->hdev; + struct hci_conn *c = NULL; + + BT_DBG("%s hcon %p", hdev->name, conn); + + hci_dev_lock(hdev); + + /* Check that the hci_conn is still around */ + rcu_read_lock(); + list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) { + if (c == conn) + break; + } + rcu_read_unlock(); + + if (c == conn) { + hci_connect_le_scan_cleanup(conn, 0x00); + hci_conn_cleanup(conn); + } + + hci_dev_unlock(hdev); + hci_dev_put(hdev); + hci_conn_put(conn); +} + +static void hci_connect_le_scan_remove(struct hci_conn *conn) +{ + BT_DBG("%s hcon %p", conn->hdev->name, conn); + + /* We can't call hci_conn_del/hci_conn_cleanup here since that + * could deadlock with another hci_conn_del() call that's holding + * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work). + * Instead, grab temporary extra references to the hci_dev and + * hci_conn and perform the necessary cleanup in a separate work + * callback. + */ + + hci_dev_hold(conn->hdev); + hci_conn_get(conn); + + /* Even though we hold a reference to the hdev, many other + * things might get cleaned up meanwhile, including the hdev's + * own workqueue, so we can't use that for scheduling. 
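+ * schedule_work() therefore puts le_scan_cleanup on the system + * workqueue, which is not tied to the lifetime of this hdev.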
+ */ + schedule_work(&conn->le_scan_cleanup); +} + +static void hci_acl_create_connection(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct inquiry_entry *ie; + struct hci_cp_create_conn cp; + + BT_DBG("hcon %p", conn); + + /* Many controllers disallow HCI Create Connection while it is doing + * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create + * Connection. This may cause the MGMT discovering state to become false + * without user space's request but it is okay since the MGMT Discovery + * APIs do not promise that discovery should be done forever. Instead, + * the user space monitors the status of MGMT discovering and it may + * request for discovery again when this flag becomes false. + */ + if (test_bit(HCI_INQUIRY, &hdev->flags)) { + /* Put this connection to "pending" state so that it will be + * executed after the inquiry cancel command complete event. + */ + conn->state = BT_CONNECT2; + hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL); + return; + } + + conn->state = BT_CONNECT; + conn->out = true; + conn->role = HCI_ROLE_MASTER; + + conn->attempt++; + + conn->link_policy = hdev->link_policy; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.pscan_rep_mode = 0x02; + + ie = hci_inquiry_cache_lookup(hdev, &conn->dst); + if (ie) { + if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) { + cp.pscan_rep_mode = ie->data.pscan_rep_mode; + cp.pscan_mode = ie->data.pscan_mode; + cp.clock_offset = ie->data.clock_offset | + cpu_to_le16(0x8000); + } + + memcpy(conn->dev_class, ie->data.dev_class, 3); + } + + cp.pkt_type = cpu_to_le16(conn->pkt_type); + if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER)) + cp.role_switch = 0x01; + else + cp.role_switch = 0x00; + + hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp); +} + +int hci_disconnect(struct hci_conn *conn, __u8 reason) +{ + BT_DBG("hcon %p", conn); + + /* When we are central of an established connection and it enters + * the disconnect timeout, then go ahead and try to read the + * current clock offset. Processing of the result is done + * within the event handling and hci_clock_offset_evt function. 
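+ * The offset reported back by the controller is kept in the inquiry + * cache, from where hci_acl_create_connection() can reuse it to speed + * up a later reconnection.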
+ */ + if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER && + (conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) { + struct hci_dev *hdev = conn->hdev; + struct hci_cp_read_clock_offset clkoff_cp; + + clkoff_cp.handle = cpu_to_le16(conn->handle); + hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp), + &clkoff_cp); + } + + return hci_abort_conn(conn, reason); +} + +static void hci_add_sco(struct hci_conn *conn, __u16 handle) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_add_sco cp; + + BT_DBG("hcon %p", conn); + + conn->state = BT_CONNECT; + conn->out = true; + + conn->attempt++; + + cp.handle = cpu_to_le16(handle); + cp.pkt_type = cpu_to_le16(conn->pkt_type); + + hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp); +} + +static bool find_next_esco_param(struct hci_conn *conn, + const struct sco_param *esco_param, int size) +{ + for (; conn->attempt <= size; conn->attempt++) { + if (lmp_esco_2m_capable(conn->link) || + (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3)) + break; + BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported", + conn, conn->attempt); + } + + return conn->attempt <= size; +} + +static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec) +{ + int err; + __u8 vnd_len, *vnd_data = NULL; + struct hci_op_configure_data_path *cmd = NULL; + + err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, + &vnd_data); + if (err < 0) + goto error; + + cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL); + if (!cmd) { + err = -ENOMEM; + goto error; + } + + err = hdev->get_data_path_id(hdev, &cmd->data_path_id); + if (err < 0) + goto error; + + cmd->vnd_len = vnd_len; + memcpy(cmd->vnd_data, vnd_data, vnd_len); + + cmd->direction = 0x00; + __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH, + sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT); + + cmd->direction = 0x01; + err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH, + sizeof(*cmd) + vnd_len, cmd, + HCI_CMD_TIMEOUT); +error: + + kfree(cmd); + kfree(vnd_data); + return err; +} + +static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data) +{ + struct conn_handle_t *conn_handle = data; + struct hci_conn *conn = conn_handle->conn; + __u16 handle = conn_handle->handle; + struct hci_cp_enhanced_setup_sync_conn cp; + const struct sco_param *param; + + kfree(conn_handle); + + bt_dev_dbg(hdev, "hcon %p", conn); + + /* for offload use case, codec needs to be configured before opening SCO */ + if (conn->codec.data_path) + configure_datapath_sync(hdev, &conn->codec); + + conn->state = BT_CONNECT; + conn->out = true; + + conn->attempt++; + + memset(&cp, 0x00, sizeof(cp)); + + cp.handle = cpu_to_le16(handle); + + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + + switch (conn->codec.id) { + case BT_CODEC_MSBC: + if (!find_next_esco_param(conn, esco_param_msbc, + ARRAY_SIZE(esco_param_msbc))) + return -EINVAL; + + param = &esco_param_msbc[conn->attempt - 1]; + cp.tx_coding_format.id = 0x05; + cp.rx_coding_format.id = 0x05; + cp.tx_codec_frame_size = __cpu_to_le16(60); + cp.rx_codec_frame_size = __cpu_to_le16(60); + cp.in_bandwidth = __cpu_to_le32(32000); + cp.out_bandwidth = __cpu_to_le32(32000); + cp.in_coding_format.id = 0x04; + cp.out_coding_format.id = 0x04; + cp.in_coded_data_size = __cpu_to_le16(16); + cp.out_coded_data_size = __cpu_to_le16(16); + cp.in_pcm_data_format = 2; + cp.out_pcm_data_format = 2; + cp.in_pcm_sample_payload_msb_pos = 0; + cp.out_pcm_sample_payload_msb_pos = 0; + cp.in_data_path = 
conn->codec.data_path; + cp.out_data_path = conn->codec.data_path; + cp.in_transport_unit_size = 1; + cp.out_transport_unit_size = 1; + break; + + case BT_CODEC_TRANSPARENT: + if (!find_next_esco_param(conn, esco_param_msbc, + ARRAY_SIZE(esco_param_msbc))) + return -EINVAL; + param = &esco_param_msbc[conn->attempt - 1]; + cp.tx_coding_format.id = 0x03; + cp.rx_coding_format.id = 0x03; + cp.tx_codec_frame_size = __cpu_to_le16(60); + cp.rx_codec_frame_size = __cpu_to_le16(60); + cp.in_bandwidth = __cpu_to_le32(0x1f40); + cp.out_bandwidth = __cpu_to_le32(0x1f40); + cp.in_coding_format.id = 0x03; + cp.out_coding_format.id = 0x03; + cp.in_coded_data_size = __cpu_to_le16(16); + cp.out_coded_data_size = __cpu_to_le16(16); + cp.in_pcm_data_format = 2; + cp.out_pcm_data_format = 2; + cp.in_pcm_sample_payload_msb_pos = 0; + cp.out_pcm_sample_payload_msb_pos = 0; + cp.in_data_path = conn->codec.data_path; + cp.out_data_path = conn->codec.data_path; + cp.in_transport_unit_size = 1; + cp.out_transport_unit_size = 1; + break; + + case BT_CODEC_CVSD: + if (lmp_esco_capable(conn->link)) { + if (!find_next_esco_param(conn, esco_param_cvsd, + ARRAY_SIZE(esco_param_cvsd))) + return -EINVAL; + param = &esco_param_cvsd[conn->attempt - 1]; + } else { + if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) + return -EINVAL; + param = &sco_param_cvsd[conn->attempt - 1]; + } + cp.tx_coding_format.id = 2; + cp.rx_coding_format.id = 2; + cp.tx_codec_frame_size = __cpu_to_le16(60); + cp.rx_codec_frame_size = __cpu_to_le16(60); + cp.in_bandwidth = __cpu_to_le32(16000); + cp.out_bandwidth = __cpu_to_le32(16000); + cp.in_coding_format.id = 4; + cp.out_coding_format.id = 4; + cp.in_coded_data_size = __cpu_to_le16(16); + cp.out_coded_data_size = __cpu_to_le16(16); + cp.in_pcm_data_format = 2; + cp.out_pcm_data_format = 2; + cp.in_pcm_sample_payload_msb_pos = 0; + cp.out_pcm_sample_payload_msb_pos = 0; + cp.in_data_path = conn->codec.data_path; + cp.out_data_path = conn->codec.data_path; + cp.in_transport_unit_size = 16; + cp.out_transport_unit_size = 16; + break; + default: + return -EINVAL; + } + + cp.retrans_effort = param->retrans_effort; + cp.pkt_type = __cpu_to_le16(param->pkt_type); + cp.max_latency = __cpu_to_le16(param->max_latency); + + if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) + return -EIO; + + return 0; +} + +static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_setup_sync_conn cp; + const struct sco_param *param; + + bt_dev_dbg(hdev, "hcon %p", conn); + + conn->state = BT_CONNECT; + conn->out = true; + + conn->attempt++; + + cp.handle = cpu_to_le16(handle); + + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.voice_setting = cpu_to_le16(conn->setting); + + switch (conn->setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_TRANSP: + if (!find_next_esco_param(conn, esco_param_msbc, + ARRAY_SIZE(esco_param_msbc))) + return false; + param = &esco_param_msbc[conn->attempt - 1]; + break; + case SCO_AIRMODE_CVSD: + if (lmp_esco_capable(conn->link)) { + if (!find_next_esco_param(conn, esco_param_cvsd, + ARRAY_SIZE(esco_param_cvsd))) + return false; + param = &esco_param_cvsd[conn->attempt - 1]; + } else { + if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) + return false; + param = &sco_param_cvsd[conn->attempt - 1]; + } + break; + default: + return false; + } + + cp.retrans_effort = param->retrans_effort; + cp.pkt_type = __cpu_to_le16(param->pkt_type); + cp.max_latency = 
__cpu_to_le16(param->max_latency); + + if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) + return false; + + return true; +} + +bool hci_setup_sync(struct hci_conn *conn, __u16 handle) +{ + int result; + struct conn_handle_t *conn_handle; + + if (enhanced_sync_conn_capable(conn->hdev)) { + conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL); + + if (!conn_handle) + return false; + + conn_handle->conn = conn; + conn_handle->handle = handle; + result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync, + conn_handle, NULL); + if (result < 0) + kfree(conn_handle); + + return result == 0; + } + + return hci_setup_sync_conn(conn, handle); +} + +u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency, + u16 to_multiplier) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_conn_params *params; + struct hci_cp_le_conn_update cp; + + hci_dev_lock(hdev); + + params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); + if (params) { + params->conn_min_interval = min; + params->conn_max_interval = max; + params->conn_latency = latency; + params->supervision_timeout = to_multiplier; + } + + hci_dev_unlock(hdev); + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.conn_interval_min = cpu_to_le16(min); + cp.conn_interval_max = cpu_to_le16(max); + cp.conn_latency = cpu_to_le16(latency); + cp.supervision_timeout = cpu_to_le16(to_multiplier); + cp.min_ce_len = cpu_to_le16(0x0000); + cp.max_ce_len = cpu_to_le16(0x0000); + + hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp); + + if (params) + return 0x01; + + return 0x00; +} + +void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, + __u8 ltk[16], __u8 key_size) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_le_start_enc cp; + + BT_DBG("hcon %p", conn); + + memset(&cp, 0, sizeof(cp)); + + cp.handle = cpu_to_le16(conn->handle); + cp.rand = rand; + cp.ediv = ediv; + memcpy(cp.ltk, ltk, key_size); + + hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp); +} + +/* Device _must_ be locked */ +void hci_sco_setup(struct hci_conn *conn, __u8 status) +{ + struct hci_conn *sco = conn->link; + + if (!sco) + return; + + BT_DBG("hcon %p", conn); + + if (!status) { + if (lmp_esco_capable(conn->hdev)) + hci_setup_sync(sco, conn->handle); + else + hci_add_sco(sco, conn->handle); + } else { + hci_connect_cfm(sco, status); + hci_conn_del(sco); + } +} + +static void hci_conn_timeout(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, + disc_work.work); + int refcnt = atomic_read(&conn->refcnt); + + BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); + + WARN_ON(refcnt < 0); + + /* FIXME: It was observed that in pairing failed scenario, refcnt + * drops below 0. Probably this is because l2cap_conn_del calls + * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is + * dropped. After that loop hci_chan_del is called which also drops + * conn. For now make sure that ACL is alive if refcnt is higher than 0, + * otherwise drop it. 
+ */ + if (refcnt > 0) + return; + + /* LE connections in scanning state need special handling */ + if (conn->state == BT_CONNECT && conn->type == LE_LINK && + test_bit(HCI_CONN_SCANNING, &conn->flags)) { + hci_connect_le_scan_remove(conn); + return; + } + + hci_abort_conn(conn, hci_proto_disconn_ind(conn)); +} + +/* Enter sniff mode */ +static void hci_conn_idle(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, + idle_work.work); + struct hci_dev *hdev = conn->hdev; + + BT_DBG("hcon %p mode %d", conn, conn->mode); + + if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn)) + return; + + if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF)) + return; + + if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) { + struct hci_cp_sniff_subrate cp; + cp.handle = cpu_to_le16(conn->handle); + cp.max_latency = cpu_to_le16(0); + cp.min_remote_timeout = cpu_to_le16(0); + cp.min_local_timeout = cpu_to_le16(0); + hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp); + } + + if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { + struct hci_cp_sniff_mode cp; + cp.handle = cpu_to_le16(conn->handle); + cp.max_interval = cpu_to_le16(hdev->sniff_max_interval); + cp.min_interval = cpu_to_le16(hdev->sniff_min_interval); + cp.attempt = cpu_to_le16(4); + cp.timeout = cpu_to_le16(1); + hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp); + } +} + +static void hci_conn_auto_accept(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, + auto_accept_work.work); + + hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst), + &conn->dst); +} + +static void le_disable_advertising(struct hci_dev *hdev) +{ + if (ext_adv_capable(hdev)) { + struct hci_cp_le_set_ext_adv_enable cp; + + cp.enable = 0x00; + cp.num_of_sets = 0x00; + + hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp), + &cp); + } else { + u8 enable = 0x00; + hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), + &enable); + } +} + +static void le_conn_timeout(struct work_struct *work) +{ + struct hci_conn *conn = container_of(work, struct hci_conn, + le_conn_timeout.work); + struct hci_dev *hdev = conn->hdev; + + BT_DBG(""); + + /* We could end up here due to having done directed advertising, + * so clean up the state if necessary. This should however only + * happen with broken hardware or if low duty cycle was used + * (which doesn't have a timeout of its own). 
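+ * In the peripheral case, report HCI_ERROR_ADVERTISING_TIMEOUT, the + * same status a controller generates when high duty cycle directed + * advertising times out.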
+ */ + if (conn->role == HCI_ROLE_SLAVE) { + /* Disable LE Advertising */ + le_disable_advertising(hdev); + hci_dev_lock(hdev); + hci_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT); + hci_dev_unlock(hdev); + return; + } + + hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); +} + +struct iso_cig_params { + struct hci_cp_le_set_cig_params cp; + struct hci_cis_params cis[0x1f]; +}; + +struct iso_list_data { + union { + u8 cig; + u8 big; + }; + union { + u8 cis; + u8 bis; + u16 sync_handle; + }; + int count; + struct iso_cig_params pdu; +}; + +static void bis_list(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Skip if not broadcast/ANY address */ + if (bacmp(&conn->dst, BDADDR_ANY)) + return; + + if (d->big != conn->iso_qos.big || d->bis == BT_ISO_QOS_BIS_UNSET || + d->bis != conn->iso_qos.bis) + return; + + d->count++; +} + +static void find_bis(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Ignore unicast */ + if (bacmp(&conn->dst, BDADDR_ANY)) + return; + + d->count++; +} + +static int terminate_big_sync(struct hci_dev *hdev, void *data) +{ + struct iso_list_data *d = data; + + bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", d->big, d->bis); + + hci_remove_ext_adv_instance_sync(hdev, d->bis, NULL); + + /* Check if ISO connection is a BIS and terminate BIG if there are + * no other connections using it. + */ + hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d); + if (d->count) + return 0; + + return hci_le_terminate_big_sync(hdev, d->big, + HCI_ERROR_LOCAL_HOST_TERM); +} + +static void terminate_big_destroy(struct hci_dev *hdev, void *data, int err) +{ + kfree(data); +} + +static int hci_le_terminate_big(struct hci_dev *hdev, u8 big, u8 bis) +{ + struct iso_list_data *d; + int ret; + + bt_dev_dbg(hdev, "big 0x%2.2x bis 0x%2.2x", big, bis); + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + memset(d, 0, sizeof(*d)); + d->big = big; + d->bis = bis; + + ret = hci_cmd_sync_queue(hdev, terminate_big_sync, d, + terminate_big_destroy); + if (ret) + kfree(d); + + return ret; +} + +static int big_terminate_sync(struct hci_dev *hdev, void *data) +{ + struct iso_list_data *d = data; + + bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", d->big, + d->sync_handle); + + /* Check if ISO connection is a BIS and terminate BIG if there are + * no other connections using it. + */ + hci_conn_hash_list_state(hdev, find_bis, ISO_LINK, BT_CONNECTED, d); + if (d->count) + return 0; + + hci_le_big_terminate_sync(hdev, d->big); + + return hci_le_pa_terminate_sync(hdev, d->sync_handle); +} + +static int hci_le_big_terminate(struct hci_dev *hdev, u8 big, u16 sync_handle) +{ + struct iso_list_data *d; + int ret; + + bt_dev_dbg(hdev, "big 0x%2.2x sync_handle 0x%4.4x", big, sync_handle); + + d = kmalloc(sizeof(*d), GFP_KERNEL); + if (!d) + return -ENOMEM; + + memset(d, 0, sizeof(*d)); + d->big = big; + d->sync_handle = sync_handle; + + ret = hci_cmd_sync_queue(hdev, big_terminate_sync, d, + terminate_big_destroy); + if (ret) + kfree(d); + + return ret; +} + +/* Cleanup BIS connection + * + * Detects if there are any BIS left connected in a BIG. + * broadcaster: Remove advertising instance and terminate BIG. + * broadcast receiver: Terminate BIG sync and terminate PA sync. 
+ */ +static void bis_cleanup(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (conn->role == HCI_ROLE_MASTER) { + if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags)) + return; + + hci_le_terminate_big(hdev, conn->iso_qos.big, + conn->iso_qos.bis); + } else { + hci_le_big_terminate(hdev, conn->iso_qos.big, + conn->sync_handle); + } +} + +static int remove_cig_sync(struct hci_dev *hdev, void *data) +{ + u8 handle = PTR_ERR(data); + + return hci_le_remove_cig_sync(hdev, handle); +} + +static int hci_le_remove_cig(struct hci_dev *hdev, u8 handle) +{ + bt_dev_dbg(hdev, "handle 0x%2.2x", handle); + + return hci_cmd_sync_queue(hdev, remove_cig_sync, ERR_PTR(handle), NULL); +} + +static void find_cis(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Ignore broadcast */ + if (!bacmp(&conn->dst, BDADDR_ANY)) + return; + + d->count++; +} + +/* Cleanup CIS connection: + * + * Detects if there are any CIS left connected in a CIG and removes it. + */ +static void cis_cleanup(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct iso_list_data d; + + memset(&d, 0, sizeof(d)); + d.cig = conn->iso_qos.cig; + + /* Check if ISO connection is a CIS and remove CIG if there are + * no other connections using it. + */ + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_BOUND, &d); + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECT, &d); + hci_conn_hash_list_state(hdev, find_cis, ISO_LINK, BT_CONNECTED, &d); + if (d.count) + return; + + hci_le_remove_cig(hdev, conn->iso_qos.cig); +} + +struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, + u8 role) +{ + struct hci_conn *conn; + + BT_DBG("%s dst %pMR", hdev->name, dst); + + conn = kzalloc(sizeof(*conn), GFP_KERNEL); + if (!conn) + return NULL; + + bacpy(&conn->dst, dst); + bacpy(&conn->src, &hdev->bdaddr); + conn->handle = HCI_CONN_HANDLE_UNSET; + conn->hdev = hdev; + conn->type = type; + conn->role = role; + conn->mode = HCI_CM_ACTIVE; + conn->state = BT_OPEN; + conn->auth_type = HCI_AT_GENERAL_BONDING; + conn->io_capability = hdev->io_capability; + conn->remote_auth = 0xff; + conn->key_type = 0xff; + conn->rssi = HCI_RSSI_INVALID; + conn->tx_power = HCI_TX_POWER_INVALID; + conn->max_tx_power = HCI_TX_POWER_INVALID; + + set_bit(HCI_CONN_POWER_SAVE, &conn->flags); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + + /* Set Default Authenticated payload timeout to 30s */ + conn->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; + + if (conn->role == HCI_ROLE_MASTER) + conn->out = true; + + switch (type) { + case ACL_LINK: + conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK; + break; + case LE_LINK: + /* conn->src should reflect the local identity address */ + hci_copy_identity_address(hdev, &conn->src, &conn->src_type); + break; + case ISO_LINK: + /* conn->src should reflect the local identity address */ + hci_copy_identity_address(hdev, &conn->src, &conn->src_type); + + /* set proper cleanup function */ + if (!bacmp(dst, BDADDR_ANY)) + conn->cleanup = bis_cleanup; + else if (conn->role == HCI_ROLE_MASTER) + conn->cleanup = cis_cleanup; + + break; + case SCO_LINK: + if (lmp_esco_capable(hdev)) + conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | + (hdev->esco_type & EDR_ESCO_MASK); + else + conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK; + break; + case ESCO_LINK: + conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK; + break; + } + + skb_queue_head_init(&conn->data_q); + + INIT_LIST_HEAD(&conn->chan_list); + + 
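/* Timers and deferred work that drive the connection's life cycle; + * see hci_conn_timeout() and friends above. + */ +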
INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); + INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept); + INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle); + INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout); + INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup); + + atomic_set(&conn->refcnt, 0); + + hci_dev_hold(hdev); + + hci_conn_hash_add(hdev, conn); + + /* The SCO and eSCO connections will only be notified when their + * setup has been completed. This is different to ACL links which + * can be notified right away. + */ + if (conn->type != SCO_LINK && conn->type != ESCO_LINK) { + if (hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_CONN_ADD); + } + + hci_conn_init_sysfs(conn); + + return conn; +} + +static bool hci_conn_unlink(struct hci_conn *conn) +{ + if (!conn->link) + return false; + + conn->link->link = NULL; + conn->link = NULL; + + return true; +} + +int hci_conn_del(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle); + + cancel_delayed_work_sync(&conn->disc_work); + cancel_delayed_work_sync(&conn->auto_accept_work); + cancel_delayed_work_sync(&conn->idle_work); + + if (conn->type == ACL_LINK) { + struct hci_conn *link = conn->link; + + if (link) { + hci_conn_unlink(conn); + /* Due to a race, the SCO connection might not be + * established yet at this point. Delete it now, + * otherwise it is possible for it to get stuck and + * never be deleted. + */ + if (link->handle == HCI_CONN_HANDLE_UNSET) + hci_conn_del(link); + } + + /* Unacked frames */ + hdev->acl_cnt += conn->sent; + } else if (conn->type == LE_LINK) { + cancel_delayed_work(&conn->le_conn_timeout); + + if (hdev->le_pkts) + hdev->le_cnt += conn->sent; + else + hdev->acl_cnt += conn->sent; + } else { + struct hci_conn *acl = conn->link; + + if (acl) { + hci_conn_unlink(conn); + hci_conn_drop(acl); + } + + /* Unacked ISO frames */ + if (conn->type == ISO_LINK) { + if (hdev->iso_pkts) + hdev->iso_cnt += conn->sent; + else if (hdev->le_pkts) + hdev->le_cnt += conn->sent; + else + hdev->acl_cnt += conn->sent; + } + } + + if (conn->amp_mgr) + amp_mgr_put(conn->amp_mgr); + + skb_queue_purge(&conn->data_q); + + /* Remove the connection from the list and cleanup its remaining + * state. This is a separate function since for some cases like + * BT_CONNECT_SCAN we *only* want the cleanup part without the + * rest of hci_conn_del. 
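+ * (le_scan_cleanup() above is one such case: it calls + * hci_conn_cleanup() directly.)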
+ */ + hci_conn_cleanup(conn); + + return 0; +} + +struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src, uint8_t src_type) +{ + int use_src = bacmp(src, BDADDR_ANY); + struct hci_dev *hdev = NULL, *d; + + BT_DBG("%pMR -> %pMR", src, dst); + + read_lock(&hci_dev_list_lock); + + list_for_each_entry(d, &hci_dev_list, list) { + if (!test_bit(HCI_UP, &d->flags) || + hci_dev_test_flag(d, HCI_USER_CHANNEL) || + d->dev_type != HCI_PRIMARY) + continue; + + /* Simple routing: + * No source address - find interface with bdaddr != dst + * Source address - find interface with bdaddr == src + */ + + if (use_src) { + bdaddr_t id_addr; + u8 id_addr_type; + + if (src_type == BDADDR_BREDR) { + if (!lmp_bredr_capable(d)) + continue; + bacpy(&id_addr, &d->bdaddr); + id_addr_type = BDADDR_BREDR; + } else { + if (!lmp_le_capable(d)) + continue; + + hci_copy_identity_address(d, &id_addr, + &id_addr_type); + + /* Convert from HCI to three-value type */ + if (id_addr_type == ADDR_LE_DEV_PUBLIC) + id_addr_type = BDADDR_LE_PUBLIC; + else + id_addr_type = BDADDR_LE_RANDOM; + } + + if (!bacmp(&id_addr, src) && id_addr_type == src_type) { + hdev = d; break; + } + } else { + if (bacmp(&d->bdaddr, dst)) { + hdev = d; break; + } + } + } + + if (hdev) + hdev = hci_dev_hold(hdev); + + read_unlock(&hci_dev_list_lock); + return hdev; +} +EXPORT_SYMBOL(hci_get_route); + +/* This function requires the caller holds hdev->lock */ +static void hci_le_conn_failed(struct hci_conn *conn, u8 status) +{ + struct hci_dev *hdev = conn->hdev; + + hci_connect_le_scan_cleanup(conn, status); + + /* Enable advertising in case this was a failed connection + * attempt as a peripheral. + */ + hci_enable_advertising(hdev); +} + +/* This function requires the caller holds hdev->lock */ +void hci_conn_failed(struct hci_conn *conn, u8 status) +{ + struct hci_dev *hdev = conn->hdev; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + switch (conn->type) { + case LE_LINK: + hci_le_conn_failed(conn, status); + break; + case ACL_LINK: + mgmt_connect_failed(hdev, &conn->dst, conn->type, + conn->dst_type, status); + break; + } + + conn->state = BT_CLOSED; + hci_connect_cfm(conn, status); + hci_conn_del(conn); +} + +static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "err %d", err); + + hci_dev_lock(hdev); + + if (!err) { + hci_connect_le_scan_cleanup(conn, 0x00); + goto done; + } + + /* Check if connection is still pending */ + if (conn != hci_lookup_le_connect(hdev)) + goto done; + + hci_conn_failed(conn, bt_status(err)); + +done: + hci_dev_unlock(hdev); +} + +static int hci_connect_le_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "conn %p", conn); + + return hci_le_create_conn_sync(hdev, conn); +} + +struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst, + u8 dst_type, bool dst_resolved, u8 sec_level, + u16 conn_timeout, u8 role) +{ + struct hci_conn *conn; + struct smp_irk *irk; + int err; + + /* Let's make sure that le is enabled.*/ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + if (lmp_le_capable(hdev)) + return ERR_PTR(-ECONNREFUSED); + + return ERR_PTR(-EOPNOTSUPP); + } + + /* Since the controller supports only one LE connection attempt at a + * time, we return -EBUSY if there is any connection attempt running. 
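+ * hci_lookup_le_connect() matches any LE link in BT_CONNECT state + * that is past its scanning phase.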
+ */ + if (hci_lookup_le_connect(hdev)) + return ERR_PTR(-EBUSY); + + /* If there's already a connection object but it's not in + * scanning state it means it must already be established, in + * which case we can't do anything else except report a failure + * to connect. + */ + conn = hci_conn_hash_lookup_le(hdev, dst, dst_type); + if (conn && !test_bit(HCI_CONN_SCANNING, &conn->flags)) { + return ERR_PTR(-EBUSY); + } + + /* Check if the destination address has been resolved by the controller + * since if it did then the identity address shall be used. + */ + if (!dst_resolved) { + /* When given an identity address with existing identity + * resolving key, the connection needs to be established + * to a resolvable random address. + * + * Storing the resolvable random address is required here + * to handle connection failures. The address will later + * be resolved back into the original identity address + * from the connect request. + */ + irk = hci_find_irk_by_addr(hdev, dst, dst_type); + if (irk && bacmp(&irk->rpa, BDADDR_ANY)) { + dst = &irk->rpa; + dst_type = ADDR_LE_DEV_RANDOM; + } + } + + if (conn) { + bacpy(&conn->dst, dst); + } else { + conn = hci_conn_add(hdev, LE_LINK, dst, role); + if (!conn) + return ERR_PTR(-ENOMEM); + hci_conn_hold(conn); + conn->pending_sec_level = sec_level; + } + + conn->dst_type = dst_type; + conn->sec_level = BT_SECURITY_LOW; + conn->conn_timeout = conn_timeout; + + conn->state = BT_CONNECT; + clear_bit(HCI_CONN_SCANNING, &conn->flags); + + err = hci_cmd_sync_queue(hdev, hci_connect_le_sync, conn, + create_le_conn_complete); + if (err) { + hci_conn_del(conn); + return ERR_PTR(err); + } + + return conn; +} + +static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) +{ + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_le(hdev, addr, type); + if (!conn) + return false; + + if (conn->state != BT_CONNECTED) + return false; + + return true; +} + +/* This function requires the caller holds hdev->lock */ +static int hci_explicit_conn_params_set(struct hci_dev *hdev, + bdaddr_t *addr, u8 addr_type) +{ + struct hci_conn_params *params; + + if (is_connected(hdev, addr, addr_type)) + return -EISCONN; + + params = hci_conn_params_lookup(hdev, addr, addr_type); + if (!params) { + params = hci_conn_params_add(hdev, addr, addr_type); + if (!params) + return -ENOMEM; + + /* If we created new params, mark them to be deleted in + * hci_connect_le_scan_cleanup. It's different case than + * existing disabled params, those will stay after cleanup. 
+ */ + params->auto_connect = HCI_AUTO_CONN_EXPLICIT; + } + + /* We're trying to connect, so make sure params are at pend_le_conns */ + if (params->auto_connect == HCI_AUTO_CONN_DISABLED || + params->auto_connect == HCI_AUTO_CONN_REPORT || + params->auto_connect == HCI_AUTO_CONN_EXPLICIT) { + hci_pend_le_list_del_init(params); + hci_pend_le_list_add(params, &hdev->pend_le_conns); + } + + params->explicit_connect = true; + + BT_DBG("addr %pMR (type %u) auto_connect %u", addr, addr_type, + params->auto_connect); + + return 0; +} + +static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos) +{ + struct iso_list_data data; + + /* Allocate a BIG if not set */ + if (qos->big == BT_ISO_QOS_BIG_UNSET) { + for (data.big = 0x00; data.big < 0xef; data.big++) { + data.count = 0; + data.bis = 0xff; + + hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, + BT_BOUND, &data); + if (!data.count) + break; + } + + if (data.big == 0xef) + return -EADDRNOTAVAIL; + + /* Update BIG */ + qos->big = data.big; + } + + return 0; +} + +static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos) +{ + struct iso_list_data data; + + /* Allocate BIS if not set */ + if (qos->bis == BT_ISO_QOS_BIS_UNSET) { + /* Find an unused adv set to advertise BIS, skip instance 0x00 + * since it is reserved as general purpose set. + */ + for (data.bis = 0x01; data.bis < hdev->le_num_of_adv_sets; + data.bis++) { + data.count = 0; + + hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, + BT_BOUND, &data); + if (!data.count) + break; + } + + if (data.bis == hdev->le_num_of_adv_sets) + return -EADDRNOTAVAIL; + + /* Update BIS */ + qos->bis = data.bis; + } + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, + struct bt_iso_qos *qos) +{ + struct hci_conn *conn; + struct iso_list_data data; + int err; + + /* Let's make sure that le is enabled.*/ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + if (lmp_le_capable(hdev)) + return ERR_PTR(-ECONNREFUSED); + return ERR_PTR(-EOPNOTSUPP); + } + + err = qos_set_big(hdev, qos); + if (err) + return ERR_PTR(err); + + err = qos_set_bis(hdev, qos); + if (err) + return ERR_PTR(err); + + data.big = qos->big; + data.bis = qos->bis; + data.count = 0; + + /* Check if there is already a matching BIG/BIS */ + hci_conn_hash_list_state(hdev, bis_list, ISO_LINK, BT_BOUND, &data); + if (data.count) + return ERR_PTR(-EADDRINUSE); + + conn = hci_conn_hash_lookup_bis(hdev, dst, qos->big, qos->bis); + if (conn) + return ERR_PTR(-EADDRINUSE); + + conn = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); + if (!conn) + return ERR_PTR(-ENOMEM); + + set_bit(HCI_CONN_PER_ADV, &conn->flags); + conn->state = BT_CONNECT; + + hci_conn_hold(conn); + return conn; +} + +/* This function requires the caller holds hdev->lock */ +struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst, + u8 dst_type, u8 sec_level, + u16 conn_timeout, + enum conn_reasons conn_reason) +{ + struct hci_conn *conn; + + /* Let's make sure that le is enabled.*/ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + if (lmp_le_capable(hdev)) + return ERR_PTR(-ECONNREFUSED); + + return ERR_PTR(-EOPNOTSUPP); + } + + /* Some devices send ATT messages as soon as the physical link is + * established. To be able to handle these ATT messages, the user- + * space first establishes the connection and then starts the pairing + * process. 
+ * + * So if a hci_conn object already exists for the following connection + * attempt, we simply update pending_sec_level and auth_type fields + * and return the object found. + */ + conn = hci_conn_hash_lookup_le(hdev, dst, dst_type); + if (conn) { + if (conn->pending_sec_level < sec_level) + conn->pending_sec_level = sec_level; + goto done; + } + + BT_DBG("requesting refresh of dst_addr"); + + conn = hci_conn_add(hdev, LE_LINK, dst, HCI_ROLE_MASTER); + if (!conn) + return ERR_PTR(-ENOMEM); + + if (hci_explicit_conn_params_set(hdev, dst, dst_type) < 0) { + hci_conn_del(conn); + return ERR_PTR(-EBUSY); + } + + conn->state = BT_CONNECT; + set_bit(HCI_CONN_SCANNING, &conn->flags); + conn->dst_type = dst_type; + conn->sec_level = BT_SECURITY_LOW; + conn->pending_sec_level = sec_level; + conn->conn_timeout = conn_timeout; + conn->conn_reason = conn_reason; + + hci_update_passive_scan(hdev); + +done: + hci_conn_hold(conn); + return conn; +} + +struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, + u8 sec_level, u8 auth_type, + enum conn_reasons conn_reason) +{ + struct hci_conn *acl; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + if (lmp_bredr_capable(hdev)) + return ERR_PTR(-ECONNREFUSED); + + return ERR_PTR(-EOPNOTSUPP); + } + + /* Reject outgoing connection to device with same BD ADDR against + * CVE-2020-26555 + */ + if (!bacmp(&hdev->bdaddr, dst)) { + bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n", + dst); + return ERR_PTR(-ECONNREFUSED); + } + + acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst); + if (!acl) { + acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER); + if (!acl) + return ERR_PTR(-ENOMEM); + } + + hci_conn_hold(acl); + + acl->conn_reason = conn_reason; + if (acl->state == BT_OPEN || acl->state == BT_CLOSED) { + acl->sec_level = BT_SECURITY_LOW; + acl->pending_sec_level = sec_level; + acl->auth_type = auth_type; + hci_acl_create_connection(acl); + } + + return acl; +} + +struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, + __u16 setting, struct bt_codec *codec) +{ + struct hci_conn *acl; + struct hci_conn *sco; + + acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING, + CONN_REASON_SCO_CONNECT); + if (IS_ERR(acl)) + return acl; + + sco = hci_conn_hash_lookup_ba(hdev, type, dst); + if (!sco) { + sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER); + if (!sco) { + hci_conn_drop(acl); + return ERR_PTR(-ENOMEM); + } + } + + acl->link = sco; + sco->link = acl; + + hci_conn_hold(sco); + + sco->setting = setting; + sco->codec = *codec; + + if (acl->state == BT_CONNECTED && + (sco->state == BT_OPEN || sco->state == BT_CLOSED)) { + set_bit(HCI_CONN_POWER_SAVE, &acl->flags); + hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON); + + if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) { + /* defer SCO setup until mode change completed */ + set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags); + return sco; + } + + hci_sco_setup(acl, 0x00); + } + + return sco; +} + +static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos) +{ + struct hci_cis_params *cis = &d->pdu.cis[d->pdu.cp.num_cis]; + + cis->cis_id = qos->cis; + cis->c_sdu = cpu_to_le16(qos->out.sdu); + cis->p_sdu = cpu_to_le16(qos->in.sdu); + cis->c_phy = qos->out.phy ? qos->out.phy : qos->in.phy; + cis->p_phy = qos->in.phy ? 
qos->in.phy : qos->out.phy; + cis->c_rtn = qos->out.rtn; + cis->p_rtn = qos->in.rtn; + + d->pdu.cp.num_cis++; +} + +static void cis_list(struct hci_conn *conn, void *data) +{ + struct iso_list_data *d = data; + + /* Skip if broadcast/ANY address */ + if (!bacmp(&conn->dst, BDADDR_ANY)) + return; + + if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET || + d->cis != conn->iso_qos.cis) + return; + + d->count++; + + if (d->pdu.cp.cig_id == BT_ISO_QOS_CIG_UNSET || + d->count >= ARRAY_SIZE(d->pdu.cis)) + return; + + cis_add(d, &conn->iso_qos); +} + +static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_le_create_big cp; + + memset(&cp, 0, sizeof(cp)); + + cp.handle = qos->big; + cp.adv_handle = qos->bis; + cp.num_bis = 0x01; + hci_cpu_to_le24(qos->out.interval, cp.bis.sdu_interval); + cp.bis.sdu = cpu_to_le16(qos->out.sdu); + cp.bis.latency = cpu_to_le16(qos->out.latency); + cp.bis.rtn = qos->out.rtn; + cp.bis.phy = qos->out.phy; + cp.bis.packing = qos->packing; + cp.bis.framing = qos->framing; + cp.bis.encryption = 0x00; + memset(&cp.bis.bcode, 0, sizeof(cp.bis.bcode)); + + return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp); +} + +static void set_cig_params_complete(struct hci_dev *hdev, void *data, int err) +{ + struct iso_cig_params *pdu = data; + + bt_dev_dbg(hdev, ""); + + if (err) + bt_dev_err(hdev, "Unable to set CIG parameters: %d", err); + + kfree(pdu); +} + +static int set_cig_params_sync(struct hci_dev *hdev, void *data) +{ + struct iso_cig_params *pdu = data; + u32 plen; + + plen = sizeof(pdu->cp) + pdu->cp.num_cis * sizeof(pdu->cis[0]); + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_CIG_PARAMS, plen, pdu, + HCI_CMD_TIMEOUT); +} + +static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) +{ + struct hci_dev *hdev = conn->hdev; + struct iso_list_data data; + struct iso_cig_params *pdu; + + memset(&data, 0, sizeof(data)); + + /* Allocate a CIG if not set */ + if (qos->cig == BT_ISO_QOS_CIG_UNSET) { + for (data.cig = 0x00; data.cig < 0xff; data.cig++) { + data.count = 0; + data.cis = 0xff; + + hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, + BT_BOUND, &data); + if (data.count) + continue; + + hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, + BT_CONNECTED, &data); + if (!data.count) + break; + } + + if (data.cig == 0xff) + return false; + + /* Update CIG */ + qos->cig = data.cig; + } + + data.pdu.cp.cig_id = qos->cig; + hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval); + hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval); + data.pdu.cp.sca = qos->sca; + data.pdu.cp.packing = qos->packing; + data.pdu.cp.framing = qos->framing; + data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency); + data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency); + + if (qos->cis != BT_ISO_QOS_CIS_UNSET) { + data.count = 0; + data.cig = qos->cig; + data.cis = qos->cis; + + hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND, + &data); + if (data.count) + return false; + + cis_add(&data, qos); + } + + /* Reprogram all CIS(s) with the same CIG */ + for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11; + data.cis++) { + data.count = 0; + + hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND, + &data); + if (data.count) + continue; + + /* Allocate a CIS if not set */ + if (qos->cis == BT_ISO_QOS_CIS_UNSET) { + /* Update CIS */ + qos->cis = data.cis; + cis_add(&data, qos); + } + } + + if (qos->cis == BT_ISO_QOS_CIS_UNSET || 
!data.pdu.cp.num_cis) + return false; + + pdu = kzalloc(sizeof(*pdu), GFP_KERNEL); + if (!pdu) + return false; + + memcpy(pdu, &data.pdu, sizeof(*pdu)); + + if (hci_cmd_sync_queue(hdev, set_cig_params_sync, pdu, + set_cig_params_complete) < 0) { + kfree(pdu); + return false; + } + + return true; +} + +struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos) +{ + struct hci_conn *cis; + + cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type); + if (!cis) { + cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); + if (!cis) + return ERR_PTR(-ENOMEM); + cis->cleanup = cis_cleanup; + cis->dst_type = dst_type; + } + + if (cis->state == BT_CONNECTED) + return cis; + + /* Check if CIS has been set and the settings matches */ + if (cis->state == BT_BOUND && + !memcmp(&cis->iso_qos, qos, sizeof(*qos))) + return cis; + + /* Update LINK PHYs according to QoS preference */ + cis->le_tx_phy = qos->out.phy; + cis->le_rx_phy = qos->in.phy; + + /* If output interval is not set use the input interval as it cannot be + * 0x000000. + */ + if (!qos->out.interval) + qos->out.interval = qos->in.interval; + + /* If input interval is not set use the output interval as it cannot be + * 0x000000. + */ + if (!qos->in.interval) + qos->in.interval = qos->out.interval; + + /* If output latency is not set use the input latency as it cannot be + * 0x0000. + */ + if (!qos->out.latency) + qos->out.latency = qos->in.latency; + + /* If input latency is not set use the output latency as it cannot be + * 0x0000. + */ + if (!qos->in.latency) + qos->in.latency = qos->out.latency; + + if (!hci_le_set_cig_params(cis, qos)) { + hci_conn_drop(cis); + return ERR_PTR(-EINVAL); + } + + cis->iso_qos = *qos; + cis->state = BT_BOUND; + + return cis; +} + +bool hci_iso_setup_path(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_cp_le_setup_iso_path cmd; + + memset(&cmd, 0, sizeof(cmd)); + + if (conn->iso_qos.out.sdu) { + cmd.handle = cpu_to_le16(conn->handle); + cmd.direction = 0x00; /* Input (Host to Controller) */ + cmd.path = 0x00; /* HCI path if enabled */ + cmd.codec = 0x03; /* Transparent Data */ + + if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), + &cmd) < 0) + return false; + } + + if (conn->iso_qos.in.sdu) { + cmd.handle = cpu_to_le16(conn->handle); + cmd.direction = 0x01; /* Output (Controller to Host) */ + cmd.path = 0x00; /* HCI path if enabled */ + cmd.codec = 0x03; /* Transparent Data */ + + if (hci_send_cmd(hdev, HCI_OP_LE_SETUP_ISO_PATH, sizeof(cmd), + &cmd) < 0) + return false; + } + + return true; +} + +static int hci_create_cis_sync(struct hci_dev *hdev, void *data) +{ + struct { + struct hci_cp_le_create_cis cp; + struct hci_cis cis[0x1f]; + } cmd; + struct hci_conn *conn = data; + u8 cig; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle); + cmd.cis[0].cis_handle = cpu_to_le16(conn->handle); + cmd.cp.num_cis++; + cig = conn->iso_qos.cig; + + hci_dev_lock(hdev); + + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis]; + + if (conn == data || conn->type != ISO_LINK || + conn->state == BT_CONNECTED || conn->iso_qos.cig != cig) + continue; + + /* Check if all CIS(s) belonging to a CIG are ready */ + if (!conn->link || conn->link->state != BT_CONNECTED || + conn->state != BT_CONNECT) { + cmd.cp.num_cis = 0; + break; + } + + /* Group all CIS with state BT_CONNECT since the spec don't + * allow to send them 
individually: + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2566: + * + * If the Host issues this command before all the + * HCI_LE_CIS_Established events from the previous use of the + * command have been generated, the Controller shall return the + * error code Command Disallowed (0x0C). + */ + cis->acl_handle = cpu_to_le16(conn->link->handle); + cis->cis_handle = cpu_to_le16(conn->handle); + cmd.cp.num_cis++; + } + + rcu_read_unlock(); + + hci_dev_unlock(hdev); + + if (!cmd.cp.num_cis) + return 0; + + return hci_send_cmd(hdev, HCI_OP_LE_CREATE_CIS, sizeof(cmd.cp) + + sizeof(cmd.cis[0]) * cmd.cp.num_cis, &cmd); +} + +int hci_le_create_cis(struct hci_conn *conn) +{ + struct hci_conn *cis; + struct hci_dev *hdev = conn->hdev; + int err; + + switch (conn->type) { + case LE_LINK: + if (!conn->link || conn->state != BT_CONNECTED) + return -EINVAL; + cis = conn->link; + break; + case ISO_LINK: + cis = conn; + break; + default: + return -EINVAL; + } + + if (cis->state == BT_CONNECT) + return 0; + + /* Queue Create CIS */ + err = hci_cmd_sync_queue(hdev, hci_create_cis_sync, cis, NULL); + if (err) + return err; + + cis->state = BT_CONNECT; + + return 0; +} + +static void hci_iso_qos_setup(struct hci_dev *hdev, struct hci_conn *conn, + struct bt_iso_io_qos *qos, __u8 phy) +{ + /* Only set MTU if PHY is enabled */ + if (!qos->sdu && qos->phy) { + if (hdev->iso_mtu > 0) + qos->sdu = hdev->iso_mtu; + else if (hdev->le_mtu > 0) + qos->sdu = hdev->le_mtu; + else + qos->sdu = hdev->acl_mtu; + } + + /* Use the same PHY as ACL if set to any */ + if (qos->phy == BT_ISO_PHY_ANY) + qos->phy = phy; + + /* Use LE ACL connection interval if not set */ + if (!qos->interval) + /* ACL interval unit in 1.25 ms to us */ + qos->interval = conn->le_conn_interval * 1250; + + /* Use LE ACL connection latency if not set */ + if (!qos->latency) + qos->latency = conn->le_conn_latency; +} + +static void hci_bind_bis(struct hci_conn *conn, + struct bt_iso_qos *qos) +{ + /* Update LINK PHYs according to QoS preference */ + conn->le_tx_phy = qos->out.phy; + conn->le_rx_phy = qos->out.phy; + conn->iso_qos = *qos; + conn->state = BT_BOUND; +} + +static int create_big_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn *conn = data; + struct bt_iso_qos *qos = &conn->iso_qos; + u16 interval, sync_interval = 0; + u32 flags = 0; + int err; + + if (qos->out.phy == 0x02) + flags |= MGMT_ADV_FLAG_SEC_2M; + + /* Align intervals */ + interval = qos->out.interval / 1250; + + if (qos->bis) + sync_interval = qos->sync_interval * 1600; + + err = hci_start_per_adv_sync(hdev, qos->bis, conn->le_per_adv_data_len, + conn->le_per_adv_data, flags, interval, + interval, sync_interval); + if (err) + return err; + + return hci_le_create_big(conn, &conn->iso_qos); +} + +static void create_pa_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_cp_le_pa_create_sync *cp = data; + + bt_dev_dbg(hdev, ""); + + if (err) + bt_dev_err(hdev, "Unable to create PA: %d", err); + + kfree(cp); +} + +static int create_pa_sync(struct hci_dev *hdev, void *data) +{ + struct hci_cp_le_pa_create_sync *cp = data; + int err; + + err = __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_CREATE_SYNC, + sizeof(*cp), cp, HCI_CMD_TIMEOUT); + if (err) { + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + return err; + } + + return hci_update_passive_scan_sync(hdev); +} + +int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, + __u8 sid) +{ + struct hci_cp_le_pa_create_sync *cp; + + if (hci_dev_test_and_set_flag(hdev,
HCI_PA_SYNC)) + return -EBUSY; + + cp = kmalloc(sizeof(*cp), GFP_KERNEL); + if (!cp) { + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + return -ENOMEM; + } + + /* Convert from ISO socket address type to HCI address type */ + if (dst_type == BDADDR_LE_PUBLIC) + dst_type = ADDR_LE_DEV_PUBLIC; + else + dst_type = ADDR_LE_DEV_RANDOM; + + memset(cp, 0, sizeof(*cp)); + cp->sid = sid; + cp->addr_type = dst_type; + bacpy(&cp->addr, dst); + + /* Queue start pa_create_sync and scan */ + return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete); +} + +int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, + __u16 sync_handle, __u8 num_bis, __u8 bis[]) +{ + struct _packed { + struct hci_cp_le_big_create_sync cp; + __u8 bis[0x11]; + } pdu; + int err; + + if (num_bis > sizeof(pdu.bis)) + return -EINVAL; + + err = qos_set_big(hdev, qos); + if (err) + return err; + + memset(&pdu, 0, sizeof(pdu)); + pdu.cp.handle = qos->big; + pdu.cp.sync_handle = cpu_to_le16(sync_handle); + pdu.cp.num_bis = num_bis; + memcpy(pdu.bis, bis, num_bis); + + return hci_send_cmd(hdev, HCI_OP_LE_BIG_CREATE_SYNC, + sizeof(pdu.cp) + num_bis, &pdu); +} + +static void create_big_complete(struct hci_dev *hdev, void *data, int err) +{ + struct hci_conn *conn = data; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (err) { + bt_dev_err(hdev, "Unable to create BIG: %d", err); + hci_connect_cfm(conn, err); + hci_conn_del(conn); + } +} + +struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos, + __u8 base_len, __u8 *base) +{ + struct hci_conn *conn; + int err; + + /* We need a hci_conn object using BDADDR_ANY as dst */ + conn = hci_add_bis(hdev, dst, qos); + if (IS_ERR(conn)) + return conn; + + hci_bind_bis(conn, qos); + + /* Add Basic Announcement into Periodic Adv Data if BASE is set */ + if (base_len && base) { + base_len = eir_append_service_data(conn->le_per_adv_data, 0, + 0x1851, base, base_len); + conn->le_per_adv_data_len = base_len; + } + + /* Queue start periodic advertising and create BIG */ + err = hci_cmd_sync_queue(hdev, create_big_sync, conn, + create_big_complete); + if (err < 0) { + hci_conn_drop(conn); + return ERR_PTR(err); + } + + hci_iso_qos_setup(hdev, conn, &qos->out, + conn->le_tx_phy ? conn->le_tx_phy : + hdev->le_tx_def_phys); + + return conn; +} + +struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, + __u8 dst_type, struct bt_iso_qos *qos) +{ + struct hci_conn *le; + struct hci_conn *cis; + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + le = hci_connect_le(hdev, dst, dst_type, false, + BT_SECURITY_LOW, + HCI_LE_CONN_TIMEOUT, + HCI_ROLE_SLAVE); + else + le = hci_connect_le_scan(hdev, dst, dst_type, + BT_SECURITY_LOW, + HCI_LE_CONN_TIMEOUT, + CONN_REASON_ISO_CONNECT); + if (IS_ERR(le)) + return le; + + hci_iso_qos_setup(hdev, le, &qos->out, + le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys); + hci_iso_qos_setup(hdev, le, &qos->in, + le->le_rx_phy ? le->le_rx_phy : hdev->le_rx_def_phys); + + cis = hci_bind_cis(hdev, dst, dst_type, qos); + if (IS_ERR(cis)) { + hci_conn_drop(le); + return cis; + } + + le->link = cis; + cis->link = le; + + hci_conn_hold(cis); + + /* If LE is already connected and CIS handle is already set proceed to + * Create CIS immediately.
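+ * Otherwise creation is deferred until the ACL link comes up and the + * event handling has assigned a CIS handle.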
+ */ + if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET) + hci_le_create_cis(le); + + return cis; +} + +/* Check link security requirement */ +int hci_conn_check_link_mode(struct hci_conn *conn) +{ + BT_DBG("hcon %p", conn); + + /* In Secure Connections Only mode, it is required that Secure + * Connections is used and the link is encrypted with AES-CCM + * using a P-256 authenticated combination key. + */ + if (hci_dev_test_flag(conn->hdev, HCI_SC_ONLY)) { + if (!hci_conn_sc_enabled(conn) || + !test_bit(HCI_CONN_AES_CCM, &conn->flags) || + conn->key_type != HCI_LK_AUTH_COMBINATION_P256) + return 0; + } + + /* AES encryption is required for Level 4: + * + * BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 3, Part C + * page 1319: + * + * 128-bit equivalent strength for link and encryption keys + * required using FIPS approved algorithms (E0 not allowed, + * SAFER+ not allowed, and P-192 not allowed; encryption key + * not shortened) + */ + if (conn->sec_level == BT_SECURITY_FIPS && + !test_bit(HCI_CONN_AES_CCM, &conn->flags)) { + bt_dev_err(conn->hdev, + "Invalid security: Missing AES-CCM usage"); + return 0; + } + + if (hci_conn_ssp_enabled(conn) && + !test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + return 0; + + return 1; +} + +/* Authenticate remote device */ +static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type) +{ + BT_DBG("hcon %p", conn); + + if (conn->pending_sec_level > sec_level) + sec_level = conn->pending_sec_level; + + if (sec_level > conn->sec_level) + conn->pending_sec_level = sec_level; + else if (test_bit(HCI_CONN_AUTH, &conn->flags)) + return 1; + + /* Make sure we preserve an existing MITM requirement*/ + auth_type |= (conn->auth_type & 0x01); + + conn->auth_type = auth_type; + + if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { + struct hci_cp_auth_requested cp; + + cp.handle = cpu_to_le16(conn->handle); + hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED, + sizeof(cp), &cp); + + /* Set the ENCRYPT_PEND to trigger encryption after + * authentication. + */ + if (!test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + } + + return 0; +} + +/* Encrypt the link */ +static void hci_conn_encrypt(struct hci_conn *conn) +{ + BT_DBG("hcon %p", conn); + + if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { + struct hci_cp_set_conn_encrypt cp; + cp.handle = cpu_to_le16(conn->handle); + cp.encrypt = 0x01; + hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), + &cp); + } +} + +/* Enable security */ +int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type, + bool initiator) +{ + BT_DBG("hcon %p", conn); + + if (conn->type == LE_LINK) + return smp_conn_security(conn, sec_level); + + /* For sdp we don't need the link key. */ + if (sec_level == BT_SECURITY_SDP) + return 1; + + /* For non 2.1 devices and low security level we don't need the link + key. */ + if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn)) + return 1; + + /* For other security levels we need the link key. */ + if (!test_bit(HCI_CONN_AUTH, &conn->flags)) + goto auth; + + switch (conn->key_type) { + case HCI_LK_AUTH_COMBINATION_P256: + /* An authenticated FIPS approved combination key has + * sufficient security for security level 4 or lower. + */ + if (sec_level <= BT_SECURITY_FIPS) + goto encrypt; + break; + case HCI_LK_AUTH_COMBINATION_P192: + /* An authenticated combination key has sufficient security for + * security level 3 or lower. 
+ */ + if (sec_level <= BT_SECURITY_HIGH) + goto encrypt; + break; + case HCI_LK_UNAUTH_COMBINATION_P192: + case HCI_LK_UNAUTH_COMBINATION_P256: + /* An unauthenticated combination key has sufficient security + * for security level 2 or lower. + */ + if (sec_level <= BT_SECURITY_MEDIUM) + goto encrypt; + break; + case HCI_LK_COMBINATION: + /* A combination key has always sufficient security for the + * security levels 2 or lower. High security level requires the + * combination key is generated using maximum PIN code length + * (16). For pre 2.1 units. + */ + if (sec_level <= BT_SECURITY_MEDIUM || conn->pin_length == 16) + goto encrypt; + break; + default: + break; + } + +auth: + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) + return 0; + + if (initiator) + set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); + + if (!hci_conn_auth(conn, sec_level, auth_type)) + return 0; + +encrypt: + if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) { + /* Ensure that the encryption key size has been read, + * otherwise stall the upper layer responses. + */ + if (!conn->enc_key_size) + return 0; + + /* Nothing else needed, all requirements are met */ + return 1; + } + + hci_conn_encrypt(conn); + return 0; +} +EXPORT_SYMBOL(hci_conn_security); + +/* Check secure link requirement */ +int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level) +{ + BT_DBG("hcon %p", conn); + + /* Accept if non-secure or higher security level is required */ + if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS) + return 1; + + /* Accept if secure or higher security level is already present */ + if (conn->sec_level == BT_SECURITY_HIGH || + conn->sec_level == BT_SECURITY_FIPS) + return 1; + + /* Reject not secure link */ + return 0; +} +EXPORT_SYMBOL(hci_conn_check_secure); + +/* Switch role */ +int hci_conn_switch_role(struct hci_conn *conn, __u8 role) +{ + BT_DBG("hcon %p", conn); + + if (role == conn->role) + return 1; + + if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) { + struct hci_cp_switch_role cp; + bacpy(&cp.bdaddr, &conn->dst); + cp.role = role; + hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp); + } + + return 0; +} +EXPORT_SYMBOL(hci_conn_switch_role); + +/* Enter active mode */ +void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("hcon %p mode %d", conn, conn->mode); + + if (conn->mode != HCI_CM_SNIFF) + goto timer; + + if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active) + goto timer; + + if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) { + struct hci_cp_exit_sniff_mode cp; + cp.handle = cpu_to_le16(conn->handle); + hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp); + } + +timer: + if (hdev->idle_timeout > 0) + queue_delayed_work(hdev->workqueue, &conn->idle_work, + msecs_to_jiffies(hdev->idle_timeout)); +} + +/* Drop all connection on the device */ +void hci_conn_hash_flush(struct hci_dev *hdev) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c, *n; + + BT_DBG("hdev %s", hdev->name); + + list_for_each_entry_safe(c, n, &h->list, list) { + c->state = BT_CLOSED; + + hci_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM); + + /* Unlink before deleting otherwise it is possible that + * hci_conn_del removes the link which may cause the list to + * contain items already freed. 
+ */ + hci_conn_unlink(c); + hci_conn_del(c); + } +} + +/* Check pending connect attempts */ +void hci_conn_check_pending(struct hci_dev *hdev) +{ + struct hci_conn *conn; + + BT_DBG("hdev %s", hdev->name); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2); + if (conn) + hci_acl_create_connection(conn); + + hci_dev_unlock(hdev); +} + +static u32 get_link_mode(struct hci_conn *conn) +{ + u32 link_mode = 0; + + if (conn->role == HCI_ROLE_MASTER) + link_mode |= HCI_LM_MASTER; + + if (test_bit(HCI_CONN_ENCRYPT, &conn->flags)) + link_mode |= HCI_LM_ENCRYPT; + + if (test_bit(HCI_CONN_AUTH, &conn->flags)) + link_mode |= HCI_LM_AUTH; + + if (test_bit(HCI_CONN_SECURE, &conn->flags)) + link_mode |= HCI_LM_SECURE; + + if (test_bit(HCI_CONN_FIPS, &conn->flags)) + link_mode |= HCI_LM_FIPS; + + return link_mode; +} + +int hci_get_conn_list(void __user *arg) +{ + struct hci_conn *c; + struct hci_conn_list_req req, *cl; + struct hci_conn_info *ci; + struct hci_dev *hdev; + int n = 0, size, err; + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci)) + return -EINVAL; + + size = sizeof(req) + req.conn_num * sizeof(*ci); + + cl = kmalloc(size, GFP_KERNEL); + if (!cl) + return -ENOMEM; + + hdev = hci_dev_get(req.dev_id); + if (!hdev) { + kfree(cl); + return -ENODEV; + } + + ci = cl->conn_info; + + hci_dev_lock(hdev); + list_for_each_entry(c, &hdev->conn_hash.list, list) { + bacpy(&(ci + n)->bdaddr, &c->dst); + (ci + n)->handle = c->handle; + (ci + n)->type = c->type; + (ci + n)->out = c->out; + (ci + n)->state = c->state; + (ci + n)->link_mode = get_link_mode(c); + if (++n >= req.conn_num) + break; + } + hci_dev_unlock(hdev); + + cl->dev_id = hdev->id; + cl->conn_num = n; + size = sizeof(req) + n * sizeof(*ci); + + hci_dev_put(hdev); + + err = copy_to_user(arg, cl, size); + kfree(cl); + + return err ? -EFAULT : 0; +} + +int hci_get_conn_info(struct hci_dev *hdev, void __user *arg) +{ + struct hci_conn_info_req req; + struct hci_conn_info ci; + struct hci_conn *conn; + char __user *ptr = arg + sizeof(req); + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + hci_dev_lock(hdev); + conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr); + if (conn) { + bacpy(&ci.bdaddr, &conn->dst); + ci.handle = conn->handle; + ci.type = conn->type; + ci.out = conn->out; + ci.state = conn->state; + ci.link_mode = get_link_mode(conn); + } + hci_dev_unlock(hdev); + + if (!conn) + return -ENOENT; + + return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0; +} + +int hci_get_auth_info(struct hci_dev *hdev, void __user *arg) +{ + struct hci_auth_info_req req; + struct hci_conn *conn; + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + hci_dev_lock(hdev); + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr); + if (conn) + req.type = conn->auth_type; + hci_dev_unlock(hdev); + + if (!conn) + return -ENOENT; + + return copy_to_user(arg, &req, sizeof(req)) ? 
-EFAULT : 0; +} + +struct hci_chan *hci_chan_create(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_chan *chan; + + BT_DBG("%s hcon %p", hdev->name, conn); + + if (test_bit(HCI_CONN_DROP, &conn->flags)) { + BT_DBG("Refusing to create new hci_chan"); + return NULL; + } + + chan = kzalloc(sizeof(*chan), GFP_KERNEL); + if (!chan) + return NULL; + + chan->conn = hci_conn_get(conn); + skb_queue_head_init(&chan->data_q); + chan->state = BT_CONNECTED; + + list_add_rcu(&chan->list, &conn->chan_list); + + return chan; +} + +void hci_chan_del(struct hci_chan *chan) +{ + struct hci_conn *conn = chan->conn; + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan); + + list_del_rcu(&chan->list); + + synchronize_rcu(); + + /* Prevent new hci_chan's to be created for this hci_conn */ + set_bit(HCI_CONN_DROP, &conn->flags); + + hci_conn_put(conn); + + skb_queue_purge(&chan->data_q); + kfree(chan); +} + +void hci_chan_list_flush(struct hci_conn *conn) +{ + struct hci_chan *chan, *n; + + BT_DBG("hcon %p", conn); + + list_for_each_entry_safe(chan, n, &conn->chan_list, list) + hci_chan_del(chan); +} + +static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon, + __u16 handle) +{ + struct hci_chan *hchan; + + list_for_each_entry(hchan, &hcon->chan_list, list) { + if (hchan->handle == handle) + return hchan; + } + + return NULL; +} + +struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *hcon; + struct hci_chan *hchan = NULL; + + rcu_read_lock(); + + list_for_each_entry_rcu(hcon, &h->list, list) { + hchan = __hci_chan_lookup_handle(hcon, handle); + if (hchan) + break; + } + + rcu_read_unlock(); + + return hchan; +} + +u32 hci_conn_get_phy(struct hci_conn *conn) +{ + u32 phys = 0; + + /* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471: + * Table 6.2: Packets defined for synchronous, asynchronous, and + * CPB logical transport types. + */ + switch (conn->type) { + case SCO_LINK: + /* SCO logical transport (1 Mb/s): + * HV1, HV2, HV3 and DV. + */ + phys |= BT_PHY_BR_1M_1SLOT; + + break; + + case ACL_LINK: + /* ACL logical transport (1 Mb/s) ptt=0: + * DH1, DM3, DH3, DM5 and DH5. + */ + phys |= BT_PHY_BR_1M_1SLOT; + + if (conn->pkt_type & (HCI_DM3 | HCI_DH3)) + phys |= BT_PHY_BR_1M_3SLOT; + + if (conn->pkt_type & (HCI_DM5 | HCI_DH5)) + phys |= BT_PHY_BR_1M_5SLOT; + + /* ACL logical transport (2 Mb/s) ptt=1: + * 2-DH1, 2-DH3 and 2-DH5. + */ + if (!(conn->pkt_type & HCI_2DH1)) + phys |= BT_PHY_EDR_2M_1SLOT; + + if (!(conn->pkt_type & HCI_2DH3)) + phys |= BT_PHY_EDR_2M_3SLOT; + + if (!(conn->pkt_type & HCI_2DH5)) + phys |= BT_PHY_EDR_2M_5SLOT; + + /* ACL logical transport (3 Mb/s) ptt=1: + * 3-DH1, 3-DH3 and 3-DH5. 
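+ * + * Note the inverted polarity below: for EDR packet types the bits in + * pkt_type are "shall not use" bits, hence the negated tests, whereas + * the basic rate DM/DH bits above are "may use" bits.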
+ */ + if (!(conn->pkt_type & HCI_3DH1)) + phys |= BT_PHY_EDR_3M_1SLOT; + + if (!(conn->pkt_type & HCI_3DH3)) + phys |= BT_PHY_EDR_3M_3SLOT; + + if (!(conn->pkt_type & HCI_3DH5)) + phys |= BT_PHY_EDR_3M_5SLOT; + + break; + + case ESCO_LINK: + /* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */ + phys |= BT_PHY_BR_1M_1SLOT; + + if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5))) + phys |= BT_PHY_BR_1M_3SLOT; + + /* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */ + if (!(conn->pkt_type & ESCO_2EV3)) + phys |= BT_PHY_EDR_2M_1SLOT; + + if (!(conn->pkt_type & ESCO_2EV5)) + phys |= BT_PHY_EDR_2M_3SLOT; + + /* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */ + if (!(conn->pkt_type & ESCO_3EV3)) + phys |= BT_PHY_EDR_3M_1SLOT; + + if (!(conn->pkt_type & ESCO_3EV5)) + phys |= BT_PHY_EDR_3M_3SLOT; + + break; + + case LE_LINK: + if (conn->le_tx_phy & HCI_LE_SET_PHY_1M) + phys |= BT_PHY_LE_1M_TX; + + if (conn->le_rx_phy & HCI_LE_SET_PHY_1M) + phys |= BT_PHY_LE_1M_RX; + + if (conn->le_tx_phy & HCI_LE_SET_PHY_2M) + phys |= BT_PHY_LE_2M_TX; + + if (conn->le_rx_phy & HCI_LE_SET_PHY_2M) + phys |= BT_PHY_LE_2M_RX; + + if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED) + phys |= BT_PHY_LE_CODED_TX; + + if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED) + phys |= BT_PHY_LE_CODED_RX; + + break; + } + + return phys; +} + +int hci_abort_conn(struct hci_conn *conn, u8 reason) +{ + int r = 0; + + if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags)) + return 0; + + switch (conn->state) { + case BT_CONNECTED: + case BT_CONFIG: + if (conn->type == AMP_LINK) { + struct hci_cp_disconn_phy_link cp; + + cp.phy_handle = HCI_PHY_HANDLE(conn->handle); + cp.reason = reason; + r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK, + sizeof(cp), &cp); + } else { + struct hci_cp_disconnect dc; + + dc.handle = cpu_to_le16(conn->handle); + dc.reason = reason; + r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, + sizeof(dc), &dc); + } + + conn->state = BT_DISCONN; + + break; + case BT_CONNECT: + if (conn->type == LE_LINK) { + if (test_bit(HCI_CONN_SCANNING, &conn->flags)) + break; + r = hci_send_cmd(conn->hdev, + HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL); + } else if (conn->type == ACL_LINK) { + if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2) + break; + r = hci_send_cmd(conn->hdev, + HCI_OP_CREATE_CONN_CANCEL, + 6, &conn->dst); + } + break; + case BT_CONNECT2: + if (conn->type == ACL_LINK) { + struct hci_cp_reject_conn_req rej; + + bacpy(&rej.bdaddr, &conn->dst); + rej.reason = reason; + + r = hci_send_cmd(conn->hdev, + HCI_OP_REJECT_CONN_REQ, + sizeof(rej), &rej); + } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { + struct hci_cp_reject_sync_conn_req rej; + + bacpy(&rej.bdaddr, &conn->dst); + + /* SCO rejection has its own limited set of + * allowed error values (0x0D-0x0F) which isn't + * compatible with most values passed to this + * function. To be safe hard-code one of the + * values that's suitable for SCO. 
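+ * + * The spec-allowed values are 0x0D (Connection Rejected due to + * Limited Resources), 0x0E (Connection Rejected due to Security + * Reasons) and 0x0F (Connection Rejected due to Unacceptable + * BD_ADDR).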
+ */ + rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; + + r = hci_send_cmd(conn->hdev, + HCI_OP_REJECT_SYNC_CONN_REQ, + sizeof(rej), &rej); + } + break; + default: + conn->state = BT_CLOSED; + break; + } + + return r; +} diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c new file mode 100644 index 000000000..6a1db678d --- /dev/null +++ b/net/bluetooth/hci_core.c @@ -0,0 +1,4158 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (C) 2011 ProFUSION Embedded Systems + + Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth HCI core. */ + +#include <linux/export.h> +#include <linux/rfkill.h> +#include <linux/debugfs.h> +#include <linux/crypto.h> +#include <linux/kcov.h> +#include <linux/property.h> +#include <linux/suspend.h> +#include <linux/wait.h> +#include <asm/unaligned.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> +#include <net/bluetooth/mgmt.h> + +#include "hci_request.h" +#include "hci_debugfs.h" +#include "smp.h" +#include "leds.h" +#include "msft.h" +#include "aosp.h" +#include "hci_codec.h" + +static void hci_rx_work(struct work_struct *work); +static void hci_cmd_work(struct work_struct *work); +static void hci_tx_work(struct work_struct *work); + +/* HCI device list */ +LIST_HEAD(hci_dev_list); +DEFINE_RWLOCK(hci_dev_list_lock); + +/* HCI callback list */ +LIST_HEAD(hci_cb_list); +DEFINE_MUTEX(hci_cb_list_lock); + +/* HCI ID Numbering */ +static DEFINE_IDA(hci_index_ida); + +static int hci_scan_req(struct hci_request *req, unsigned long opt) +{ + __u8 scan = opt; + + BT_DBG("%s %x", req->hdev->name, scan); + + /* Inquiry and Page scans */ + hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); + return 0; +} + +static int hci_auth_req(struct hci_request *req, unsigned long opt) +{ + __u8 auth = opt; + + BT_DBG("%s %x", req->hdev->name, auth); + + /* Authentication */ + hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth); + return 0; +} + +static int hci_encrypt_req(struct hci_request *req, unsigned long opt) +{ + __u8 encrypt = opt; + + BT_DBG("%s %x", req->hdev->name, encrypt); + + /* Encryption */ + hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt); + return 0; +} + +static int hci_linkpol_req(struct hci_request *req, unsigned long opt) +{ + __le16 policy = cpu_to_le16(opt); + + BT_DBG("%s %x", req->hdev->name, policy); + + /* Default link policy */ + hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy); + return 0; +} + +/* Get HCI device by index. 
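+ * Returns NULL if no device with that index is registered.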
+ * Device is held on return. */ +struct hci_dev *hci_dev_get(int index) +{ + struct hci_dev *hdev = NULL, *d; + + BT_DBG("%d", index); + + if (index < 0) + return NULL; + + read_lock(&hci_dev_list_lock); + list_for_each_entry(d, &hci_dev_list, list) { + if (d->id == index) { + hdev = hci_dev_hold(d); + break; + } + } + read_unlock(&hci_dev_list_lock); + return hdev; +} + +/* ---- Inquiry support ---- */ + +bool hci_discovery_active(struct hci_dev *hdev) +{ + struct discovery_state *discov = &hdev->discovery; + + switch (discov->state) { + case DISCOVERY_FINDING: + case DISCOVERY_RESOLVING: + return true; + + default: + return false; + } +} + +void hci_discovery_set_state(struct hci_dev *hdev, int state) +{ + int old_state = hdev->discovery.state; + + BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state); + + if (old_state == state) + return; + + hdev->discovery.state = state; + + switch (state) { + case DISCOVERY_STOPPED: + hci_update_passive_scan(hdev); + + if (old_state != DISCOVERY_STARTING) + mgmt_discovering(hdev, 0); + break; + case DISCOVERY_STARTING: + break; + case DISCOVERY_FINDING: + mgmt_discovering(hdev, 1); + break; + case DISCOVERY_RESOLVING: + break; + case DISCOVERY_STOPPING: + break; + } +} + +void hci_inquiry_cache_flush(struct hci_dev *hdev) +{ + struct discovery_state *cache = &hdev->discovery; + struct inquiry_entry *p, *n; + + list_for_each_entry_safe(p, n, &cache->all, all) { + list_del(&p->all); + kfree(p); + } + + INIT_LIST_HEAD(&cache->unknown); + INIT_LIST_HEAD(&cache->resolve); +} + +struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, + bdaddr_t *bdaddr) +{ + struct discovery_state *cache = &hdev->discovery; + struct inquiry_entry *e; + + BT_DBG("cache %p, %pMR", cache, bdaddr); + + list_for_each_entry(e, &cache->all, all) { + if (!bacmp(&e->data.bdaddr, bdaddr)) + return e; + } + + return NULL; +} + +struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev, + bdaddr_t *bdaddr) +{ + struct discovery_state *cache = &hdev->discovery; + struct inquiry_entry *e; + + BT_DBG("cache %p, %pMR", cache, bdaddr); + + list_for_each_entry(e, &cache->unknown, list) { + if (!bacmp(&e->data.bdaddr, bdaddr)) + return e; + } + + return NULL; +} + +struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev, + bdaddr_t *bdaddr, + int state) +{ + struct discovery_state *cache = &hdev->discovery; + struct inquiry_entry *e; + + BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state); + + list_for_each_entry(e, &cache->resolve, list) { + if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state) + return e; + if (!bacmp(&e->data.bdaddr, bdaddr)) + return e; + } + + return NULL; +} + +void hci_inquiry_cache_update_resolve(struct hci_dev *hdev, + struct inquiry_entry *ie) +{ + struct discovery_state *cache = &hdev->discovery; + struct list_head *pos = &cache->resolve; + struct inquiry_entry *p; + + list_del(&ie->list); + + list_for_each_entry(p, &cache->resolve, list) { + if (p->name_state != NAME_PENDING && + abs(p->data.rssi) >= abs(ie->data.rssi)) + break; + pos = &p->list; + } + + list_add(&ie->list, pos); +} + +u32 hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data, + bool name_known) +{ + struct discovery_state *cache = &hdev->discovery; + struct inquiry_entry *ie; + u32 flags = 0; + + BT_DBG("cache %p, %pMR", cache, &data->bdaddr); + + hci_remove_remote_oob_data(hdev, &data->bdaddr, BDADDR_BREDR); + + if (!data->ssp_mode) + flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; + + ie = 
hci_inquiry_cache_lookup(hdev, &data->bdaddr); + if (ie) { + if (!ie->data.ssp_mode) + flags |= MGMT_DEV_FOUND_LEGACY_PAIRING; + + if (ie->name_state == NAME_NEEDED && + data->rssi != ie->data.rssi) { + ie->data.rssi = data->rssi; + hci_inquiry_cache_update_resolve(hdev, ie); + } + + goto update; + } + + /* Entry not in the cache. Add new one. */ + ie = kzalloc(sizeof(*ie), GFP_KERNEL); + if (!ie) { + flags |= MGMT_DEV_FOUND_CONFIRM_NAME; + goto done; + } + + list_add(&ie->all, &cache->all); + + if (name_known) { + ie->name_state = NAME_KNOWN; + } else { + ie->name_state = NAME_NOT_KNOWN; + list_add(&ie->list, &cache->unknown); + } + +update: + if (name_known && ie->name_state != NAME_KNOWN && + ie->name_state != NAME_PENDING) { + ie->name_state = NAME_KNOWN; + list_del(&ie->list); + } + + memcpy(&ie->data, data, sizeof(*data)); + ie->timestamp = jiffies; + cache->timestamp = jiffies; + + if (ie->name_state == NAME_NOT_KNOWN) + flags |= MGMT_DEV_FOUND_CONFIRM_NAME; + +done: + return flags; +} + +static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf) +{ + struct discovery_state *cache = &hdev->discovery; + struct inquiry_info *info = (struct inquiry_info *) buf; + struct inquiry_entry *e; + int copied = 0; + + list_for_each_entry(e, &cache->all, all) { + struct inquiry_data *data = &e->data; + + if (copied >= num) + break; + + bacpy(&info->bdaddr, &data->bdaddr); + info->pscan_rep_mode = data->pscan_rep_mode; + info->pscan_period_mode = data->pscan_period_mode; + info->pscan_mode = data->pscan_mode; + memcpy(info->dev_class, data->dev_class, 3); + info->clock_offset = data->clock_offset; + + info++; + copied++; + } + + BT_DBG("cache %p, copied %d", cache, copied); + return copied; +} + +static int hci_inq_req(struct hci_request *req, unsigned long opt) +{ + struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt; + struct hci_dev *hdev = req->hdev; + struct hci_cp_inquiry cp; + + BT_DBG("%s", hdev->name); + + if (test_bit(HCI_INQUIRY, &hdev->flags)) + return 0; + + /* Start Inquiry */ + memcpy(&cp.lap, &ir->lap, 3); + cp.length = ir->length; + cp.num_rsp = ir->num_rsp; + hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); + + return 0; +} + +int hci_inquiry(void __user *arg) +{ + __u8 __user *ptr = arg; + struct hci_inquiry_req ir; + struct hci_dev *hdev; + int err = 0, do_inquiry = 0, max_rsp; + long timeo; + __u8 *buf; + + if (copy_from_user(&ir, ptr, sizeof(ir))) + return -EFAULT; + + hdev = hci_dev_get(ir.dev_id); + if (!hdev) + return -ENODEV; + + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = -EBUSY; + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + err = -EOPNOTSUPP; + goto done; + } + + if (hdev->dev_type != HCI_PRIMARY) { + err = -EOPNOTSUPP; + goto done; + } + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + err = -EOPNOTSUPP; + goto done; + } + + /* Restrict maximum inquiry length to 60 seconds */ + if (ir.length > 60) { + err = -EINVAL; + goto done; + } + + hci_dev_lock(hdev); + if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX || + inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) { + hci_inquiry_cache_flush(hdev); + do_inquiry = 1; + } + hci_dev_unlock(hdev); + + timeo = ir.length * msecs_to_jiffies(2000); + + if (do_inquiry) { + err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir, + timeo, NULL); + if (err < 0) + goto done; + + /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is + * cleared). If it is interrupted by a signal, return -EINTR. 
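+ * The wait itself is not bounded here; the request above was already + * limited by timeo (ir.length inquiry units of 1.28 s each, padded to + * 2 s per unit).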
+ */ + if (wait_on_bit(&hdev->flags, HCI_INQUIRY, + TASK_INTERRUPTIBLE)) { + err = -EINTR; + goto done; + } + } + + /* for unlimited number of responses we will use buffer with + * 255 entries + */ + max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp; + + /* cache_dump can't sleep. Therefore we allocate temp buffer and then + * copy it to the user space. + */ + buf = kmalloc_array(max_rsp, sizeof(struct inquiry_info), GFP_KERNEL); + if (!buf) { + err = -ENOMEM; + goto done; + } + + hci_dev_lock(hdev); + ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf); + hci_dev_unlock(hdev); + + BT_DBG("num_rsp %d", ir.num_rsp); + + if (!copy_to_user(ptr, &ir, sizeof(ir))) { + ptr += sizeof(ir); + if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) * + ir.num_rsp)) + err = -EFAULT; + } else + err = -EFAULT; + + kfree(buf); + +done: + hci_dev_put(hdev); + return err; +} + +static int hci_dev_do_open(struct hci_dev *hdev) +{ + int ret = 0; + + BT_DBG("%s %p", hdev->name, hdev); + + hci_req_sync_lock(hdev); + + ret = hci_dev_open_sync(hdev); + + hci_req_sync_unlock(hdev); + return ret; +} + +/* ---- HCI ioctl helpers ---- */ + +int hci_dev_open(__u16 dev) +{ + struct hci_dev *hdev; + int err; + + hdev = hci_dev_get(dev); + if (!hdev) + return -ENODEV; + + /* Devices that are marked as unconfigured can only be powered + * up as user channel. Trying to bring them up as normal devices + * will result into a failure. Only user channel operation is + * possible. + * + * When this function is called for a user channel, the flag + * HCI_USER_CHANNEL will be set first before attempting to + * open the device. + */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = -EOPNOTSUPP; + goto done; + } + + /* We need to ensure that no other power on/off work is pending + * before proceeding to call hci_dev_do_open. This is + * particularly important if the setup procedure has not yet + * completed. + */ + if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) + cancel_delayed_work(&hdev->power_off); + + /* After this call it is guaranteed that the setup procedure + * has finished. This means that error conditions like RFKILL + * or no valid public or static random address apply. + */ + flush_workqueue(hdev->req_workqueue); + + /* For controllers not using the management interface and that + * are brought up using legacy ioctl, set the HCI_BONDABLE bit + * so that pairing works for them. Once the management interface + * is in use this bit will be cleared again and userspace has + * to explicitly enable it. 
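+ * (Management interface users toggle this through the Set Bondable + * command, MGMT_OP_SET_BONDABLE, instead.)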
+ */ + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + !hci_dev_test_flag(hdev, HCI_MGMT)) + hci_dev_set_flag(hdev, HCI_BONDABLE); + + err = hci_dev_do_open(hdev); + +done: + hci_dev_put(hdev); + return err; +} + +int hci_dev_do_close(struct hci_dev *hdev) +{ + int err; + + BT_DBG("%s %p", hdev->name, hdev); + + hci_req_sync_lock(hdev); + + err = hci_dev_close_sync(hdev); + + hci_req_sync_unlock(hdev); + + return err; +} + +int hci_dev_close(__u16 dev) +{ + struct hci_dev *hdev; + int err; + + hdev = hci_dev_get(dev); + if (!hdev) + return -ENODEV; + + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = -EBUSY; + goto done; + } + + cancel_work_sync(&hdev->power_on); + if (hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) + cancel_delayed_work(&hdev->power_off); + + err = hci_dev_do_close(hdev); + +done: + hci_dev_put(hdev); + return err; +} + +static int hci_dev_do_reset(struct hci_dev *hdev) +{ + int ret; + + BT_DBG("%s %p", hdev->name, hdev); + + hci_req_sync_lock(hdev); + + /* Drop queues */ + skb_queue_purge(&hdev->rx_q); + skb_queue_purge(&hdev->cmd_q); + + /* Cancel these to avoid queueing non-chained pending work */ + hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); + /* Wait for + * + * if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) + * queue_delayed_work(&hdev->{cmd,ncmd}_timer) + * + * inside RCU section to see the flag or complete scheduling. + */ + synchronize_rcu(); + /* Explicitly cancel works in case scheduled after setting the flag. */ + cancel_delayed_work(&hdev->cmd_timer); + cancel_delayed_work(&hdev->ncmd_timer); + + /* Avoid potential lockdep warnings from the *_flush() calls by + * ensuring the workqueue is empty up front. + */ + drain_workqueue(hdev->workqueue); + + hci_dev_lock(hdev); + hci_inquiry_cache_flush(hdev); + hci_conn_hash_flush(hdev); + hci_dev_unlock(hdev); + + if (hdev->flush) + hdev->flush(hdev); + + hci_dev_clear_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); + + atomic_set(&hdev->cmd_cnt, 1); + hdev->acl_cnt = 0; + hdev->sco_cnt = 0; + hdev->le_cnt = 0; + hdev->iso_cnt = 0; + + ret = hci_reset_sync(hdev); + + hci_req_sync_unlock(hdev); + return ret; +} + +int hci_dev_reset(__u16 dev) +{ + struct hci_dev *hdev; + int err; + + hdev = hci_dev_get(dev); + if (!hdev) + return -ENODEV; + + if (!test_bit(HCI_UP, &hdev->flags)) { + err = -ENETDOWN; + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = -EBUSY; + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + err = -EOPNOTSUPP; + goto done; + } + + err = hci_dev_do_reset(hdev); + +done: + hci_dev_put(hdev); + return err; +} + +int hci_dev_reset_stat(__u16 dev) +{ + struct hci_dev *hdev; + int ret = 0; + + hdev = hci_dev_get(dev); + if (!hdev) + return -ENODEV; + + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + ret = -EBUSY; + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + ret = -EOPNOTSUPP; + goto done; + } + + memset(&hdev->stat, 0, sizeof(struct hci_dev_stats)); + +done: + hci_dev_put(hdev); + return ret; +} + +static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan) +{ + bool conn_changed, discov_changed; + + BT_DBG("%s scan 0x%02x", hdev->name, scan); + + if ((scan & SCAN_PAGE)) + conn_changed = !hci_dev_test_and_set_flag(hdev, + HCI_CONNECTABLE); + else + conn_changed = hci_dev_test_and_clear_flag(hdev, + HCI_CONNECTABLE); + + if ((scan & SCAN_INQUIRY)) { + discov_changed = !hci_dev_test_and_set_flag(hdev, + HCI_DISCOVERABLE); + } else { + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + discov_changed = 
hci_dev_test_and_clear_flag(hdev, + HCI_DISCOVERABLE); + } + + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + return; + + if (conn_changed || discov_changed) { + /* In case this was disabled through mgmt */ + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); + + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + hci_update_adv_data(hdev, hdev->cur_adv_instance); + + mgmt_new_settings(hdev); + } +} + +int hci_dev_cmd(unsigned int cmd, void __user *arg) +{ + struct hci_dev *hdev; + struct hci_dev_req dr; + int err = 0; + + if (copy_from_user(&dr, arg, sizeof(dr))) + return -EFAULT; + + hdev = hci_dev_get(dr.dev_id); + if (!hdev) + return -ENODEV; + + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = -EBUSY; + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + err = -EOPNOTSUPP; + goto done; + } + + if (hdev->dev_type != HCI_PRIMARY) { + err = -EOPNOTSUPP; + goto done; + } + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + err = -EOPNOTSUPP; + goto done; + } + + switch (cmd) { + case HCISETAUTH: + err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, + HCI_INIT_TIMEOUT, NULL); + break; + + case HCISETENCRYPT: + if (!lmp_encrypt_capable(hdev)) { + err = -EOPNOTSUPP; + break; + } + + if (!test_bit(HCI_AUTH, &hdev->flags)) { + /* Auth must be enabled first */ + err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt, + HCI_INIT_TIMEOUT, NULL); + if (err) + break; + } + + err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt, + HCI_INIT_TIMEOUT, NULL); + break; + + case HCISETSCAN: + err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt, + HCI_INIT_TIMEOUT, NULL); + + /* Ensure that the connectable and discoverable states + * get correctly modified as this was a non-mgmt change. + */ + if (!err) + hci_update_passive_scan_state(hdev, dr.dev_opt); + break; + + case HCISETLINKPOL: + err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt, + HCI_INIT_TIMEOUT, NULL); + break; + + case HCISETLINKMODE: + hdev->link_mode = ((__u16) dr.dev_opt) & + (HCI_LM_MASTER | HCI_LM_ACCEPT); + break; + + case HCISETPTYPE: + if (hdev->pkt_type == (__u16) dr.dev_opt) + break; + + hdev->pkt_type = (__u16) dr.dev_opt; + mgmt_phy_configuration_changed(hdev, NULL); + break; + + case HCISETACLMTU: + hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1); + hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0); + break; + + case HCISETSCOMTU: + hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1); + hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0); + break; + + default: + err = -EINVAL; + break; + } + +done: + hci_dev_put(hdev); + return err; +} + +int hci_get_dev_list(void __user *arg) +{ + struct hci_dev *hdev; + struct hci_dev_list_req *dl; + struct hci_dev_req *dr; + int n = 0, size, err; + __u16 dev_num; + + if (get_user(dev_num, (__u16 __user *) arg)) + return -EFAULT; + + if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr)) + return -EINVAL; + + size = sizeof(*dl) + dev_num * sizeof(*dr); + + dl = kzalloc(size, GFP_KERNEL); + if (!dl) + return -ENOMEM; + + dr = dl->dev_req; + + read_lock(&hci_dev_list_lock); + list_for_each_entry(hdev, &hci_dev_list, list) { + unsigned long flags = hdev->flags; + + /* When the auto-off is configured it means the transport + * is running, but in that case still indicate that the + * device is actually down. 
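+ * + * A freshly registered controller is the typical case: it is brought + * up for initial setup with HCI_AUTO_OFF set and is powered back down + * unless something keeps it on.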
+ */ + if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) + flags &= ~BIT(HCI_UP); + + (dr + n)->dev_id = hdev->id; + (dr + n)->dev_opt = flags; + + if (++n >= dev_num) + break; + } + read_unlock(&hci_dev_list_lock); + + dl->dev_num = n; + size = sizeof(*dl) + n * sizeof(*dr); + + err = copy_to_user(arg, dl, size); + kfree(dl); + + return err ? -EFAULT : 0; +} + +int hci_get_dev_info(void __user *arg) +{ + struct hci_dev *hdev; + struct hci_dev_info di; + unsigned long flags; + int err = 0; + + if (copy_from_user(&di, arg, sizeof(di))) + return -EFAULT; + + hdev = hci_dev_get(di.dev_id); + if (!hdev) + return -ENODEV; + + /* When the auto-off is configured it means the transport + * is running, but in that case still indicate that the + * device is actually down. + */ + if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) + flags = hdev->flags & ~BIT(HCI_UP); + else + flags = hdev->flags; + + strcpy(di.name, hdev->name); + di.bdaddr = hdev->bdaddr; + di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4); + di.flags = flags; + di.pkt_type = hdev->pkt_type; + if (lmp_bredr_capable(hdev)) { + di.acl_mtu = hdev->acl_mtu; + di.acl_pkts = hdev->acl_pkts; + di.sco_mtu = hdev->sco_mtu; + di.sco_pkts = hdev->sco_pkts; + } else { + di.acl_mtu = hdev->le_mtu; + di.acl_pkts = hdev->le_pkts; + di.sco_mtu = 0; + di.sco_pkts = 0; + } + di.link_policy = hdev->link_policy; + di.link_mode = hdev->link_mode; + + memcpy(&di.stat, &hdev->stat, sizeof(di.stat)); + memcpy(&di.features, &hdev->features, sizeof(di.features)); + + if (copy_to_user(arg, &di, sizeof(di))) + err = -EFAULT; + + hci_dev_put(hdev); + + return err; +} + +/* ---- Interface to HCI drivers ---- */ + +static int hci_rfkill_set_block(void *data, bool blocked) +{ + struct hci_dev *hdev = data; + + BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked); + + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) + return -EBUSY; + + if (blocked) { + hci_dev_set_flag(hdev, HCI_RFKILLED); + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) + hci_dev_do_close(hdev); + } else { + hci_dev_clear_flag(hdev, HCI_RFKILLED); + } + + return 0; +} + +static const struct rfkill_ops hci_rfkill_ops = { + .set_block = hci_rfkill_set_block, +}; + +static void hci_power_on(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, power_on); + int err; + + BT_DBG("%s", hdev->name); + + if (test_bit(HCI_UP, &hdev->flags) && + hci_dev_test_flag(hdev, HCI_MGMT) && + hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { + cancel_delayed_work(&hdev->power_off); + err = hci_powered_update_sync(hdev); + mgmt_power_on(hdev, err); + return; + } + + err = hci_dev_do_open(hdev); + if (err < 0) { + hci_dev_lock(hdev); + mgmt_set_powered_failed(hdev, err); + hci_dev_unlock(hdev); + return; + } + + /* During the HCI setup phase, a few error conditions are + * ignored and they need to be checked now. If they are still + * valid, it is important to turn the device back off. 
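[Editor's note] hci_get_dev_list() above services the HCIGETDEVLIST ioctl: the caller passes the array capacity in dev_num and gets back the number of entries actually filled. A minimal enumeration sketch, assuming the BlueZ headers:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>

int main(void)
{
        struct hci_dev_list_req *dl;
        int i, dd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);

        if (dd < 0)
                return 1;

        dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
        if (!dl)
                return 1;

        dl->dev_num = HCI_MAX_DEV;      /* in: capacity, out: devices found */
        if (ioctl(dd, HCIGETDEVLIST, (void *)dl) < 0) {
                perror("HCIGETDEVLIST");
                return 1;
        }

        /* dev_opt carries hdev->flags; HCI_UP is cleared when HCI_AUTO_OFF is set */
        for (i = 0; i < dl->dev_num; i++)
                printf("hci%u %s\n", dl->dev_req[i].dev_id,
                       (dl->dev_req[i].dev_opt & (1U << HCI_UP)) ? "up" : "down");

        free(dl);
        close(dd);
        return 0;
}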
+ */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED) || + hci_dev_test_flag(hdev, HCI_UNCONFIGURED) || + (hdev->dev_type == HCI_PRIMARY && + !bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY))) { + hci_dev_clear_flag(hdev, HCI_AUTO_OFF); + hci_dev_do_close(hdev); + } else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) { + queue_delayed_work(hdev->req_workqueue, &hdev->power_off, + HCI_AUTO_OFF_TIMEOUT); + } + + if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) { + /* For unconfigured devices, set the HCI_RAW flag + * so that userspace can easily identify them. + */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + set_bit(HCI_RAW, &hdev->flags); + + /* For fully configured devices, this will send + * the Index Added event. For unconfigured devices, + * it will send the Unconfigured Index Added event. + * + * Devices with HCI_QUIRK_RAW_DEVICE are ignored + * and no event will be sent. + */ + mgmt_index_added(hdev); + } else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) { + /* When the controller is now configured, it + * is important to clear the HCI_RAW flag. + */ + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + clear_bit(HCI_RAW, &hdev->flags); + + /* Powering on the controller with HCI_CONFIG set only + * happens with the transition from unconfigured to + * configured. This will send the Index Added event. + */ + mgmt_index_added(hdev); + } +} + +static void hci_power_off(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + power_off.work); + + BT_DBG("%s", hdev->name); + + hci_dev_do_close(hdev); +} + +static void hci_error_reset(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, error_reset); + + BT_DBG("%s", hdev->name); + + if (hdev->hw_error) + hdev->hw_error(hdev, hdev->hw_error_code); + else + bt_dev_err(hdev, "hardware error 0x%2.2x", hdev->hw_error_code); + + if (hci_dev_do_close(hdev)) + return; + + hci_dev_do_open(hdev); +} + +void hci_uuids_clear(struct hci_dev *hdev) +{ + struct bt_uuid *uuid, *tmp; + + list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) { + list_del(&uuid->list); + kfree(uuid); + } +} + +void hci_link_keys_clear(struct hci_dev *hdev) +{ + struct link_key *key, *tmp; + + list_for_each_entry_safe(key, tmp, &hdev->link_keys, list) { + list_del_rcu(&key->list); + kfree_rcu(key, rcu); + } +} + +void hci_smp_ltks_clear(struct hci_dev *hdev) +{ + struct smp_ltk *k, *tmp; + + list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { + list_del_rcu(&k->list); + kfree_rcu(k, rcu); + } +} + +void hci_smp_irks_clear(struct hci_dev *hdev) +{ + struct smp_irk *k, *tmp; + + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { + list_del_rcu(&k->list); + kfree_rcu(k, rcu); + } +} + +void hci_blocked_keys_clear(struct hci_dev *hdev) +{ + struct blocked_key *b, *tmp; + + list_for_each_entry_safe(b, tmp, &hdev->blocked_keys, list) { + list_del_rcu(&b->list); + kfree_rcu(b, rcu); + } +} + +bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]) +{ + bool blocked = false; + struct blocked_key *b; + + rcu_read_lock(); + list_for_each_entry_rcu(b, &hdev->blocked_keys, list) { + if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) { + blocked = true; + break; + } + } + + rcu_read_unlock(); + return blocked; +} + +struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) +{ + struct link_key *k; + + rcu_read_lock(); + list_for_each_entry_rcu(k, &hdev->link_keys, list) { + if (bacmp(bdaddr, &k->bdaddr) == 0) { + 
rcu_read_unlock(); + + if (hci_is_blocked_key(hdev, + HCI_BLOCKED_KEY_TYPE_LINKKEY, + k->val)) { + bt_dev_warn_ratelimited(hdev, + "Link key blocked for %pMR", + &k->bdaddr); + return NULL; + } + + return k; + } + } + rcu_read_unlock(); + + return NULL; +} + +static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn, + u8 key_type, u8 old_key_type) +{ + /* Legacy key */ + if (key_type < 0x03) + return true; + + /* Debug keys are insecure so don't store them persistently */ + if (key_type == HCI_LK_DEBUG_COMBINATION) + return false; + + /* Changed combination key and there's no previous one */ + if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff) + return false; + + /* Security mode 3 case */ + if (!conn) + return true; + + /* BR/EDR key derived using SC from an LE link */ + if (conn->type == LE_LINK) + return true; + + /* Neither local nor remote side had no-bonding as requirement */ + if (conn->auth_type > 0x01 && conn->remote_auth > 0x01) + return true; + + /* Local side had dedicated bonding as requirement */ + if (conn->auth_type == 0x02 || conn->auth_type == 0x03) + return true; + + /* Remote side had dedicated bonding as requirement */ + if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03) + return true; + + /* If none of the above criteria match, then don't store the key + * persistently */ + return false; +} + +static u8 ltk_role(u8 type) +{ + if (type == SMP_LTK) + return HCI_ROLE_MASTER; + + return HCI_ROLE_SLAVE; +} + +struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 role) +{ + struct smp_ltk *k; + + rcu_read_lock(); + list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + if (addr_type != k->bdaddr_type || bacmp(bdaddr, &k->bdaddr)) + continue; + + if (smp_ltk_is_sc(k) || ltk_role(k->type) == role) { + rcu_read_unlock(); + + if (hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_LTK, + k->val)) { + bt_dev_warn_ratelimited(hdev, + "LTK blocked for %pMR", + &k->bdaddr); + return NULL; + } + + return k; + } + } + rcu_read_unlock(); + + return NULL; +} + +struct smp_irk *hci_find_irk_by_rpa(struct hci_dev *hdev, bdaddr_t *rpa) +{ + struct smp_irk *irk_to_return = NULL; + struct smp_irk *irk; + + rcu_read_lock(); + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { + if (!bacmp(&irk->rpa, rpa)) { + irk_to_return = irk; + goto done; + } + } + + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { + if (smp_irk_matches(hdev, irk->val, rpa)) { + bacpy(&irk->rpa, rpa); + irk_to_return = irk; + goto done; + } + } + +done: + if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, + irk_to_return->val)) { + bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", + &irk_to_return->bdaddr); + irk_to_return = NULL; + } + + rcu_read_unlock(); + + return irk_to_return; +} + +struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type) +{ + struct smp_irk *irk_to_return = NULL; + struct smp_irk *irk; + + /* Identity Address must be public or static random */ + if (addr_type == ADDR_LE_DEV_RANDOM && (bdaddr->b[5] & 0xc0) != 0xc0) + return NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) { + if (addr_type == irk->addr_type && + bacmp(bdaddr, &irk->bdaddr) == 0) { + irk_to_return = irk; + goto done; + } + } + +done: + + if (irk_to_return && hci_is_blocked_key(hdev, HCI_BLOCKED_KEY_TYPE_IRK, + irk_to_return->val)) { + bt_dev_warn_ratelimited(hdev, "Identity key blocked for %pMR", + 
&irk_to_return->bdaddr); + irk_to_return = NULL; + } + + rcu_read_unlock(); + + return irk_to_return; +} + +struct link_key *hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, + bdaddr_t *bdaddr, u8 *val, u8 type, + u8 pin_len, bool *persistent) +{ + struct link_key *key, *old_key; + u8 old_key_type; + + old_key = hci_find_link_key(hdev, bdaddr); + if (old_key) { + old_key_type = old_key->type; + key = old_key; + } else { + old_key_type = conn ? conn->key_type : 0xff; + key = kzalloc(sizeof(*key), GFP_KERNEL); + if (!key) + return NULL; + list_add_rcu(&key->list, &hdev->link_keys); + } + + BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type); + + /* Some buggy controller combinations generate a changed + * combination key for legacy pairing even when there's no + * previous key */ + if (type == HCI_LK_CHANGED_COMBINATION && + (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) { + type = HCI_LK_COMBINATION; + if (conn) + conn->key_type = type; + } + + bacpy(&key->bdaddr, bdaddr); + memcpy(key->val, val, HCI_LINK_KEY_SIZE); + key->pin_len = pin_len; + + if (type == HCI_LK_CHANGED_COMBINATION) + key->type = old_key_type; + else + key->type = type; + + if (persistent) + *persistent = hci_persistent_key(hdev, conn, type, + old_key_type); + + return key; +} + +struct smp_ltk *hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 type, u8 authenticated, + u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand) +{ + struct smp_ltk *key, *old_key; + u8 role = ltk_role(type); + + old_key = hci_find_ltk(hdev, bdaddr, addr_type, role); + if (old_key) + key = old_key; + else { + key = kzalloc(sizeof(*key), GFP_KERNEL); + if (!key) + return NULL; + list_add_rcu(&key->list, &hdev->long_term_keys); + } + + bacpy(&key->bdaddr, bdaddr); + key->bdaddr_type = addr_type; + memcpy(key->val, tk, sizeof(key->val)); + key->authenticated = authenticated; + key->ediv = ediv; + key->rand = rand; + key->enc_size = enc_size; + key->type = type; + + return key; +} + +struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, u8 val[16], bdaddr_t *rpa) +{ + struct smp_irk *irk; + + irk = hci_find_irk_by_addr(hdev, bdaddr, addr_type); + if (!irk) { + irk = kzalloc(sizeof(*irk), GFP_KERNEL); + if (!irk) + return NULL; + + bacpy(&irk->bdaddr, bdaddr); + irk->addr_type = addr_type; + + list_add_rcu(&irk->list, &hdev->identity_resolving_keys); + } + + memcpy(irk->val, val, 16); + bacpy(&irk->rpa, rpa); + + return irk; +} + +int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr) +{ + struct link_key *key; + + key = hci_find_link_key(hdev, bdaddr); + if (!key) + return -ENOENT; + + BT_DBG("%s removing %pMR", hdev->name, bdaddr); + + list_del_rcu(&key->list); + kfree_rcu(key, rcu); + + return 0; +} + +int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 bdaddr_type) +{ + struct smp_ltk *k, *tmp; + int removed = 0; + + list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) { + if (bacmp(bdaddr, &k->bdaddr) || k->bdaddr_type != bdaddr_type) + continue; + + BT_DBG("%s removing %pMR", hdev->name, bdaddr); + + list_del_rcu(&k->list); + kfree_rcu(k, rcu); + removed++; + } + + return removed ? 
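[Editor's note] hci_persistent_key() above is the policy that decides which BR/EDR link keys are worth storing across power cycles. A standalone paraphrase for illustration only; it omits the LE cross-transport branch, and the numeric key types and authentication requirements follow the HCI specification values that the kernel's HCI_LK_* macros wrap:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LK_DEBUG_COMBINATION    0x03    /* mirrors HCI_LK_DEBUG_COMBINATION */
#define LK_CHANGED_COMBINATION  0x06    /* mirrors HCI_LK_CHANGED_COMBINATION */

/* conn_auth/remote_auth: 0x00/0x01 = no bonding, 0x02/0x03 = dedicated
 * bonding, 0x04/0x05 = general bonding (odd values add MITM protection).
 */
static bool persistent_key(bool have_conn, uint8_t conn_auth,
                           uint8_t remote_auth, uint8_t key_type,
                           uint8_t old_key_type)
{
        if (key_type < 0x03)                    /* legacy key */
                return true;
        if (key_type == LK_DEBUG_COMBINATION)   /* debug keys are insecure */
                return false;
        if (key_type == LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;                   /* changed key, no previous one */
        if (!have_conn)                         /* security mode 3 */
                return true;
        if (conn_auth > 0x01 && remote_auth > 0x01)
                return true;    /* neither side asked for no-bonding */
        if (conn_auth == 0x02 || conn_auth == 0x03)
                return true;    /* local side required dedicated bonding */
        if (remote_auth == 0x02 || remote_auth == 0x03)
                return true;    /* remote side required dedicated bonding */
        return false;
}

int main(void)
{
        /* debug key from a general-bonding pairing: not stored (prints 0) */
        printf("%d\n", persistent_key(true, 0x04, 0x04, 0x03, 0xff));
        /* unauthenticated combination key, general bonding: stored (prints 1) */
        printf("%d\n", persistent_key(true, 0x04, 0x04, 0x04, 0xff));
        return 0;
}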
0 : -ENOENT; +} + +void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type) +{ + struct smp_irk *k, *tmp; + + list_for_each_entry_safe(k, tmp, &hdev->identity_resolving_keys, list) { + if (bacmp(bdaddr, &k->bdaddr) || k->addr_type != addr_type) + continue; + + BT_DBG("%s removing %pMR", hdev->name, bdaddr); + + list_del_rcu(&k->list); + kfree_rcu(k, rcu); + } +} + +bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type) +{ + struct smp_ltk *k; + struct smp_irk *irk; + u8 addr_type; + + if (type == BDADDR_BREDR) { + if (hci_find_link_key(hdev, bdaddr)) + return true; + return false; + } + + /* Convert to HCI addr type which struct smp_ltk uses */ + if (type == BDADDR_LE_PUBLIC) + addr_type = ADDR_LE_DEV_PUBLIC; + else + addr_type = ADDR_LE_DEV_RANDOM; + + irk = hci_get_irk(hdev, bdaddr, addr_type); + if (irk) { + bdaddr = &irk->bdaddr; + addr_type = irk->addr_type; + } + + rcu_read_lock(); + list_for_each_entry_rcu(k, &hdev->long_term_keys, list) { + if (k->bdaddr_type == addr_type && !bacmp(bdaddr, &k->bdaddr)) { + rcu_read_unlock(); + return true; + } + } + rcu_read_unlock(); + + return false; +} + +/* HCI command timer function */ +static void hci_cmd_timeout(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + cmd_timer.work); + + if (hdev->sent_cmd) { + struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data; + u16 opcode = __le16_to_cpu(sent->opcode); + + bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode); + } else { + bt_dev_err(hdev, "command tx timeout"); + } + + if (hdev->cmd_timeout) + hdev->cmd_timeout(hdev); + + atomic_set(&hdev->cmd_cnt, 1); + queue_work(hdev->workqueue, &hdev->cmd_work); +} + +/* HCI ncmd timer function */ +static void hci_ncmd_timeout(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + ncmd_timer.work); + + bt_dev_err(hdev, "Controller not accepting commands anymore: ncmd = 0"); + + /* During HCI_INIT phase no events can be injected if the ncmd timer + * triggers since the procedure has its own timeout handling. 
+ */ + if (test_bit(HCI_INIT, &hdev->flags)) + return; + + /* This is an irrecoverable state, inject hardware error event */ + hci_reset_dev(hdev); +} + +struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 bdaddr_type) +{ + struct oob_data *data; + + list_for_each_entry(data, &hdev->remote_oob_data, list) { + if (bacmp(bdaddr, &data->bdaddr) != 0) + continue; + if (data->bdaddr_type != bdaddr_type) + continue; + return data; + } + + return NULL; +} + +int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type) +{ + struct oob_data *data; + + data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); + if (!data) + return -ENOENT; + + BT_DBG("%s removing %pMR (%u)", hdev->name, bdaddr, bdaddr_type); + + list_del(&data->list); + kfree(data); + + return 0; +} + +void hci_remote_oob_data_clear(struct hci_dev *hdev) +{ + struct oob_data *data, *n; + + list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) { + list_del(&data->list); + kfree(data); + } +} + +int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type, u8 *hash192, u8 *rand192, + u8 *hash256, u8 *rand256) +{ + struct oob_data *data; + + data = hci_find_remote_oob_data(hdev, bdaddr, bdaddr_type); + if (!data) { + data = kmalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + bacpy(&data->bdaddr, bdaddr); + data->bdaddr_type = bdaddr_type; + list_add(&data->list, &hdev->remote_oob_data); + } + + if (hash192 && rand192) { + memcpy(data->hash192, hash192, sizeof(data->hash192)); + memcpy(data->rand192, rand192, sizeof(data->rand192)); + if (hash256 && rand256) + data->present = 0x03; + } else { + memset(data->hash192, 0, sizeof(data->hash192)); + memset(data->rand192, 0, sizeof(data->rand192)); + if (hash256 && rand256) + data->present = 0x02; + else + data->present = 0x00; + } + + if (hash256 && rand256) { + memcpy(data->hash256, hash256, sizeof(data->hash256)); + memcpy(data->rand256, rand256, sizeof(data->rand256)); + } else { + memset(data->hash256, 0, sizeof(data->hash256)); + memset(data->rand256, 0, sizeof(data->rand256)); + if (hash192 && rand192) + data->present = 0x01; + } + + BT_DBG("%s for %pMR", hdev->name, bdaddr); + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_find_adv_instance(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv_instance; + + list_for_each_entry(adv_instance, &hdev->adv_instances, list) { + if (adv_instance->instance == instance) + return adv_instance; + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_get_next_instance(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *cur_instance; + + cur_instance = hci_find_adv_instance(hdev, instance); + if (!cur_instance) + return NULL; + + if (cur_instance == list_last_entry(&hdev->adv_instances, + struct adv_info, list)) + return list_first_entry(&hdev->adv_instances, + struct adv_info, list); + else + return list_next_entry(cur_instance, list); +} + +/* This function requires the caller holds hdev->lock */ +int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv_instance; + + adv_instance = hci_find_adv_instance(hdev, instance); + if (!adv_instance) + return -ENOENT; + + BT_DBG("%s removing %dMR", hdev->name, instance); + + if (hdev->cur_adv_instance == instance) { + if (hdev->adv_instance_timeout) { + cancel_delayed_work(&hdev->adv_instance_expire); + hdev->adv_instance_timeout = 0; + } + 
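[Editor's note] The present field maintained by hci_add_remote_oob_data() above is a two-bit mask: bit 0 means the P-192 hash/rand pair is valid, bit 1 the P-256 pair. A standalone restatement of that encoding:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint8_t oob_present(bool have192, bool have256)
{
        return (have192 ? 0x01 : 0x00) | (have256 ? 0x02 : 0x00);
}

int main(void)
{
        assert(oob_present(false, false) == 0x00);
        assert(oob_present(true,  false) == 0x01);
        assert(oob_present(false, true)  == 0x02);
        assert(oob_present(true,  true)  == 0x03);
        return 0;
}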
hdev->cur_adv_instance = 0x00; + } + + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); + + list_del(&adv_instance->list); + kfree(adv_instance); + + hdev->adv_instance_cnt--; + + return 0; +} + +void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired) +{ + struct adv_info *adv_instance, *n; + + list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) + adv_instance->rpa_expired = rpa_expired; +} + +/* This function requires the caller holds hdev->lock */ +void hci_adv_instances_clear(struct hci_dev *hdev) +{ + struct adv_info *adv_instance, *n; + + if (hdev->adv_instance_timeout) { + cancel_delayed_work(&hdev->adv_instance_expire); + hdev->adv_instance_timeout = 0; + } + + list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, list) { + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); + list_del(&adv_instance->list); + kfree(adv_instance); + } + + hdev->adv_instance_cnt = 0; + hdev->cur_adv_instance = 0x00; +} + +static void adv_instance_rpa_expired(struct work_struct *work) +{ + struct adv_info *adv_instance = container_of(work, struct adv_info, + rpa_expired_cb.work); + + BT_DBG(""); + + adv_instance->rpa_expired = true; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance, + u32 flags, u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data, + u16 timeout, u16 duration, s8 tx_power, + u32 min_interval, u32 max_interval, + u8 mesh_handle) +{ + struct adv_info *adv; + + adv = hci_find_adv_instance(hdev, instance); + if (adv) { + memset(adv->adv_data, 0, sizeof(adv->adv_data)); + memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); + memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data)); + } else { + if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || + instance < 1 || instance > hdev->le_num_of_adv_sets + 1) + return ERR_PTR(-EOVERFLOW); + + adv = kzalloc(sizeof(*adv), GFP_KERNEL); + if (!adv) + return ERR_PTR(-ENOMEM); + + adv->pending = true; + adv->instance = instance; + list_add(&adv->list, &hdev->adv_instances); + hdev->adv_instance_cnt++; + } + + adv->flags = flags; + adv->min_interval = min_interval; + adv->max_interval = max_interval; + adv->tx_power = tx_power; + /* Defining a mesh_handle changes the timing units to ms, + * rather than seconds, and ties the instance to the requested + * mesh_tx queue. 
+ */ + adv->mesh = mesh_handle; + + hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data, + scan_rsp_len, scan_rsp_data); + + adv->timeout = timeout; + adv->remaining_time = timeout; + + if (duration == 0) + adv->duration = hdev->def_multi_adv_rotation_duration; + else + adv->duration = duration; + + INIT_DELAYED_WORK(&adv->rpa_expired_cb, adv_instance_rpa_expired); + + BT_DBG("%s for %dMR", hdev->name, instance); + + return adv; +} + +/* This function requires the caller holds hdev->lock */ +struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance, + u32 flags, u8 data_len, u8 *data, + u32 min_interval, u32 max_interval) +{ + struct adv_info *adv; + + adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL, + 0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, + min_interval, max_interval, 0); + if (IS_ERR(adv)) + return adv; + + adv->periodic = true; + adv->per_adv_data_len = data_len; + + if (data) + memcpy(adv->per_adv_data, data, data_len); + + return adv; +} + +/* This function requires the caller holds hdev->lock */ +int hci_set_adv_instance_data(struct hci_dev *hdev, u8 instance, + u16 adv_data_len, u8 *adv_data, + u16 scan_rsp_len, u8 *scan_rsp_data) +{ + struct adv_info *adv; + + adv = hci_find_adv_instance(hdev, instance); + + /* If advertisement doesn't exist, we can't modify its data */ + if (!adv) + return -ENOENT; + + if (adv_data_len && ADV_DATA_CMP(adv, adv_data, adv_data_len)) { + memset(adv->adv_data, 0, sizeof(adv->adv_data)); + memcpy(adv->adv_data, adv_data, adv_data_len); + adv->adv_data_len = adv_data_len; + adv->adv_data_changed = true; + } + + if (scan_rsp_len && SCAN_RSP_CMP(adv, scan_rsp_data, scan_rsp_len)) { + memset(adv->scan_rsp_data, 0, sizeof(adv->scan_rsp_data)); + memcpy(adv->scan_rsp_data, scan_rsp_data, scan_rsp_len); + adv->scan_rsp_len = scan_rsp_len; + adv->scan_rsp_changed = true; + } + + /* Mark as changed if there are flags which would affect it */ + if (((adv->flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) || + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + adv->scan_rsp_changed = true; + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +u32 hci_adv_instance_flags(struct hci_dev *hdev, u8 instance) +{ + u32 flags; + struct adv_info *adv; + + if (instance == 0x00) { + /* Instance 0 always manages the "Tx Power" and "Flags" + * fields + */ + flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS; + + /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting + * corresponds to the "connectable" instance flag. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) + flags |= MGMT_ADV_FLAG_CONNECTABLE; + + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) + flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; + else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + flags |= MGMT_ADV_FLAG_DISCOV; + + return flags; + } + + adv = hci_find_adv_instance(hdev, instance); + + /* Return 0 when we got an invalid instance identifier. */ + if (!adv) + return 0; + + return adv->flags; +} + +bool hci_adv_instance_is_scannable(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv; + + /* Instance 0x00 always set local name */ + if (instance == 0x00) + return true; + + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return false; + + if (adv->flags & MGMT_ADV_FLAG_APPEARANCE || + adv->flags & MGMT_ADV_FLAG_LOCAL_NAME) + return true; + + return adv->scan_rsp_len ? 
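[Editor's note] A kernel-context sketch (not a standalone program) of calling hci_add_adv_instance() with the signature shown above. The instance number, flag, AD payload and intervals are illustrative values, and hdev->lock must be held as the function's comment requires:

static int example_add_adv(struct hci_dev *hdev)
{
        /* Flags AD structure: LE General Discoverable, BR/EDR not supported */
        u8 ad[] = { 0x02, 0x01, 0x06 };
        struct adv_info *adv;

        hci_dev_lock(hdev);
        adv = hci_add_adv_instance(hdev, 0x01, MGMT_ADV_FLAG_CONNECTABLE,
                                   sizeof(ad), ad, 0, NULL,
                                   0 /* no timeout */, 0 /* default duration */,
                                   HCI_ADV_TX_POWER_NO_PREFERENCE,
                                   0x00a0, 0x00a0, 0 /* no mesh handle */);
        hci_dev_unlock(hdev);

        return IS_ERR(adv) ? PTR_ERR(adv) : 0;
}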
true : false; +} + +/* This function requires the caller holds hdev->lock */ +void hci_adv_monitors_clear(struct hci_dev *hdev) +{ + struct adv_monitor *monitor; + int handle; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) + hci_free_adv_monitor(hdev, monitor); + + idr_destroy(&hdev->adv_monitors_idr); +} + +/* Frees the monitor structure and does some bookkeeping. + * This function requires the caller holds hdev->lock. + */ +void hci_free_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + struct adv_pattern *pattern; + struct adv_pattern *tmp; + + if (!monitor) + return; + + list_for_each_entry_safe(pattern, tmp, &monitor->patterns, list) { + list_del(&pattern->list); + kfree(pattern); + } + + if (monitor->handle) + idr_remove(&hdev->adv_monitors_idr, monitor->handle); + + if (monitor->state != ADV_MONITOR_STATE_NOT_REGISTERED) { + hdev->adv_monitors_cnt--; + mgmt_adv_monitor_removed(hdev, monitor->handle); + } + + kfree(monitor); +} + +/* Assigns a handle to a monitor, and if offloading is supported and power is on, + * also attempts to forward the request to the controller. + * This function requires the caller holds hci_req_sync_lock. + */ +int hci_add_adv_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + int min, max, handle; + int status = 0; + + if (!monitor) + return -EINVAL; + + hci_dev_lock(hdev); + + min = HCI_MIN_ADV_MONITOR_HANDLE; + max = HCI_MIN_ADV_MONITOR_HANDLE + HCI_MAX_ADV_MONITOR_NUM_HANDLES; + handle = idr_alloc(&hdev->adv_monitors_idr, monitor, min, max, + GFP_KERNEL); + + hci_dev_unlock(hdev); + + if (handle < 0) + return handle; + + monitor->handle = handle; + + if (!hdev_is_powered(hdev)) + return status; + + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_NONE: + bt_dev_dbg(hdev, "add monitor %d status %d", + monitor->handle, status); + /* Message was not forwarded to controller - not an error */ + break; + + case HCI_ADV_MONITOR_EXT_MSFT: + status = msft_add_monitor_pattern(hdev, monitor); + bt_dev_dbg(hdev, "add monitor %d msft status %d", + handle, status); + break; + } + + return status; +} + +/* Attempts to tell the controller and free the monitor. If somehow the + * controller doesn't have a corresponding handle, remove anyway. + * This function requires the caller holds hci_req_sync_lock. 
+ */ +static int hci_remove_adv_monitor(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + int status = 0; + int handle; + + switch (hci_get_adv_monitor_offload_ext(hdev)) { + case HCI_ADV_MONITOR_EXT_NONE: /* also goes here when powered off */ + bt_dev_dbg(hdev, "remove monitor %d status %d", + monitor->handle, status); + goto free_monitor; + + case HCI_ADV_MONITOR_EXT_MSFT: + handle = monitor->handle; + status = msft_remove_monitor(hdev, monitor); + bt_dev_dbg(hdev, "remove monitor %d msft status %d", + handle, status); + break; + } + + /* In case no matching handle registered, just free the monitor */ + if (status == -ENOENT) + goto free_monitor; + + return status; + +free_monitor: + if (status == -ENOENT) + bt_dev_warn(hdev, "Removing monitor with no matching handle %d", + monitor->handle); + hci_free_adv_monitor(hdev, monitor); + + return status; +} + +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_single_adv_monitor(struct hci_dev *hdev, u16 handle) +{ + struct adv_monitor *monitor = idr_find(&hdev->adv_monitors_idr, handle); + + if (!monitor) + return -EINVAL; + + return hci_remove_adv_monitor(hdev, monitor); +} + +/* This function requires the caller holds hci_req_sync_lock */ +int hci_remove_all_adv_monitor(struct hci_dev *hdev) +{ + struct adv_monitor *monitor; + int idr_next_id = 0; + int status = 0; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &idr_next_id); + if (!monitor) + break; + + status = hci_remove_adv_monitor(hdev, monitor); + if (status) + return status; + + idr_next_id++; + } + + return status; +} + +/* This function requires the caller holds hdev->lock */ +bool hci_is_adv_monitoring(struct hci_dev *hdev) +{ + return !idr_is_empty(&hdev->adv_monitors_idr); +} + +int hci_get_adv_monitor_offload_ext(struct hci_dev *hdev) +{ + if (msft_monitor_supported(hdev)) + return HCI_ADV_MONITOR_EXT_MSFT; + + return HCI_ADV_MONITOR_EXT_NONE; +} + +struct bdaddr_list *hci_bdaddr_list_lookup(struct list_head *bdaddr_list, + bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list *b; + + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + +struct bdaddr_list_with_irk *hci_bdaddr_list_lookup_with_irk( + struct list_head *bdaddr_list, bdaddr_t *bdaddr, + u8 type) +{ + struct bdaddr_list_with_irk *b; + + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + +struct bdaddr_list_with_flags * +hci_bdaddr_list_lookup_with_flags(struct list_head *bdaddr_list, + bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list_with_flags *b; + + list_for_each_entry(b, bdaddr_list, list) { + if (!bacmp(&b->bdaddr, bdaddr) && b->bdaddr_type == type) + return b; + } + + return NULL; +} + +void hci_bdaddr_list_clear(struct list_head *bdaddr_list) +{ + struct bdaddr_list *b, *n; + + list_for_each_entry_safe(b, n, bdaddr_list, list) { + list_del(&b->list); + kfree(b); + } +} + +int hci_bdaddr_list_add(struct list_head *list, bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + + list_add(&entry->list, list); + + return 0; +} + +int hci_bdaddr_list_add_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type, 
u8 *peer_irk, u8 *local_irk) +{ + struct bdaddr_list_with_irk *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + + if (peer_irk) + memcpy(entry->peer_irk, peer_irk, 16); + + if (local_irk) + memcpy(entry->local_irk, local_irk, 16); + + list_add(&entry->list, list); + + return 0; +} + +int hci_bdaddr_list_add_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type, u32 flags) +{ + struct bdaddr_list_with_flags *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) + return -EBADF; + + if (hci_bdaddr_list_lookup(list, bdaddr, type)) + return -EEXIST; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + bacpy(&entry->bdaddr, bdaddr); + entry->bdaddr_type = type; + entry->flags = flags; + + list_add(&entry->list, list); + + return 0; +} + +int hci_bdaddr_list_del(struct list_head *list, bdaddr_t *bdaddr, u8 type) +{ + struct bdaddr_list *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) { + hci_bdaddr_list_clear(list); + return 0; + } + + entry = hci_bdaddr_list_lookup(list, bdaddr, type); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; +} + +int hci_bdaddr_list_del_with_irk(struct list_head *list, bdaddr_t *bdaddr, + u8 type) +{ + struct bdaddr_list_with_irk *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) { + hci_bdaddr_list_clear(list); + return 0; + } + + entry = hci_bdaddr_list_lookup_with_irk(list, bdaddr, type); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; +} + +int hci_bdaddr_list_del_with_flags(struct list_head *list, bdaddr_t *bdaddr, + u8 type) +{ + struct bdaddr_list_with_flags *entry; + + if (!bacmp(bdaddr, BDADDR_ANY)) { + hci_bdaddr_list_clear(list); + return 0; + } + + entry = hci_bdaddr_list_lookup_with_flags(list, bdaddr, type); + if (!entry) + return -ENOENT; + + list_del(&entry->list); + kfree(entry); + + return 0; +} + +/* This function requires the caller holds hdev->lock */ +struct hci_conn_params *hci_conn_params_lookup(struct hci_dev *hdev, + bdaddr_t *addr, u8 addr_type) +{ + struct hci_conn_params *params; + + list_for_each_entry(params, &hdev->le_conn_params, list) { + if (bacmp(¶ms->addr, addr) == 0 && + params->addr_type == addr_type) { + return params; + } + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock or rcu_read_lock */ +struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list, + bdaddr_t *addr, u8 addr_type) +{ + struct hci_conn_params *param; + + rcu_read_lock(); + + list_for_each_entry_rcu(param, list, action) { + if (bacmp(¶m->addr, addr) == 0 && + param->addr_type == addr_type) { + rcu_read_unlock(); + return param; + } + } + + rcu_read_unlock(); + + return NULL; +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_list_del_init(struct hci_conn_params *param) +{ + if (list_empty(¶m->action)) + return; + + list_del_rcu(¶m->action); + synchronize_rcu(); + INIT_LIST_HEAD(¶m->action); +} + +/* This function requires the caller holds hdev->lock */ +void hci_pend_le_list_add(struct hci_conn_params *param, + struct list_head *list) +{ + list_add_rcu(¶m->action, list); +} + +/* This function requires the caller holds hdev->lock */ +struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev, + bdaddr_t *addr, u8 addr_type) +{ + struct hci_conn_params *params; 
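[Editor's note] A kernel-context sketch of the bdaddr list helpers above in use; pairing hdev->reject_list with the BDADDR_BREDR type is just one plausible combination, chosen for illustration:

static int example_reject_peer(struct hci_dev *hdev, bdaddr_t *peer)
{
        int err;

        err = hci_bdaddr_list_add(&hdev->reject_list, peer, BDADDR_BREDR);
        if (err == -EEXIST)     /* already on the list: not fatal */
                err = 0;
        if (err)
                return err;

        if (!hci_bdaddr_list_lookup(&hdev->reject_list, peer, BDADDR_BREDR))
                return -ENOENT; /* should not happen after a successful add */

        /* Passing BDADDR_ANY instead of 'peer' would clear the whole list */
        return hci_bdaddr_list_del(&hdev->reject_list, peer, BDADDR_BREDR);
}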
+ + params = hci_conn_params_lookup(hdev, addr, addr_type); + if (params) + return params; + + params = kzalloc(sizeof(*params), GFP_KERNEL); + if (!params) { + bt_dev_err(hdev, "out of memory"); + return NULL; + } + + bacpy(¶ms->addr, addr); + params->addr_type = addr_type; + + list_add(¶ms->list, &hdev->le_conn_params); + INIT_LIST_HEAD(¶ms->action); + + params->conn_min_interval = hdev->le_conn_min_interval; + params->conn_max_interval = hdev->le_conn_max_interval; + params->conn_latency = hdev->le_conn_latency; + params->supervision_timeout = hdev->le_supv_timeout; + params->auto_connect = HCI_AUTO_CONN_DISABLED; + + BT_DBG("addr %pMR (type %u)", addr, addr_type); + + return params; +} + +void hci_conn_params_free(struct hci_conn_params *params) +{ + hci_pend_le_list_del_init(params); + + if (params->conn) { + hci_conn_drop(params->conn); + hci_conn_put(params->conn); + } + + list_del(¶ms->list); + kfree(params); +} + +/* This function requires the caller holds hdev->lock */ +void hci_conn_params_del(struct hci_dev *hdev, bdaddr_t *addr, u8 addr_type) +{ + struct hci_conn_params *params; + + params = hci_conn_params_lookup(hdev, addr, addr_type); + if (!params) + return; + + hci_conn_params_free(params); + + hci_update_passive_scan(hdev); + + BT_DBG("addr %pMR (type %u)", addr, addr_type); +} + +/* This function requires the caller holds hdev->lock */ +void hci_conn_params_clear_disabled(struct hci_dev *hdev) +{ + struct hci_conn_params *params, *tmp; + + list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) { + if (params->auto_connect != HCI_AUTO_CONN_DISABLED) + continue; + + /* If trying to establish one time connection to disabled + * device, leave the params, but mark them as just once. + */ + if (params->explicit_connect) { + params->auto_connect = HCI_AUTO_CONN_EXPLICIT; + continue; + } + + hci_conn_params_free(params); + } + + BT_DBG("All LE disabled connection parameters were removed"); +} + +/* This function requires the caller holds hdev->lock */ +static void hci_conn_params_clear_all(struct hci_dev *hdev) +{ + struct hci_conn_params *params, *tmp; + + list_for_each_entry_safe(params, tmp, &hdev->le_conn_params, list) + hci_conn_params_free(params); + + BT_DBG("All LE connection parameters were removed"); +} + +/* Copy the Identity Address of the controller. + * + * If the controller has a public BD_ADDR, then by default use that one. + * If this is a LE only controller without a public address, default to + * the static random address. + * + * For debugging purposes it is possible to force controllers with a + * public address to use the static random address instead. + * + * In case BR/EDR has been disabled on a dual-mode controller and + * userspace has configured a static address, then that address + * becomes the identity address instead of the public BR/EDR address. 
+ */ +void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 *bdaddr_type) +{ + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + !bacmp(&hdev->bdaddr, BDADDR_ANY) || + (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && + bacmp(&hdev->static_addr, BDADDR_ANY))) { + bacpy(bdaddr, &hdev->static_addr); + *bdaddr_type = ADDR_LE_DEV_RANDOM; + } else { + bacpy(bdaddr, &hdev->bdaddr); + *bdaddr_type = ADDR_LE_DEV_PUBLIC; + } +} + +static void hci_clear_wake_reason(struct hci_dev *hdev) +{ + hci_dev_lock(hdev); + + hdev->wake_reason = 0; + bacpy(&hdev->wake_addr, BDADDR_ANY); + hdev->wake_addr_type = 0; + + hci_dev_unlock(hdev); +} + +static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action, + void *data) +{ + struct hci_dev *hdev = + container_of(nb, struct hci_dev, suspend_notifier); + int ret = 0; + + /* Userspace has full control of this device. Do nothing. */ + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) + return NOTIFY_DONE; + + /* To avoid a potential race with hci_unregister_dev. */ + hci_dev_hold(hdev); + + if (action == PM_SUSPEND_PREPARE) + ret = hci_suspend_dev(hdev); + else if (action == PM_POST_SUSPEND) + ret = hci_resume_dev(hdev); + + if (ret) + bt_dev_err(hdev, "Suspend notifier action (%lu) failed: %d", + action, ret); + + hci_dev_put(hdev); + return NOTIFY_DONE; +} + +/* Alloc HCI device */ +struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) +{ + struct hci_dev *hdev; + unsigned int alloc_size; + + alloc_size = sizeof(*hdev); + if (sizeof_priv) { + /* Fixme: May need ALIGN-ment? */ + alloc_size += sizeof_priv; + } + + hdev = kzalloc(alloc_size, GFP_KERNEL); + if (!hdev) + return NULL; + + hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1); + hdev->esco_type = (ESCO_HV1); + hdev->link_mode = (HCI_LM_ACCEPT); + hdev->num_iac = 0x01; /* One IAC support is mandatory */ + hdev->io_capability = 0x03; /* No Input No Output */ + hdev->manufacturer = 0xffff; /* Default to internal use */ + hdev->inq_tx_power = HCI_TX_POWER_INVALID; + hdev->adv_tx_power = HCI_TX_POWER_INVALID; + hdev->adv_instance_cnt = 0; + hdev->cur_adv_instance = 0x00; + hdev->adv_instance_timeout = 0; + + hdev->advmon_allowlist_duration = 300; + hdev->advmon_no_filter_duration = 500; + hdev->enable_advmon_interleave_scan = 0x00; /* Default to disable */ + + hdev->sniff_max_interval = 800; + hdev->sniff_min_interval = 80; + + hdev->le_adv_channel_map = 0x07; + hdev->le_adv_min_interval = 0x0800; + hdev->le_adv_max_interval = 0x0800; + hdev->le_scan_interval = 0x0060; + hdev->le_scan_window = 0x0030; + hdev->le_scan_int_suspend = 0x0400; + hdev->le_scan_window_suspend = 0x0012; + hdev->le_scan_int_discovery = DISCOV_LE_SCAN_INT; + hdev->le_scan_window_discovery = DISCOV_LE_SCAN_WIN; + hdev->le_scan_int_adv_monitor = 0x0060; + hdev->le_scan_window_adv_monitor = 0x0030; + hdev->le_scan_int_connect = 0x0060; + hdev->le_scan_window_connect = 0x0060; + hdev->le_conn_min_interval = 0x0018; + hdev->le_conn_max_interval = 0x0028; + hdev->le_conn_latency = 0x0000; + hdev->le_supv_timeout = 0x002a; + hdev->le_def_tx_len = 0x001b; + hdev->le_def_tx_time = 0x0148; + hdev->le_max_tx_len = 0x001b; + hdev->le_max_tx_time = 0x0148; + hdev->le_max_rx_len = 0x001b; + hdev->le_max_rx_time = 0x0148; + hdev->le_max_key_size = SMP_MAX_ENC_KEY_SIZE; + hdev->le_min_key_size = SMP_MIN_ENC_KEY_SIZE; + hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; + hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; + hdev->le_num_of_adv_sets = HCI_MAX_ADV_INSTANCES; + hdev->def_multi_adv_rotation_duration = 
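[Editor's note] The selection logic of hci_copy_identity_address() above, restated as a standalone userspace sketch against the BlueZ bdaddr_t helpers. The addresses in main() are made up; note the 0xc0 top bits in the most significant byte, which mark a static random address:

#include <stdbool.h>
#include <stdio.h>
#include <bluetooth/bluetooth.h>

/* Returns true when the static random address is the identity address */
static bool use_static_identity(bool force_static, bool bredr_enabled,
                                const bdaddr_t *public_addr,
                                const bdaddr_t *static_addr)
{
        if (force_static)                       /* HCI_FORCE_STATIC_ADDR */
                return true;
        if (!bacmp(public_addr, BDADDR_ANY))    /* no public address at all */
                return true;
        if (!bredr_enabled && bacmp(static_addr, BDADDR_ANY))
                return true;                    /* LE-only, static configured */
        return false;
}

int main(void)
{
        bdaddr_t pub = {{ 0x71, 0x82, 0x93, 0xa4, 0xb5, 0xc6 }};  /* made up */
        bdaddr_t rnd = {{ 0x01, 0x02, 0x03, 0x04, 0x05, 0xc0 }};  /* made up */

        /* dual mode with a public address: public identity (prints 0) */
        printf("%d\n", use_static_identity(false, true, &pub, &rnd));
        /* BR/EDR disabled and static address set: static identity (prints 1) */
        printf("%d\n", use_static_identity(false, false, &pub, &rnd));
        return 0;
}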
HCI_DEFAULT_ADV_DURATION; + hdev->def_le_autoconnect_timeout = HCI_LE_AUTOCONN_TIMEOUT; + hdev->min_le_tx_power = HCI_TX_POWER_INVALID; + hdev->max_le_tx_power = HCI_TX_POWER_INVALID; + + hdev->rpa_timeout = HCI_DEFAULT_RPA_TIMEOUT; + hdev->discov_interleaved_timeout = DISCOV_INTERLEAVED_TIMEOUT; + hdev->conn_info_min_age = DEFAULT_CONN_INFO_MIN_AGE; + hdev->conn_info_max_age = DEFAULT_CONN_INFO_MAX_AGE; + hdev->auth_payload_timeout = DEFAULT_AUTH_PAYLOAD_TIMEOUT; + hdev->min_enc_key_size = HCI_MIN_ENC_KEY_SIZE; + + /* default 1.28 sec page scan */ + hdev->def_page_scan_type = PAGE_SCAN_TYPE_STANDARD; + hdev->def_page_scan_int = 0x0800; + hdev->def_page_scan_window = 0x0012; + + mutex_init(&hdev->lock); + mutex_init(&hdev->req_lock); + + INIT_LIST_HEAD(&hdev->mesh_pending); + INIT_LIST_HEAD(&hdev->mgmt_pending); + INIT_LIST_HEAD(&hdev->reject_list); + INIT_LIST_HEAD(&hdev->accept_list); + INIT_LIST_HEAD(&hdev->uuids); + INIT_LIST_HEAD(&hdev->link_keys); + INIT_LIST_HEAD(&hdev->long_term_keys); + INIT_LIST_HEAD(&hdev->identity_resolving_keys); + INIT_LIST_HEAD(&hdev->remote_oob_data); + INIT_LIST_HEAD(&hdev->le_accept_list); + INIT_LIST_HEAD(&hdev->le_resolv_list); + INIT_LIST_HEAD(&hdev->le_conn_params); + INIT_LIST_HEAD(&hdev->pend_le_conns); + INIT_LIST_HEAD(&hdev->pend_le_reports); + INIT_LIST_HEAD(&hdev->conn_hash.list); + INIT_LIST_HEAD(&hdev->adv_instances); + INIT_LIST_HEAD(&hdev->blocked_keys); + INIT_LIST_HEAD(&hdev->monitored_devices); + + INIT_LIST_HEAD(&hdev->local_codecs); + INIT_WORK(&hdev->rx_work, hci_rx_work); + INIT_WORK(&hdev->cmd_work, hci_cmd_work); + INIT_WORK(&hdev->tx_work, hci_tx_work); + INIT_WORK(&hdev->power_on, hci_power_on); + INIT_WORK(&hdev->error_reset, hci_error_reset); + + hci_cmd_sync_init(hdev); + + INIT_DELAYED_WORK(&hdev->power_off, hci_power_off); + + skb_queue_head_init(&hdev->rx_q); + skb_queue_head_init(&hdev->cmd_q); + skb_queue_head_init(&hdev->raw_q); + + init_waitqueue_head(&hdev->req_wait_q); + + INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); + INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); + + hci_request_setup(hdev); + + hci_init_sysfs(hdev); + discovery_init(hdev); + + return hdev; +} +EXPORT_SYMBOL(hci_alloc_dev_priv); + +/* Free HCI device */ +void hci_free_dev(struct hci_dev *hdev) +{ + /* will free via device release */ + put_device(&hdev->dev); +} +EXPORT_SYMBOL(hci_free_dev); + +/* Register HCI device */ +int hci_register_dev(struct hci_dev *hdev) +{ + int id, error; + + if (!hdev->open || !hdev->close || !hdev->send) + return -EINVAL; + + /* Do not allow HCI_AMP devices to register at index 0, + * so the index can be used as the AMP controller ID. 
+ */ + switch (hdev->dev_type) { + case HCI_PRIMARY: + id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL); + break; + case HCI_AMP: + id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL); + break; + default: + return -EINVAL; + } + + if (id < 0) + return id; + + error = dev_set_name(&hdev->dev, "hci%u", id); + if (error) + return error; + + hdev->name = dev_name(&hdev->dev); + hdev->id = id; + + BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + + hdev->workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, hdev->name); + if (!hdev->workqueue) { + error = -ENOMEM; + goto err; + } + + hdev->req_workqueue = alloc_ordered_workqueue("%s", WQ_HIGHPRI, + hdev->name); + if (!hdev->req_workqueue) { + destroy_workqueue(hdev->workqueue); + error = -ENOMEM; + goto err; + } + + if (!IS_ERR_OR_NULL(bt_debugfs)) + hdev->debugfs = debugfs_create_dir(hdev->name, bt_debugfs); + + error = device_add(&hdev->dev); + if (error < 0) + goto err_wqueue; + + hci_leds_init(hdev); + + hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev, + RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, + hdev); + if (hdev->rfkill) { + if (rfkill_register(hdev->rfkill) < 0) { + rfkill_destroy(hdev->rfkill); + hdev->rfkill = NULL; + } + } + + if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) + hci_dev_set_flag(hdev, HCI_RFKILLED); + + hci_dev_set_flag(hdev, HCI_SETUP); + hci_dev_set_flag(hdev, HCI_AUTO_OFF); + + if (hdev->dev_type == HCI_PRIMARY) { + /* Assume BR/EDR support until proven otherwise (such as + * through reading supported features during init). + */ + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); + } + + write_lock(&hci_dev_list_lock); + list_add(&hdev->list, &hci_dev_list); + write_unlock(&hci_dev_list_lock); + + /* Devices that are marked for raw-only usage are unconfigured + * and should not be included in normal operation. + */ + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); + + /* Mark Remote Wakeup connection flag as supported if driver has wakeup + * callback. 
+ */ + if (hdev->wakeup) + hdev->conn_flags |= HCI_CONN_FLAG_REMOTE_WAKEUP; + + hci_sock_dev_event(hdev, HCI_DEV_REG); + hci_dev_hold(hdev); + + error = hci_register_suspend_notifier(hdev); + if (error) + BT_WARN("register suspend notifier failed error:%d\n", error); + + queue_work(hdev->req_workqueue, &hdev->power_on); + + idr_init(&hdev->adv_monitors_idr); + msft_register(hdev); + + return id; + +err_wqueue: + debugfs_remove_recursive(hdev->debugfs); + destroy_workqueue(hdev->workqueue); + destroy_workqueue(hdev->req_workqueue); +err: + ida_simple_remove(&hci_index_ida, hdev->id); + + return error; +} +EXPORT_SYMBOL(hci_register_dev); + +/* Unregister HCI device */ +void hci_unregister_dev(struct hci_dev *hdev) +{ + BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus); + + mutex_lock(&hdev->unregister_lock); + hci_dev_set_flag(hdev, HCI_UNREGISTER); + mutex_unlock(&hdev->unregister_lock); + + write_lock(&hci_dev_list_lock); + list_del(&hdev->list); + write_unlock(&hci_dev_list_lock); + + cancel_work_sync(&hdev->power_on); + + hci_cmd_sync_clear(hdev); + + hci_unregister_suspend_notifier(hdev); + + msft_unregister(hdev); + + hci_dev_do_close(hdev); + + if (!test_bit(HCI_INIT, &hdev->flags) && + !hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + hci_dev_lock(hdev); + mgmt_index_removed(hdev); + hci_dev_unlock(hdev); + } + + /* mgmt_index_removed should take care of emptying the + * pending list */ + BUG_ON(!list_empty(&hdev->mgmt_pending)); + + hci_sock_dev_event(hdev, HCI_DEV_UNREG); + + if (hdev->rfkill) { + rfkill_unregister(hdev->rfkill); + rfkill_destroy(hdev->rfkill); + } + + device_del(&hdev->dev); + /* Actual cleanup is deferred until hci_release_dev(). */ + hci_dev_put(hdev); +} +EXPORT_SYMBOL(hci_unregister_dev); + +/* Release HCI device */ +void hci_release_dev(struct hci_dev *hdev) +{ + debugfs_remove_recursive(hdev->debugfs); + kfree_const(hdev->hw_info); + kfree_const(hdev->fw_info); + + destroy_workqueue(hdev->workqueue); + destroy_workqueue(hdev->req_workqueue); + + hci_dev_lock(hdev); + hci_bdaddr_list_clear(&hdev->reject_list); + hci_bdaddr_list_clear(&hdev->accept_list); + hci_uuids_clear(hdev); + hci_link_keys_clear(hdev); + hci_smp_ltks_clear(hdev); + hci_smp_irks_clear(hdev); + hci_remote_oob_data_clear(hdev); + hci_adv_instances_clear(hdev); + hci_adv_monitors_clear(hdev); + hci_bdaddr_list_clear(&hdev->le_accept_list); + hci_bdaddr_list_clear(&hdev->le_resolv_list); + hci_conn_params_clear_all(hdev); + hci_discovery_filter_clear(hdev); + hci_blocked_keys_clear(hdev); + hci_codec_list_clear(&hdev->local_codecs); + hci_dev_unlock(hdev); + + ida_simple_remove(&hci_index_ida, hdev->id); + kfree_skb(hdev->sent_cmd); + kfree_skb(hdev->recv_event); + kfree(hdev); +} +EXPORT_SYMBOL(hci_release_dev); + +int hci_register_suspend_notifier(struct hci_dev *hdev) +{ + int ret = 0; + + if (!hdev->suspend_notifier.notifier_call && + !test_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks)) { + hdev->suspend_notifier.notifier_call = hci_suspend_notifier; + ret = register_pm_notifier(&hdev->suspend_notifier); + } + + return ret; +} + +int hci_unregister_suspend_notifier(struct hci_dev *hdev) +{ + int ret = 0; + + if (hdev->suspend_notifier.notifier_call) { + ret = unregister_pm_notifier(&hdev->suspend_notifier); + if (!ret) + hdev->suspend_notifier.notifier_call = NULL; + } + + return ret; +} + +/* Suspend HCI device */ +int hci_suspend_dev(struct hci_dev *hdev) +{ + int ret; + + bt_dev_dbg(hdev, ""); + + /* Suspend should only act on when powered. 
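[Editor's note] hci_register_dev() above refuses any driver that does not provide open, close and send callbacks. A kernel-context sketch of that minimal contract, with hypothetical callback names, loosely following the pattern of the simple virtual HCI drivers:

static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }

static int example_send(struct hci_dev *hdev, struct sk_buff *skb)
{
        kfree_skb(skb); /* a real driver would hand the frame to hardware */
        return 0;
}

static struct hci_dev *example_register(void)
{
        struct hci_dev *hdev = hci_alloc_dev_priv(0);

        if (!hdev)
                return NULL;

        hdev->bus = HCI_VIRTUAL;
        hdev->open = example_open;
        hdev->close = example_close;
        hdev->send = example_send;

        if (hci_register_dev(hdev) < 0) {
                hci_free_dev(hdev);
                return NULL;
        }

        return hdev;    /* torn down later with hci_unregister_dev() + hci_free_dev() */
}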
*/ + if (!hdev_is_powered(hdev) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + /* If powering down don't attempt to suspend */ + if (mgmt_powering_down(hdev)) + return 0; + + hci_req_sync_lock(hdev); + ret = hci_suspend_sync(hdev); + hci_req_sync_unlock(hdev); + + hci_clear_wake_reason(hdev); + mgmt_suspending(hdev, hdev->suspend_state); + + hci_sock_dev_event(hdev, HCI_DEV_SUSPEND); + return ret; +} +EXPORT_SYMBOL(hci_suspend_dev); + +/* Resume HCI device */ +int hci_resume_dev(struct hci_dev *hdev) +{ + int ret; + + bt_dev_dbg(hdev, ""); + + /* Resume should only act on when powered. */ + if (!hdev_is_powered(hdev) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + /* If powering down don't attempt to resume */ + if (mgmt_powering_down(hdev)) + return 0; + + hci_req_sync_lock(hdev); + ret = hci_resume_sync(hdev); + hci_req_sync_unlock(hdev); + + mgmt_resuming(hdev, hdev->wake_reason, &hdev->wake_addr, + hdev->wake_addr_type); + + hci_sock_dev_event(hdev, HCI_DEV_RESUME); + return ret; +} +EXPORT_SYMBOL(hci_resume_dev); + +/* Reset HCI device */ +int hci_reset_dev(struct hci_dev *hdev) +{ + static const u8 hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, 0x00 }; + struct sk_buff *skb; + + skb = bt_skb_alloc(3, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + skb_put_data(skb, hw_err, 3); + + bt_dev_err(hdev, "Injecting HCI hardware error event"); + + /* Send Hardware Error to upper stack */ + return hci_recv_frame(hdev, skb); +} +EXPORT_SYMBOL(hci_reset_dev); + +/* Receive frame from HCI drivers */ +int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (!hdev || (!test_bit(HCI_UP, &hdev->flags) + && !test_bit(HCI_INIT, &hdev->flags))) { + kfree_skb(skb); + return -ENXIO; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_EVENT_PKT: + break; + case HCI_ACLDATA_PKT: + /* Detect if ISO packet has been sent as ACL */ + if (hci_conn_num(hdev, ISO_LINK)) { + __u16 handle = __le16_to_cpu(hci_acl_hdr(skb)->handle); + __u8 type; + + type = hci_conn_lookup_type(hdev, hci_handle(handle)); + if (type == ISO_LINK) + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + } + break; + case HCI_SCODATA_PKT: + break; + case HCI_ISODATA_PKT: + break; + default: + kfree_skb(skb); + return -EINVAL; + } + + /* Incoming skb */ + bt_cb(skb)->incoming = 1; + + /* Time stamp */ + __net_timestamp(skb); + + skb_queue_tail(&hdev->rx_q, skb); + queue_work(hdev->workqueue, &hdev->rx_work); + + return 0; +} +EXPORT_SYMBOL(hci_recv_frame); + +/* Receive diagnostic message from HCI drivers */ +int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) +{ + /* Mark as diagnostic packet */ + hci_skb_pkt_type(skb) = HCI_DIAG_PKT; + + /* Time stamp */ + __net_timestamp(skb); + + skb_queue_tail(&hdev->rx_q, skb); + queue_work(hdev->workqueue, &hdev->rx_work); + + return 0; +} +EXPORT_SYMBOL(hci_recv_diag); + +void hci_set_hw_info(struct hci_dev *hdev, const char *fmt, ...) +{ + va_list vargs; + + va_start(vargs, fmt); + kfree_const(hdev->hw_info); + hdev->hw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); + va_end(vargs); +} +EXPORT_SYMBOL(hci_set_hw_info); + +void hci_set_fw_info(struct hci_dev *hdev, const char *fmt, ...) 
+{ + va_list vargs; + + va_start(vargs, fmt); + kfree_const(hdev->fw_info); + hdev->fw_info = kvasprintf_const(GFP_KERNEL, fmt, vargs); + va_end(vargs); +} +EXPORT_SYMBOL(hci_set_fw_info); + +/* ---- Interface to upper protocols ---- */ + +int hci_register_cb(struct hci_cb *cb) +{ + BT_DBG("%p name %s", cb, cb->name); + + mutex_lock(&hci_cb_list_lock); + list_add_tail(&cb->list, &hci_cb_list); + mutex_unlock(&hci_cb_list_lock); + + return 0; +} +EXPORT_SYMBOL(hci_register_cb); + +int hci_unregister_cb(struct hci_cb *cb) +{ + BT_DBG("%p name %s", cb, cb->name); + + mutex_lock(&hci_cb_list_lock); + list_del(&cb->list); + mutex_unlock(&hci_cb_list_lock); + + return 0; +} +EXPORT_SYMBOL(hci_unregister_cb); + +static int hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) +{ + int err; + + BT_DBG("%s type %d len %d", hdev->name, hci_skb_pkt_type(skb), + skb->len); + + /* Time stamp */ + __net_timestamp(skb); + + /* Send copy to monitor */ + hci_send_to_monitor(hdev, skb); + + if (atomic_read(&hdev->promisc)) { + /* Send copy to the sockets */ + hci_send_to_sock(hdev, skb); + } + + /* Get rid of skb owner, prior to sending to the driver. */ + skb_orphan(skb); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) { + kfree_skb(skb); + return -EINVAL; + } + + err = hdev->send(hdev, skb); + if (err < 0) { + bt_dev_err(hdev, "sending frame failed (%d)", err); + kfree_skb(skb); + return err; + } + + return 0; +} + +/* Send HCI command */ +int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, + const void *param) +{ + struct sk_buff *skb; + + BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen); + + skb = hci_prepare_cmd(hdev, opcode, plen, param); + if (!skb) { + bt_dev_err(hdev, "no memory for command"); + return -ENOMEM; + } + + /* Stand-alone HCI commands must be flagged as + * single-command requests. + */ + bt_cb(skb)->hci.req_flags |= HCI_REQ_START; + + skb_queue_tail(&hdev->cmd_q, skb); + queue_work(hdev->workqueue, &hdev->cmd_work); + + return 0; +} + +int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param) +{ + struct sk_buff *skb; + + if (hci_opcode_ogf(opcode) != 0x3f) { + /* A controller receiving a command shall respond with either + * a Command Status Event or a Command Complete Event. + * Therefore, all standard HCI commands must be sent via the + * standard API, using hci_send_cmd or hci_cmd_sync helpers. + * Some vendors do not comply with this rule for vendor-specific + * commands and do not return any event. We want to support + * unresponded commands for such cases only. 
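[Editor's note] The comment just above is why __hci_cmd_send() rejects (immediately below) anything outside OGF 0x3f: vendor commands may legitimately complete without any event. A kernel-context sketch of such a fire-and-forget command; the OCF 0x0001 and one-byte payload are hypothetical:

static int example_send_vendor_cmd(struct hci_dev *hdev)
{
        u8 param = 0x01;        /* hypothetical vendor parameter */

        return __hci_cmd_send(hdev, hci_opcode_pack(0x3f, 0x0001),
                              sizeof(param), &param);
}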
+ */ + bt_dev_err(hdev, "unresponded command not supported"); + return -EINVAL; + } + + skb = hci_prepare_cmd(hdev, opcode, plen, param); + if (!skb) { + bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", + opcode); + return -ENOMEM; + } + + hci_send_frame(hdev, skb); + + return 0; +} +EXPORT_SYMBOL(__hci_cmd_send); + +/* Get data from the previously sent command */ +void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode) +{ + struct hci_command_hdr *hdr; + + if (!hdev->sent_cmd) + return NULL; + + hdr = (void *) hdev->sent_cmd->data; + + if (hdr->opcode != cpu_to_le16(opcode)) + return NULL; + + BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode); + + return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; +} + +/* Get data from last received event */ +void *hci_recv_event_data(struct hci_dev *hdev, __u8 event) +{ + struct hci_event_hdr *hdr; + int offset; + + if (!hdev->recv_event) + return NULL; + + hdr = (void *)hdev->recv_event->data; + offset = sizeof(*hdr); + + if (hdr->evt != event) { + /* In case of LE metaevent check the subevent match */ + if (hdr->evt == HCI_EV_LE_META) { + struct hci_ev_le_meta *ev; + + ev = (void *)hdev->recv_event->data + offset; + offset += sizeof(*ev); + if (ev->subevent == event) + goto found; + } + return NULL; + } + +found: + bt_dev_dbg(hdev, "event 0x%2.2x", event); + + return hdev->recv_event->data + offset; +} + +/* Send ACL data */ +static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags) +{ + struct hci_acl_hdr *hdr; + int len = skb->len; + + skb_push(skb, HCI_ACL_HDR_SIZE); + skb_reset_transport_header(skb); + hdr = (struct hci_acl_hdr *)skb_transport_header(skb); + hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); + hdr->dlen = cpu_to_le16(len); +} + +static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue, + struct sk_buff *skb, __u16 flags) +{ + struct hci_conn *conn = chan->conn; + struct hci_dev *hdev = conn->hdev; + struct sk_buff *list; + + skb->len = skb_headlen(skb); + skb->data_len = 0; + + hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; + + switch (hdev->dev_type) { + case HCI_PRIMARY: + hci_add_acl_hdr(skb, conn->handle, flags); + break; + case HCI_AMP: + hci_add_acl_hdr(skb, chan->handle, flags); + break; + default: + bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); + return; + } + + list = skb_shinfo(skb)->frag_list; + if (!list) { + /* Non fragmented */ + BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); + + skb_queue_tail(queue, skb); + } else { + /* Fragmented */ + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + skb_shinfo(skb)->frag_list = NULL; + + /* Queue all fragments atomically. We need to use spin_lock_bh + * here because of 6LoWPAN links, as there this function is + * called from softirq and using normal spin lock could cause + * deadlocks. 
+ */ + spin_lock_bh(&queue->lock); + + __skb_queue_tail(queue, skb); + + flags &= ~ACL_START; + flags |= ACL_CONT; + do { + skb = list; list = list->next; + + hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT; + hci_add_acl_hdr(skb, conn->handle, flags); + + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + __skb_queue_tail(queue, skb); + } while (list); + + spin_unlock_bh(&queue->lock); + } +} + +void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags) +{ + struct hci_dev *hdev = chan->conn->hdev; + + BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags); + + hci_queue_acl(chan, &chan->data_q, skb, flags); + + queue_work(hdev->workqueue, &hdev->tx_work); +} + +/* Send SCO data */ +void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + struct hci_sco_hdr hdr; + + BT_DBG("%s len %d", hdev->name, skb->len); + + hdr.handle = cpu_to_le16(conn->handle); + hdr.dlen = skb->len; + + skb_push(skb, HCI_SCO_HDR_SIZE); + skb_reset_transport_header(skb); + memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE); + + hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; + + skb_queue_tail(&conn->data_q, skb); + queue_work(hdev->workqueue, &hdev->tx_work); +} + +/* Send ISO data */ +static void hci_add_iso_hdr(struct sk_buff *skb, __u16 handle, __u8 flags) +{ + struct hci_iso_hdr *hdr; + int len = skb->len; + + skb_push(skb, HCI_ISO_HDR_SIZE); + skb_reset_transport_header(skb); + hdr = (struct hci_iso_hdr *)skb_transport_header(skb); + hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags)); + hdr->dlen = cpu_to_le16(len); +} + +static void hci_queue_iso(struct hci_conn *conn, struct sk_buff_head *queue, + struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + struct sk_buff *list; + __u16 flags; + + skb->len = skb_headlen(skb); + skb->data_len = 0; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + + list = skb_shinfo(skb)->frag_list; + + flags = hci_iso_flags_pack(list ? ISO_START : ISO_SINGLE, 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + if (!list) { + /* Non fragmented */ + BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len); + + skb_queue_tail(queue, skb); + } else { + /* Fragmented */ + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + skb_shinfo(skb)->frag_list = NULL; + + __skb_queue_tail(queue, skb); + + do { + skb = list; list = list->next; + + hci_skb_pkt_type(skb) = HCI_ISODATA_PKT; + flags = hci_iso_flags_pack(list ? ISO_CONT : ISO_END, + 0x00); + hci_add_iso_hdr(skb, conn->handle, flags); + + BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len); + + __skb_queue_tail(queue, skb); + } while (list); + } +} + +void hci_send_iso(struct hci_conn *conn, struct sk_buff *skb) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("%s len %d", hdev->name, skb->len); + + hci_queue_iso(conn, &conn->data_q, skb); + + queue_work(hdev->workqueue, &hdev->tx_work); +} + +/* ---- HCI TX task (outgoing data) ---- */ + +/* HCI Connection scheduler */ +static inline void hci_quote_sent(struct hci_conn *conn, int num, int *quote) +{ + struct hci_dev *hdev; + int cnt, q; + + if (!conn) { + *quote = 0; + return; + } + + hdev = conn->hdev; + + switch (conn->type) { + case ACL_LINK: + cnt = hdev->acl_cnt; + break; + case AMP_LINK: + cnt = hdev->block_cnt; + break; + case SCO_LINK: + case ESCO_LINK: + cnt = hdev->sco_cnt; + break; + case LE_LINK: + cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt; + break; + case ISO_LINK: + cnt = hdev->iso_mtu ? hdev->iso_cnt : + hdev->le_mtu ? 
hdev->le_cnt : hdev->acl_cnt; + break; + default: + cnt = 0; + bt_dev_err(hdev, "unknown link type %d", conn->type); + } + + q = cnt / num; + *quote = q ? q : 1; +} + +static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, + int *quote) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *conn = NULL, *c; + unsigned int num = 0, min = ~0; + + /* We don't have to lock device here. Connections are always + * added and removed with TX task disabled. */ + + rcu_read_lock(); + + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type != type || skb_queue_empty(&c->data_q)) + continue; + + if (c->state != BT_CONNECTED && c->state != BT_CONFIG) + continue; + + num++; + + if (c->sent < min) { + min = c->sent; + conn = c; + } + + if (hci_conn_num(hdev, type) == num) + break; + } + + rcu_read_unlock(); + + hci_quote_sent(conn, num, quote); + + BT_DBG("conn %p quote %d", conn, *quote); + return conn; +} + +static void hci_link_tx_to(struct hci_dev *hdev, __u8 type) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *c; + + bt_dev_err(hdev, "link tx timeout"); + + rcu_read_lock(); + + /* Kill stalled connections */ + list_for_each_entry_rcu(c, &h->list, list) { + if (c->type == type && c->sent) { + bt_dev_err(hdev, "killing stalled connection %pMR", + &c->dst); + hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM); + } + } + + rcu_read_unlock(); +} + +static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type, + int *quote) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_chan *chan = NULL; + unsigned int num = 0, min = ~0, cur_prio = 0; + struct hci_conn *conn; + int conn_num = 0; + + BT_DBG("%s", hdev->name); + + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &h->list, list) { + struct hci_chan *tmp; + + if (conn->type != type) + continue; + + if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) + continue; + + conn_num++; + + list_for_each_entry_rcu(tmp, &conn->chan_list, list) { + struct sk_buff *skb; + + if (skb_queue_empty(&tmp->data_q)) + continue; + + skb = skb_peek(&tmp->data_q); + if (skb->priority < cur_prio) + continue; + + if (skb->priority > cur_prio) { + num = 0; + min = ~0; + cur_prio = skb->priority; + } + + num++; + + if (conn->sent < min) { + min = conn->sent; + chan = tmp; + } + } + + if (hci_conn_num(hdev, type) == conn_num) + break; + } + + rcu_read_unlock(); + + if (!chan) + return NULL; + + hci_quote_sent(chan->conn, num, quote); + + BT_DBG("chan %p quote %d", chan, *quote); + return chan; +} + +static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type) +{ + struct hci_conn_hash *h = &hdev->conn_hash; + struct hci_conn *conn; + int num = 0; + + BT_DBG("%s", hdev->name); + + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &h->list, list) { + struct hci_chan *chan; + + if (conn->type != type) + continue; + + if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) + continue; + + num++; + + list_for_each_entry_rcu(chan, &conn->chan_list, list) { + struct sk_buff *skb; + + if (chan->sent) { + chan->sent = 0; + continue; + } + + if (skb_queue_empty(&chan->data_q)) + continue; + + skb = skb_peek(&chan->data_q); + if (skb->priority >= HCI_PRIO_MAX - 1) + continue; + + skb->priority = HCI_PRIO_MAX - 1; + + BT_DBG("chan %p skb %p promoted to %d", chan, skb, + skb->priority); + } + + if (hci_conn_num(hdev, type) == num) + break; + } + + rcu_read_unlock(); + +} + +static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb) +{ + /* Calculate count of blocks used by this packet */ + 
return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
+}
+
+static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)
+{
+ unsigned long last_tx;
+
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ return;
+
+ switch (type) {
+ case LE_LINK:
+ last_tx = hdev->le_last_tx;
+ break;
+ default:
+ last_tx = hdev->acl_last_tx;
+ break;
+ }
+
+ /* tx timeout must be longer than maximum link supervision timeout
+ * (40.9 seconds)
+ */
+ if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT))
+ hci_link_tx_to(hdev, type);
+}
+
+/* Schedule SCO */
+static void hci_sched_sco(struct hci_dev *hdev)
+{
+ struct hci_conn *conn;
+ struct sk_buff *skb;
+ int quote;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!hci_conn_num(hdev, SCO_LINK))
+ return;
+
+ while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
+ while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+ BT_DBG("skb %p len %d", skb, skb->len);
+ hci_send_frame(hdev, skb);
+
+ conn->sent++;
+ if (conn->sent == ~0)
+ conn->sent = 0;
+ }
+ }
+}
+
+static void hci_sched_esco(struct hci_dev *hdev)
+{
+ struct hci_conn *conn;
+ struct sk_buff *skb;
+ int quote;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!hci_conn_num(hdev, ESCO_LINK))
+ return;
+
+ while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
+ &quote))) {
+ while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+ BT_DBG("skb %p len %d", skb, skb->len);
+ hci_send_frame(hdev, skb);
+
+ conn->sent++;
+ if (conn->sent == ~0)
+ conn->sent = 0;
+ }
+ }
+}
+
+static void hci_sched_acl_pkt(struct hci_dev *hdev)
+{
+ unsigned int cnt = hdev->acl_cnt;
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+ int quote;
+
+ __check_timeout(hdev, cnt, ACL_LINK);
+
+ while (hdev->acl_cnt &&
+ (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
+
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
+
+ hci_send_frame(hdev, skb);
+ hdev->acl_last_tx = jiffies;
+
+ hdev->acl_cnt--;
+ chan->sent++;
+ chan->conn->sent++;
+
+ /* Send pending SCO packets right away */
+ hci_sched_sco(hdev);
+ hci_sched_esco(hdev);
+ }
+ }
+
+ if (cnt != hdev->acl_cnt)
+ hci_prio_recalculate(hdev, ACL_LINK);
+}
+
+static void hci_sched_acl_blk(struct hci_dev *hdev)
+{
+ unsigned int cnt = hdev->block_cnt;
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+ int quote;
+ u8 type;
+
+ BT_DBG("%s", hdev->name);
+
+ if (hdev->dev_type == HCI_AMP)
+ type = AMP_LINK;
+ else
+ type = ACL_LINK;
+
+ __check_timeout(hdev, cnt, type);
+
+ while (hdev->block_cnt > 0 &&
+ (chan = hci_chan_sent(hdev, type, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
+ int blocks;
+
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
+
+ blocks = __get_blocks(hdev, skb);
+ if (blocks > hdev->block_cnt)
+ return;
+
+ hci_conn_enter_active_mode(chan->conn,
+ bt_cb(skb)->force_active);
+
+ hci_send_frame(hdev, skb);
+ hdev->acl_last_tx = jiffies;
+
+ hdev->block_cnt -= blocks;
+ quote -= blocks;
+
+ chan->sent += blocks;
+ chan->conn->sent += blocks;
+ }
+ }
+
+ if (cnt != hdev->block_cnt)
+ hci_prio_recalculate(hdev, type);
+}
+
+static void hci_sched_acl(struct hci_dev *hdev)
+{
+ BT_DBG("%s", hdev->name);
+
+ /* No ACL link over BR/EDR controller */
+ if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_PRIMARY)
+ return;
+
+ /* No AMP link over AMP controller */
+ if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
+ return;
+
+ switch (hdev->flow_ctl_mode) {
+ case HCI_FLOW_CTL_MODE_PACKET_BASED:
+ hci_sched_acl_pkt(hdev);
+ break;
+
+ case HCI_FLOW_CTL_MODE_BLOCK_BASED:
+ hci_sched_acl_blk(hdev);
+ break;
+ }
+}
+
+static void hci_sched_le(struct hci_dev *hdev)
+{
+ struct hci_chan *chan;
+ struct sk_buff *skb;
+ int quote, cnt, tmp;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!hci_conn_num(hdev, LE_LINK))
+ return;
+
+ cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
+
+ __check_timeout(hdev, cnt, LE_LINK);
+
+ tmp = cnt;
+ while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
+ u32 priority = (skb_peek(&chan->data_q))->priority;
+ while (quote-- && (skb = skb_peek(&chan->data_q))) {
+ BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
+ skb->len, skb->priority);
+
+ /* Stop if priority has changed */
+ if (skb->priority < priority)
+ break;
+
+ skb = skb_dequeue(&chan->data_q);
+
+ hci_send_frame(hdev, skb);
+ hdev->le_last_tx = jiffies;
+
+ cnt--;
+ chan->sent++;
+ chan->conn->sent++;
+
+ /* Send pending SCO packets right away */
+ hci_sched_sco(hdev);
+ hci_sched_esco(hdev);
+ }
+ }
+
+ if (hdev->le_pkts)
+ hdev->le_cnt = cnt;
+ else
+ hdev->acl_cnt = cnt;
+
+ if (cnt != tmp)
+ hci_prio_recalculate(hdev, LE_LINK);
+}
+
+/* Schedule CIS */
+static void hci_sched_iso(struct hci_dev *hdev)
+{
+ struct hci_conn *conn;
+ struct sk_buff *skb;
+ int quote, *cnt;
+
+ BT_DBG("%s", hdev->name);
+
+ if (!hci_conn_num(hdev, ISO_LINK))
+ return;
+
+ cnt = hdev->iso_pkts ? &hdev->iso_cnt :
+ hdev->le_pkts ? &hdev->le_cnt : &hdev->acl_cnt;
+ while (*cnt && (conn = hci_low_sent(hdev, ISO_LINK, &quote))) {
+ while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
+ BT_DBG("skb %p len %d", skb, skb->len);
+ hci_send_frame(hdev, skb);
+
+ conn->sent++;
+ if (conn->sent == ~0)
+ conn->sent = 0;
+ (*cnt)--;
+ }
+ }
+}
+
+static void hci_tx_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
+ struct sk_buff *skb;
+
+ BT_DBG("%s acl %d sco %d le %d iso %d", hdev->name, hdev->acl_cnt,
+ hdev->sco_cnt, hdev->le_cnt, hdev->iso_cnt);
+
+ if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
+ /* Schedule queues and send stuff to HCI driver */
+ hci_sched_sco(hdev);
+ hci_sched_esco(hdev);
+ hci_sched_iso(hdev);
+ hci_sched_acl(hdev);
+ hci_sched_le(hdev);
+ }
+
+ /* Send next queued raw (unknown type) packet */
+ while ((skb = skb_dequeue(&hdev->raw_q)))
+ hci_send_frame(hdev, skb);
+}
+
+/* ----- HCI RX task (incoming data processing) ----- */
+
+/* ACL data packet */
+static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_acl_hdr *hdr = (void *) skb->data;
+ struct hci_conn *conn;
+ __u16 handle, flags;
+
+ skb_pull(skb, HCI_ACL_HDR_SIZE);
+
+ handle = __le16_to_cpu(hdr->handle);
+ flags = hci_flags(handle);
+ handle = hci_handle(handle);
+
+ BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
+ handle, flags);
+
+ hdev->stat.acl_rx++;
+
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ hci_dev_unlock(hdev);
+
+ if (conn) {
+ hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
+
+ /* Send to upper protocol */
+ l2cap_recv_acldata(conn, skb, flags);
+ return;
+ } else {
+ bt_dev_err(hdev, "ACL packet for unknown connection handle %d",
+ handle);
+ }
+
+ kfree_skb(skb);
+}
+
+/* SCO data packet */
+static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_sco_hdr *hdr = (void *) skb->data;
+ struct hci_conn *conn;
+ __u16 handle, flags;
+
+ skb_pull(skb, HCI_SCO_HDR_SIZE);
+
+ handle = __le16_to_cpu(hdr->handle);
+ flags = hci_flags(handle);
+ handle = hci_handle(handle);
+
+ BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
+ handle, flags);
+
+ hdev->stat.sco_rx++;
+
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ hci_dev_unlock(hdev);
+
+ if (conn) {
+ /* Send to upper protocol */
+ bt_cb(skb)->sco.pkt_status = flags & 0x03;
+ sco_recv_scodata(conn, skb);
+ return;
+ } else {
+ bt_dev_err_ratelimited(hdev, "SCO packet for unknown connection handle %d",
+ handle);
+ }
+
+ kfree_skb(skb);
+}
+
+static void hci_isodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hci_iso_hdr *hdr;
+ struct hci_conn *conn;
+ __u16 handle, flags;
+
+ hdr = skb_pull_data(skb, sizeof(*hdr));
+ if (!hdr) {
+ bt_dev_err(hdev, "ISO packet too small");
+ goto drop;
+ }
+
+ handle = __le16_to_cpu(hdr->handle);
+ flags = hci_flags(handle);
+ handle = hci_handle(handle);
+
+ bt_dev_dbg(hdev, "len %d handle 0x%4.4x flags 0x%4.4x", skb->len,
+ handle, flags);
+
+ hci_dev_lock(hdev);
+ conn = hci_conn_hash_lookup_handle(hdev, handle);
+ hci_dev_unlock(hdev);
+
+ if (!conn) {
+ bt_dev_err(hdev, "ISO packet for unknown connection handle %d",
+ handle);
+ goto drop;
+ }
+
+ /* Send to upper protocol */
+ iso_recv(conn, skb, flags);
+ return;
+
+drop:
+ kfree_skb(skb);
+}
+
+static bool hci_req_is_complete(struct hci_dev *hdev)
+{
+ struct sk_buff *skb;
+
+ skb = skb_peek(&hdev->cmd_q);
+ if (!skb)
+ return true;
+
+ return (bt_cb(skb)->hci.req_flags & HCI_REQ_START);
+}
+
+static void hci_resend_last(struct hci_dev *hdev)
+{
+ struct hci_command_hdr *sent;
+ struct sk_buff *skb;
+ u16 opcode;
+
+ if (!hdev->sent_cmd)
+ return;
+
+ sent = (void *) hdev->sent_cmd->data;
+ opcode = __le16_to_cpu(sent->opcode);
+ if (opcode == HCI_OP_RESET)
+ return;
+
+ skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+ if (!skb)
+ return;
+
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+}
+
+void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
+ hci_req_complete_t *req_complete,
+ hci_req_complete_skb_t *req_complete_skb)
+{
+ struct sk_buff *skb;
+ unsigned long flags;
+
+ BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
+
+ /* If the completed command doesn't match the last one that was
+ * sent, we need to do special handling of it.
+ */
+ if (!hci_sent_cmd_data(hdev, opcode)) {
+ /* Some CSR based controllers generate a spontaneous
+ * reset complete event during init and any pending
+ * command will never be completed. In such a case we
+ * need to resend whatever was the last sent
+ * command.
+ */
+ if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
+ hci_resend_last(hdev);
+
+ return;
+ }
+
+ /* If we reach this point this event matches the last command sent */
+ hci_dev_clear_flag(hdev, HCI_CMD_PENDING);
+
+ /* If the command succeeded and there are still more commands in
+ * this request, the request is not yet complete.
+ */
+ if (!status && !hci_req_is_complete(hdev))
+ return;
+
+ /* If this was the last command in a request the complete
+ * callback would be found in hdev->sent_cmd instead of the
+ * command queue (hdev->cmd_q).
+ */
+ if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
+ *req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
+ return;
+ }
+
+ if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
+ *req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
+ return;
+ }
+
+ /* Remove all pending commands belonging to this request */
+ spin_lock_irqsave(&hdev->cmd_q.lock, flags);
+ while ((skb = __skb_dequeue(&hdev->cmd_q))) {
+ if (bt_cb(skb)->hci.req_flags & HCI_REQ_START) {
+ __skb_queue_head(&hdev->cmd_q, skb);
+ break;
+ }
+
+ if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+ else
+ *req_complete = bt_cb(skb)->hci.req_complete;
+ dev_kfree_skb_irq(skb);
+ }
+ spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
+}
+
+static void hci_rx_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
+ struct sk_buff *skb;
+
+ BT_DBG("%s", hdev->name);
+
+ /* The kcov_remote functions are used to collect packet-parsing
+ * coverage from this background thread and to associate that
+ * coverage with the syscall thread which originally injected
+ * the packet. This helps with fuzzing the kernel.
+ */
+ for (; (skb = skb_dequeue(&hdev->rx_q)); kcov_remote_stop()) {
+ kcov_remote_start_common(skb_get_kcov_handle(skb));
+
+ /* Send copy to monitor */
+ hci_send_to_monitor(hdev, skb);
+
+ if (atomic_read(&hdev->promisc)) {
+ /* Send copy to the sockets */
+ hci_send_to_sock(hdev, skb);
+ }
+
+ /* If the device has been opened in HCI_USER_CHANNEL,
+ * userspace has exclusive access to the device.
+ * While the device is in the HCI_INIT state, we still
+ * need to process incoming data packets so that its
+ * setup() can complete.
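+ *
+ * (Hence the check below drops everything for a user-channel
+ * device once init has completed.)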
+ */
+ if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL) &&
+ !test_bit(HCI_INIT, &hdev->flags)) {
+ kfree_skb(skb);
+ continue;
+ }
+
+ if (test_bit(HCI_INIT, &hdev->flags)) {
+ /* Don't process data packets in these states. */
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_ACLDATA_PKT:
+ case HCI_SCODATA_PKT:
+ case HCI_ISODATA_PKT:
+ kfree_skb(skb);
+ continue;
+ }
+ }
+
+ /* Process frame */
+ switch (hci_skb_pkt_type(skb)) {
+ case HCI_EVENT_PKT:
+ BT_DBG("%s Event packet", hdev->name);
+ hci_event_packet(hdev, skb);
+ break;
+
+ case HCI_ACLDATA_PKT:
+ BT_DBG("%s ACL data packet", hdev->name);
+ hci_acldata_packet(hdev, skb);
+ break;
+
+ case HCI_SCODATA_PKT:
+ BT_DBG("%s SCO data packet", hdev->name);
+ hci_scodata_packet(hdev, skb);
+ break;
+
+ case HCI_ISODATA_PKT:
+ BT_DBG("%s ISO data packet", hdev->name);
+ hci_isodata_packet(hdev, skb);
+ break;
+
+ default:
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
+static void hci_cmd_work(struct work_struct *work)
+{
+ struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
+ struct sk_buff *skb;
+
+ BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
+ atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
+
+ /* Send queued commands */
+ if (atomic_read(&hdev->cmd_cnt)) {
+ skb = skb_dequeue(&hdev->cmd_q);
+ if (!skb)
+ return;
+
+ kfree_skb(hdev->sent_cmd);
+
+ hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+ if (hdev->sent_cmd) {
+ int res;
+ if (hci_req_status_pend(hdev))
+ hci_dev_set_flag(hdev, HCI_CMD_PENDING);
+ atomic_dec(&hdev->cmd_cnt);
+
+ res = hci_send_frame(hdev, skb);
+ if (res < 0)
+ __hci_cmd_sync_cancel(hdev, -res);
+
+ rcu_read_lock();
+ if (test_bit(HCI_RESET, &hdev->flags) ||
+ hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+ cancel_delayed_work(&hdev->cmd_timer);
+ else
+ queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+ HCI_CMD_TIMEOUT);
+ rcu_read_unlock();
+ } else {
+ skb_queue_head(&hdev->cmd_q, skb);
+ queue_work(hdev->workqueue, &hdev->cmd_work);
+ }
+ }
+}
diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c
new file mode 100644
index 000000000..6124b3425
--- /dev/null
+++ b/net/bluetooth/hci_debugfs.c
@@ -0,0 +1,1379 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2014 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/ + +#include <linux/debugfs.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +#include "smp.h" +#include "hci_request.h" +#include "hci_debugfs.h" + +#define DEFINE_QUIRK_ATTRIBUTE(__name, __quirk) \ +static ssize_t __name ## _read(struct file *file, \ + char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + struct hci_dev *hdev = file->private_data; \ + char buf[3]; \ + \ + buf[0] = test_bit(__quirk, &hdev->quirks) ? 'Y' : 'N'; \ + buf[1] = '\n'; \ + buf[2] = '\0'; \ + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); \ +} \ + \ +static ssize_t __name ## _write(struct file *file, \ + const char __user *user_buf, \ + size_t count, loff_t *ppos) \ +{ \ + struct hci_dev *hdev = file->private_data; \ + bool enable; \ + int err; \ + \ + if (test_bit(HCI_UP, &hdev->flags)) \ + return -EBUSY; \ + \ + err = kstrtobool_from_user(user_buf, count, &enable); \ + if (err) \ + return err; \ + \ + if (enable == test_bit(__quirk, &hdev->quirks)) \ + return -EALREADY; \ + \ + change_bit(__quirk, &hdev->quirks); \ + \ + return count; \ +} \ + \ +static const struct file_operations __name ## _fops = { \ + .open = simple_open, \ + .read = __name ## _read, \ + .write = __name ## _write, \ + .llseek = default_llseek, \ +} \ + +#define DEFINE_INFO_ATTRIBUTE(__name, __field) \ +static int __name ## _show(struct seq_file *f, void *ptr) \ +{ \ + struct hci_dev *hdev = f->private; \ + \ + hci_dev_lock(hdev); \ + seq_printf(f, "%s\n", hdev->__field ? : ""); \ + hci_dev_unlock(hdev); \ + \ + return 0; \ +} \ + \ +DEFINE_SHOW_ATTRIBUTE(__name) + +static int features_show(struct seq_file *f, void *ptr) +{ + struct hci_dev *hdev = f->private; + u8 p; + + hci_dev_lock(hdev); + for (p = 0; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) + seq_printf(f, "%2u: %8ph\n", p, hdev->features[p]); + if (lmp_le_capable(hdev)) + seq_printf(f, "LE: %8ph\n", hdev->le_features); + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(features); + +static int device_id_show(struct seq_file *f, void *ptr) +{ + struct hci_dev *hdev = f->private; + + hci_dev_lock(hdev); + seq_printf(f, "%4.4x:%4.4x:%4.4x:%4.4x\n", hdev->devid_source, + hdev->devid_vendor, hdev->devid_product, hdev->devid_version); + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(device_id); + +static int device_list_show(struct seq_file *f, void *ptr) +{ + struct hci_dev *hdev = f->private; + struct hci_conn_params *p; + struct bdaddr_list *b; + + hci_dev_lock(hdev); + list_for_each_entry(b, &hdev->accept_list, list) + seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); + list_for_each_entry(p, &hdev->le_conn_params, list) { + seq_printf(f, "%pMR (type %u) %u\n", &p->addr, p->addr_type, + p->auto_connect); + } + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(device_list); + +static int blacklist_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + struct bdaddr_list *b; + + hci_dev_lock(hdev); + list_for_each_entry(b, &hdev->reject_list, list) + seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type); + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(blacklist); + +static int blocked_keys_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + struct blocked_key *key; + + rcu_read_lock(); + list_for_each_entry_rcu(key, &hdev->blocked_keys, list) + seq_printf(f, "%u %*phN\n", key->type, 16, key->val); + rcu_read_unlock(); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(blocked_keys); + +static int 
uuids_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + struct bt_uuid *uuid; + + hci_dev_lock(hdev); + list_for_each_entry(uuid, &hdev->uuids, list) { + u8 i, val[16]; + + /* The Bluetooth UUID values are stored in big endian, + * but with reversed byte order. So convert them into + * the right order for the %pUb modifier. + */ + for (i = 0; i < 16; i++) + val[i] = uuid->uuid[15 - i]; + + seq_printf(f, "%pUb\n", val); + } + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(uuids); + +static int remote_oob_show(struct seq_file *f, void *ptr) +{ + struct hci_dev *hdev = f->private; + struct oob_data *data; + + hci_dev_lock(hdev); + list_for_each_entry(data, &hdev->remote_oob_data, list) { + seq_printf(f, "%pMR (type %u) %u %*phN %*phN %*phN %*phN\n", + &data->bdaddr, data->bdaddr_type, data->present, + 16, data->hash192, 16, data->rand192, + 16, data->hash256, 16, data->rand256); + } + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(remote_oob); + +static int conn_info_min_age_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val == 0 || val > hdev->conn_info_max_age) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->conn_info_min_age = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int conn_info_min_age_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->conn_info_min_age; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(conn_info_min_age_fops, conn_info_min_age_get, + conn_info_min_age_set, "%llu\n"); + +static int conn_info_max_age_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val == 0 || val < hdev->conn_info_min_age) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->conn_info_max_age = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int conn_info_max_age_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->conn_info_max_age; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(conn_info_max_age_fops, conn_info_max_age_get, + conn_info_max_age_set, "%llu\n"); + +static ssize_t use_debug_keys_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS) ? 'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static const struct file_operations use_debug_keys_fops = { + .open = simple_open, + .read = use_debug_keys_read, + .llseek = default_llseek, +}; + +static ssize_t sc_only_mode_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_SC_ONLY) ? 
'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static const struct file_operations sc_only_mode_fops = { + .open = simple_open, + .read = sc_only_mode_read, + .llseek = default_llseek, +}; + +DEFINE_INFO_ATTRIBUTE(hardware_info, hw_info); +DEFINE_INFO_ATTRIBUTE(firmware_info, fw_info); + +void hci_debugfs_create_common(struct hci_dev *hdev) +{ + debugfs_create_file("features", 0444, hdev->debugfs, hdev, + &features_fops); + debugfs_create_u16("manufacturer", 0444, hdev->debugfs, + &hdev->manufacturer); + debugfs_create_u8("hci_version", 0444, hdev->debugfs, &hdev->hci_ver); + debugfs_create_u16("hci_revision", 0444, hdev->debugfs, &hdev->hci_rev); + debugfs_create_u8("hardware_error", 0444, hdev->debugfs, + &hdev->hw_error_code); + debugfs_create_file("device_id", 0444, hdev->debugfs, hdev, + &device_id_fops); + + debugfs_create_file("device_list", 0444, hdev->debugfs, hdev, + &device_list_fops); + debugfs_create_file("blacklist", 0444, hdev->debugfs, hdev, + &blacklist_fops); + debugfs_create_file("blocked_keys", 0444, hdev->debugfs, hdev, + &blocked_keys_fops); + debugfs_create_file("uuids", 0444, hdev->debugfs, hdev, &uuids_fops); + debugfs_create_file("remote_oob", 0400, hdev->debugfs, hdev, + &remote_oob_fops); + + debugfs_create_file("conn_info_min_age", 0644, hdev->debugfs, hdev, + &conn_info_min_age_fops); + debugfs_create_file("conn_info_max_age", 0644, hdev->debugfs, hdev, + &conn_info_max_age_fops); + + if (lmp_ssp_capable(hdev) || lmp_le_capable(hdev)) + debugfs_create_file("use_debug_keys", 0444, hdev->debugfs, + hdev, &use_debug_keys_fops); + + if (lmp_sc_capable(hdev) || lmp_le_capable(hdev)) + debugfs_create_file("sc_only_mode", 0444, hdev->debugfs, + hdev, &sc_only_mode_fops); + + if (hdev->hw_info) + debugfs_create_file("hardware_info", 0444, hdev->debugfs, + hdev, &hardware_info_fops); + + if (hdev->fw_info) + debugfs_create_file("firmware_info", 0444, hdev->debugfs, + hdev, &firmware_info_fops); +} + +static int inquiry_cache_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + struct discovery_state *cache = &hdev->discovery; + struct inquiry_entry *e; + + hci_dev_lock(hdev); + + list_for_each_entry(e, &cache->all, all) { + struct inquiry_data *data = &e->data; + seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n", + &data->bdaddr, + data->pscan_rep_mode, data->pscan_period_mode, + data->pscan_mode, data->dev_class[2], + data->dev_class[1], data->dev_class[0], + __le16_to_cpu(data->clock_offset), + data->rssi, data->ssp_mode, e->timestamp); + } + + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(inquiry_cache); + +static int link_keys_show(struct seq_file *f, void *ptr) +{ + struct hci_dev *hdev = f->private; + struct link_key *key; + + rcu_read_lock(); + list_for_each_entry_rcu(key, &hdev->link_keys, list) + seq_printf(f, "%pMR %u %*phN %u\n", &key->bdaddr, key->type, + HCI_LINK_KEY_SIZE, key->val, key->pin_len); + rcu_read_unlock(); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(link_keys); + +static int dev_class_show(struct seq_file *f, void *ptr) +{ + struct hci_dev *hdev = f->private; + + hci_dev_lock(hdev); + seq_printf(f, "0x%.2x%.2x%.2x\n", hdev->dev_class[2], + hdev->dev_class[1], hdev->dev_class[0]); + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dev_class); + +static int voice_setting_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->voice_setting; + hci_dev_unlock(hdev); + + 
return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(voice_setting_fops, voice_setting_get, + NULL, "0x%4.4llx\n"); + +static ssize_t ssp_debug_mode_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hdev->ssp_debug_mode ? 'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static const struct file_operations ssp_debug_mode_fops = { + .open = simple_open, + .read = ssp_debug_mode_read, + .llseek = default_llseek, +}; + +static int auto_accept_delay_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + hdev->auto_accept_delay = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int min_encrypt_key_size_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val < 1 || val > 16) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->min_enc_key_size = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int min_encrypt_key_size_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->min_enc_key_size; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(min_encrypt_key_size_fops, + min_encrypt_key_size_get, + min_encrypt_key_size_set, "%llu\n"); + +static int auto_accept_delay_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->auto_accept_delay; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(auto_accept_delay_fops, auto_accept_delay_get, + auto_accept_delay_set, "%llu\n"); + +static ssize_t force_bredr_smp_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP) ? 
'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_bredr_smp_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + err = smp_force_bredr(hdev, enable); + if (err) + return err; + + return count; +} + +static const struct file_operations force_bredr_smp_fops = { + .open = simple_open, + .read = force_bredr_smp_read, + .write = force_bredr_smp_write, + .llseek = default_llseek, +}; + +static int idle_timeout_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val != 0 && (val < 500 || val > 3600000)) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->idle_timeout = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int idle_timeout_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->idle_timeout; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(idle_timeout_fops, idle_timeout_get, + idle_timeout_set, "%llu\n"); + +static int sniff_min_interval_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val == 0 || val % 2 || val > hdev->sniff_max_interval) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->sniff_min_interval = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int sniff_min_interval_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->sniff_min_interval; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(sniff_min_interval_fops, sniff_min_interval_get, + sniff_min_interval_set, "%llu\n"); + +static int sniff_max_interval_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val == 0 || val % 2 || val < hdev->sniff_min_interval) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->sniff_max_interval = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int sniff_max_interval_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->sniff_max_interval; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(sniff_max_interval_fops, sniff_max_interval_get, + sniff_max_interval_set, "%llu\n"); + +void hci_debugfs_create_bredr(struct hci_dev *hdev) +{ + debugfs_create_file("inquiry_cache", 0444, hdev->debugfs, hdev, + &inquiry_cache_fops); + debugfs_create_file("link_keys", 0400, hdev->debugfs, hdev, + &link_keys_fops); + debugfs_create_file("dev_class", 0444, hdev->debugfs, hdev, + &dev_class_fops); + debugfs_create_file("voice_setting", 0444, hdev->debugfs, hdev, + &voice_setting_fops); + + /* If the controller does not support BR/EDR Secure Connections + * feature, then the BR/EDR SMP channel shall not be present. + * + * To test this with Bluetooth 4.0 controllers, create a debugfs + * switch that allows forcing BR/EDR SMP support and accepting + * cross-transport pairing on non-AES encrypted connections. 
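+ *
+ * A typical (illustrative) invocation from userspace, assuming debugfs
+ * is mounted at the usual location, would be:
+ *
+ *	echo Y > /sys/kernel/debug/bluetooth/hci0/force_bredr_smp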
+ */ + if (!lmp_sc_capable(hdev)) + debugfs_create_file("force_bredr_smp", 0644, hdev->debugfs, + hdev, &force_bredr_smp_fops); + + if (lmp_ssp_capable(hdev)) { + debugfs_create_file("ssp_debug_mode", 0444, hdev->debugfs, + hdev, &ssp_debug_mode_fops); + debugfs_create_file("min_encrypt_key_size", 0644, hdev->debugfs, + hdev, &min_encrypt_key_size_fops); + debugfs_create_file("auto_accept_delay", 0644, hdev->debugfs, + hdev, &auto_accept_delay_fops); + } + + if (lmp_sniff_capable(hdev)) { + debugfs_create_file("idle_timeout", 0644, hdev->debugfs, + hdev, &idle_timeout_fops); + debugfs_create_file("sniff_min_interval", 0644, hdev->debugfs, + hdev, &sniff_min_interval_fops); + debugfs_create_file("sniff_max_interval", 0644, hdev->debugfs, + hdev, &sniff_max_interval_fops); + } +} + +static int identity_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + bdaddr_t addr; + u8 addr_type; + + hci_dev_lock(hdev); + + hci_copy_identity_address(hdev, &addr, &addr_type); + + seq_printf(f, "%pMR (type %u) %*phN %pMR\n", &addr, addr_type, + 16, hdev->irk, &hdev->rpa); + + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(identity); + +static int rpa_timeout_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + /* Require the RPA timeout to be at least 30 seconds and at most + * 24 hours. + */ + if (val < 30 || val > (60 * 60 * 24)) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->rpa_timeout = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int rpa_timeout_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->rpa_timeout; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(rpa_timeout_fops, rpa_timeout_get, + rpa_timeout_set, "%llu\n"); + +static int random_address_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + + hci_dev_lock(hdev); + seq_printf(f, "%pMR\n", &hdev->random_addr); + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(random_address); + +static int static_address_show(struct seq_file *f, void *p) +{ + struct hci_dev *hdev = f->private; + + hci_dev_lock(hdev); + seq_printf(f, "%pMR\n", &hdev->static_addr); + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(static_address); + +static ssize_t force_static_address_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ? 
'Y' : 'N';
+ buf[1] = '\n';
+ buf[2] = '\0';
+ return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
+}
+
+static ssize_t force_static_address_write(struct file *file,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct hci_dev *hdev = file->private_data;
+ bool enable;
+ int err;
+
+ if (test_bit(HCI_UP, &hdev->flags))
+ return -EBUSY;
+
+ err = kstrtobool_from_user(user_buf, count, &enable);
+ if (err)
+ return err;
+
+ if (enable == hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR))
+ return -EALREADY;
+
+ hci_dev_change_flag(hdev, HCI_FORCE_STATIC_ADDR);
+
+ return count;
+}
+
+static const struct file_operations force_static_address_fops = {
+ .open = simple_open,
+ .read = force_static_address_read,
+ .write = force_static_address_write,
+ .llseek = default_llseek,
+};
+
+static int white_list_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct bdaddr_list *b;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->le_accept_list, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(white_list);
+
+static int resolv_list_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct bdaddr_list *b;
+
+ hci_dev_lock(hdev);
+ list_for_each_entry(b, &hdev->le_resolv_list, list)
+ seq_printf(f, "%pMR (type %u)\n", &b->bdaddr, b->bdaddr_type);
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(resolv_list);
+
+static int identity_resolving_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct smp_irk *irk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(irk, &hdev->identity_resolving_keys, list) {
+ seq_printf(f, "%pMR (type %u) %*phN %pMR\n",
+ &irk->bdaddr, irk->addr_type,
+ 16, irk->val, &irk->rpa);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(identity_resolving_keys);
+
+static int long_term_keys_show(struct seq_file *f, void *ptr)
+{
+ struct hci_dev *hdev = f->private;
+ struct smp_ltk *ltk;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(ltk, &hdev->long_term_keys, list)
+ seq_printf(f, "%pMR (type %u) %u 0x%02x %u %.4x %.16llx %*phN\n",
+ &ltk->bdaddr, ltk->bdaddr_type, ltk->authenticated,
+ ltk->type, ltk->enc_size, __le16_to_cpu(ltk->ediv),
+ __le64_to_cpu(ltk->rand), 16, ltk->val);
+ rcu_read_unlock();
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(long_term_keys);
+
+static int conn_min_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0006 || val > 0x0c80 || val > hdev->le_conn_max_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_conn_min_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_min_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_conn_min_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(conn_min_interval_fops, conn_min_interval_get,
+ conn_min_interval_set, "%llu\n");
+
+static int conn_max_interval_set(void *data, u64 val)
+{
+ struct hci_dev *hdev = data;
+
+ if (val < 0x0006 || val > 0x0c80 || val < hdev->le_conn_min_interval)
+ return -EINVAL;
+
+ hci_dev_lock(hdev);
+ hdev->le_conn_max_interval = val;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
+static int conn_max_interval_get(void *data, u64 *val)
+{
+ struct hci_dev *hdev = data;
+
+ hci_dev_lock(hdev);
+ *val = hdev->le_conn_max_interval;
+ hci_dev_unlock(hdev);
+
+ return 0;
+}
+
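+/* Each DEFINE_DEBUGFS_ATTRIBUTE() below packages a getter, an optional
+ * setter and a printf-style format string into a file_operations
+ * structure. The resulting fops is registered later from
+ * hci_debugfs_create_le(); as a sketch (mirroring the registration code
+ * further down):
+ *
+ *	debugfs_create_file("conn_max_interval", 0644, hdev->debugfs,
+ *			    hdev, &conn_max_interval_fops);
+ */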
+DEFINE_DEBUGFS_ATTRIBUTE(conn_max_interval_fops, conn_max_interval_get, + conn_max_interval_set, "%llu\n"); + +static int conn_latency_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val > 0x01f3) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->le_conn_latency = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int conn_latency_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->le_conn_latency; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(conn_latency_fops, conn_latency_get, + conn_latency_set, "%llu\n"); + +static int supervision_timeout_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val < 0x000a || val > 0x0c80) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->le_supv_timeout = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int supervision_timeout_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->le_supv_timeout; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(supervision_timeout_fops, supervision_timeout_get, + supervision_timeout_set, "%llu\n"); + +static int adv_channel_map_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val < 0x01 || val > 0x07) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->le_adv_channel_map = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int adv_channel_map_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->le_adv_channel_map; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(adv_channel_map_fops, adv_channel_map_get, + adv_channel_map_set, "%llu\n"); + +static int adv_min_interval_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val < 0x0020 || val > 0x4000 || val > hdev->le_adv_max_interval) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->le_adv_min_interval = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int adv_min_interval_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->le_adv_min_interval; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(adv_min_interval_fops, adv_min_interval_get, + adv_min_interval_set, "%llu\n"); + +static int adv_max_interval_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val < 0x0020 || val > 0x4000 || val < hdev->le_adv_min_interval) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->le_adv_max_interval = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int adv_max_interval_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->le_adv_max_interval; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(adv_max_interval_fops, adv_max_interval_get, + adv_max_interval_set, "%llu\n"); + +static int min_key_size_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + if (val > hdev->le_max_key_size || val < SMP_MIN_ENC_KEY_SIZE) { + hci_dev_unlock(hdev); + return -EINVAL; + } + + hdev->le_min_key_size = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int min_key_size_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->le_min_key_size; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(min_key_size_fops, min_key_size_get, + min_key_size_set, "%llu\n"); + +static int max_key_size_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + if 
(val > SMP_MAX_ENC_KEY_SIZE || val < hdev->le_min_key_size) { + hci_dev_unlock(hdev); + return -EINVAL; + } + + hdev->le_max_key_size = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int max_key_size_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->le_max_key_size; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(max_key_size_fops, max_key_size_get, + max_key_size_set, "%llu\n"); + +static int auth_payload_timeout_set(void *data, u64 val) +{ + struct hci_dev *hdev = data; + + if (val < 0x0001 || val > 0xffff) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->auth_payload_timeout = val; + hci_dev_unlock(hdev); + + return 0; +} + +static int auth_payload_timeout_get(void *data, u64 *val) +{ + struct hci_dev *hdev = data; + + hci_dev_lock(hdev); + *val = hdev->auth_payload_timeout; + hci_dev_unlock(hdev); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(auth_payload_timeout_fops, + auth_payload_timeout_get, + auth_payload_timeout_set, "%llu\n"); + +static ssize_t force_no_mitm_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_FORCE_NO_MITM) ? 'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t force_no_mitm_write(struct file *file, + const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[32]; + size_t buf_size = min(count, (sizeof(buf) - 1)); + bool enable; + + if (copy_from_user(buf, user_buf, buf_size)) + return -EFAULT; + + buf[buf_size] = '\0'; + if (strtobool(buf, &enable)) + return -EINVAL; + + if (enable == hci_dev_test_flag(hdev, HCI_FORCE_NO_MITM)) + return -EALREADY; + + hci_dev_change_flag(hdev, HCI_FORCE_NO_MITM); + + return count; +} + +static const struct file_operations force_no_mitm_fops = { + .open = simple_open, + .read = force_no_mitm_read, + .write = force_no_mitm_write, + .llseek = default_llseek, +}; + +DEFINE_QUIRK_ATTRIBUTE(quirk_strict_duplicate_filter, + HCI_QUIRK_STRICT_DUPLICATE_FILTER); +DEFINE_QUIRK_ATTRIBUTE(quirk_simultaneous_discovery, + HCI_QUIRK_SIMULTANEOUS_DISCOVERY); + +void hci_debugfs_create_le(struct hci_dev *hdev) +{ + debugfs_create_file("identity", 0400, hdev->debugfs, hdev, + &identity_fops); + debugfs_create_file("rpa_timeout", 0644, hdev->debugfs, hdev, + &rpa_timeout_fops); + debugfs_create_file("random_address", 0444, hdev->debugfs, hdev, + &random_address_fops); + debugfs_create_file("static_address", 0444, hdev->debugfs, hdev, + &static_address_fops); + + /* For controllers with a public address, provide a debug + * option to force the usage of the configured static + * address. By default the public address is used. 
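+ *
+ * Illustrative userspace usage (the debugfs mount point may vary):
+ *
+ *	echo Y > /sys/kernel/debug/bluetooth/hci0/force_static_address
+ *
+ * Note that the write handler below rejects changes with -EBUSY while
+ * the controller is up.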
+ */ + if (bacmp(&hdev->bdaddr, BDADDR_ANY)) + debugfs_create_file("force_static_address", 0644, + hdev->debugfs, hdev, + &force_static_address_fops); + + debugfs_create_u8("white_list_size", 0444, hdev->debugfs, + &hdev->le_accept_list_size); + debugfs_create_file("white_list", 0444, hdev->debugfs, hdev, + &white_list_fops); + debugfs_create_u8("resolv_list_size", 0444, hdev->debugfs, + &hdev->le_resolv_list_size); + debugfs_create_file("resolv_list", 0444, hdev->debugfs, hdev, + &resolv_list_fops); + debugfs_create_file("identity_resolving_keys", 0400, hdev->debugfs, + hdev, &identity_resolving_keys_fops); + debugfs_create_file("long_term_keys", 0400, hdev->debugfs, hdev, + &long_term_keys_fops); + debugfs_create_file("conn_min_interval", 0644, hdev->debugfs, hdev, + &conn_min_interval_fops); + debugfs_create_file("conn_max_interval", 0644, hdev->debugfs, hdev, + &conn_max_interval_fops); + debugfs_create_file("conn_latency", 0644, hdev->debugfs, hdev, + &conn_latency_fops); + debugfs_create_file("supervision_timeout", 0644, hdev->debugfs, hdev, + &supervision_timeout_fops); + debugfs_create_file("adv_channel_map", 0644, hdev->debugfs, hdev, + &adv_channel_map_fops); + debugfs_create_file("adv_min_interval", 0644, hdev->debugfs, hdev, + &adv_min_interval_fops); + debugfs_create_file("adv_max_interval", 0644, hdev->debugfs, hdev, + &adv_max_interval_fops); + debugfs_create_u16("discov_interleaved_timeout", 0644, hdev->debugfs, + &hdev->discov_interleaved_timeout); + debugfs_create_file("min_key_size", 0644, hdev->debugfs, hdev, + &min_key_size_fops); + debugfs_create_file("max_key_size", 0644, hdev->debugfs, hdev, + &max_key_size_fops); + debugfs_create_file("auth_payload_timeout", 0644, hdev->debugfs, hdev, + &auth_payload_timeout_fops); + debugfs_create_file("force_no_mitm", 0644, hdev->debugfs, hdev, + &force_no_mitm_fops); + + debugfs_create_file("quirk_strict_duplicate_filter", 0644, + hdev->debugfs, hdev, + &quirk_strict_duplicate_filter_fops); + debugfs_create_file("quirk_simultaneous_discovery", 0644, + hdev->debugfs, hdev, + &quirk_simultaneous_discovery_fops); +} + +void hci_debugfs_create_conn(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + char name[6]; + + if (IS_ERR_OR_NULL(hdev->debugfs) || conn->debugfs) + return; + + snprintf(name, sizeof(name), "%u", conn->handle); + conn->debugfs = debugfs_create_dir(name, hdev->debugfs); +} + +static ssize_t dut_mode_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_DUT_MODE) ? 
'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t dut_mode_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + struct sk_buff *skb; + bool enable; + int err; + + if (!test_bit(HCI_UP, &hdev->flags)) + return -ENETDOWN; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + if (enable == hci_dev_test_flag(hdev, HCI_DUT_MODE)) + return -EALREADY; + + hci_req_sync_lock(hdev); + if (enable) + skb = __hci_cmd_sync(hdev, HCI_OP_ENABLE_DUT_MODE, 0, NULL, + HCI_CMD_TIMEOUT); + else + skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, + HCI_CMD_TIMEOUT); + hci_req_sync_unlock(hdev); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + kfree_skb(skb); + + hci_dev_change_flag(hdev, HCI_DUT_MODE); + + return count; +} + +static const struct file_operations dut_mode_fops = { + .open = simple_open, + .read = dut_mode_read, + .write = dut_mode_write, + .llseek = default_llseek, +}; + +static ssize_t vendor_diag_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + char buf[3]; + + buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y' : 'N'; + buf[1] = '\n'; + buf[2] = '\0'; + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); +} + +static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct hci_dev *hdev = file->private_data; + bool enable; + int err; + + err = kstrtobool_from_user(user_buf, count, &enable); + if (err) + return err; + + /* When the diagnostic flags are not persistent and the transport + * is not active or in user channel operation, then there is no need + * for the vendor callback. Instead just store the desired value and + * the setting will be programmed when the controller gets powered on. + */ + if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && + (!test_bit(HCI_RUNNING, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_USER_CHANNEL))) + goto done; + + hci_req_sync_lock(hdev); + err = hdev->set_diag(hdev, enable); + hci_req_sync_unlock(hdev); + + if (err < 0) + return err; + +done: + if (enable) + hci_dev_set_flag(hdev, HCI_VENDOR_DIAG); + else + hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG); + + return count; +} + +static const struct file_operations vendor_diag_fops = { + .open = simple_open, + .read = vendor_diag_read, + .write = vendor_diag_write, + .llseek = default_llseek, +}; + +void hci_debugfs_create_basic(struct hci_dev *hdev) +{ + debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, + &dut_mode_fops); + + if (hdev->set_diag) + debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev, + &vendor_diag_fops); +} diff --git a/net/bluetooth/hci_debugfs.h b/net/bluetooth/hci_debugfs.h new file mode 100644 index 000000000..9a8a7c93b --- /dev/null +++ b/net/bluetooth/hci_debugfs.h @@ -0,0 +1,53 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2014 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#if IS_ENABLED(CONFIG_BT_DEBUGFS) + +void hci_debugfs_create_common(struct hci_dev *hdev); +void hci_debugfs_create_bredr(struct hci_dev *hdev); +void hci_debugfs_create_le(struct hci_dev *hdev); +void hci_debugfs_create_conn(struct hci_conn *conn); +void hci_debugfs_create_basic(struct hci_dev *hdev); + +#else + +static inline void hci_debugfs_create_common(struct hci_dev *hdev) +{ +} + +static inline void hci_debugfs_create_bredr(struct hci_dev *hdev) +{ +} + +static inline void hci_debugfs_create_le(struct hci_dev *hdev) +{ +} + +static inline void hci_debugfs_create_conn(struct hci_conn *conn) +{ +} + +static inline void hci_debugfs_create_basic(struct hci_dev *hdev) +{ +} + +#endif diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c new file mode 100644 index 000000000..56ecc5f97 --- /dev/null +++ b/net/bluetooth/hci_event.c @@ -0,0 +1,7576 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. + + Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth HCI event handling. 
*/ + +#include <asm/unaligned.h> +#include <linux/crypto.h> +#include <crypto/algapi.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/mgmt.h> + +#include "hci_request.h" +#include "hci_debugfs.h" +#include "hci_codec.h" +#include "a2mp.h" +#include "amp.h" +#include "smp.h" +#include "msft.h" +#include "eir.h" + +#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ + "\x00\x00\x00\x00\x00\x00\x00\x00" + +#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000) + +/* Handle HCI Event packets */ + +static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, + u8 ev, size_t len) +{ + void *data; + + data = skb_pull_data(skb, len); + if (!data) + bt_dev_err(hdev, "Malformed Event: 0x%2.2x", ev); + + return data; +} + +static void *hci_cc_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, + u16 op, size_t len) +{ + void *data; + + data = skb_pull_data(skb, len); + if (!data) + bt_dev_err(hdev, "Malformed Command Complete: 0x%4.4x", op); + + return data; +} + +static void *hci_le_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb, + u8 ev, size_t len) +{ + void *data; + + data = skb_pull_data(skb, len); + if (!data) + bt_dev_err(hdev, "Malformed LE Event: 0x%2.2x", ev); + + return data; +} + +static u8 hci_cc_inquiry_cancel(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + /* It is possible that we receive Inquiry Complete event right + * before we receive Inquiry Cancel Command Complete event, in + * which case the latter event should have status of Command + * Disallowed (0x0c). This should not be treated as error, since + * we actually achieve what Inquiry Cancel wants to achieve, + * which is to end the last Inquiry session. + */ + if (rp->status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) { + bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command"); + rp->status = 0x00; + } + + if (rp->status) + return rp->status; + + clear_bit(HCI_INQUIRY, &hdev->flags); + smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ + wake_up_bit(&hdev->flags, HCI_INQUIRY); + + hci_dev_lock(hdev); + /* Set discovery state to stopped if we're not doing LE active + * scanning. 
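+ * If an LE active scan is still in progress, the discovery state is + * left untouched here and is updated once that scan gets disabled.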
+ */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || + hdev->le_scan_type != LE_SCAN_ACTIVE) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + hci_dev_unlock(hdev); + + hci_conn_check_pending(hdev); + + return rp->status; +} + +static u8 hci_cc_periodic_inq(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hci_dev_set_flag(hdev, HCI_PERIODIC_INQ); + + return rp->status; +} + +static u8 hci_cc_exit_periodic_inq(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hci_dev_clear_flag(hdev, HCI_PERIODIC_INQ); + + hci_conn_check_pending(hdev); + + return rp->status; +} + +static u8 hci_cc_remote_name_req_cancel(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + return rp->status; +} + +static u8 hci_cc_role_discovery(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_role_discovery *rp = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) + conn->role = rp->role; + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_read_link_policy(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_link_policy *rp = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) + conn->link_policy = __le16_to_cpu(rp->policy); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_write_link_policy(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_write_link_policy *rp = data; + struct hci_conn *conn; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LINK_POLICY); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) + conn->link_policy = get_unaligned_le16(sent + 2); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_read_def_link_policy(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_def_link_policy *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->link_policy = __le16_to_cpu(rp->policy); + + return rp->status; +} + +static u8 hci_cc_write_def_link_policy(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_LINK_POLICY); + if (!sent) + return rp->status; + + hdev->link_policy = get_unaligned_le16(sent); + + return rp->status; +} + +static u8 hci_cc_reset(struct hci_dev *hdev, void *data, struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + clear_bit(HCI_RESET, &hdev->flags); + + if (rp->status) + return 
rp->status; + + /* Reset all non-persistent flags */ + hci_dev_clear_volatile_flags(hdev); + + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + + hdev->inq_tx_power = HCI_TX_POWER_INVALID; + hdev->adv_tx_power = HCI_TX_POWER_INVALID; + + memset(hdev->adv_data, 0, sizeof(hdev->adv_data)); + hdev->adv_data_len = 0; + + memset(hdev->scan_rsp_data, 0, sizeof(hdev->scan_rsp_data)); + hdev->scan_rsp_data_len = 0; + + hdev->le_scan_type = LE_SCAN_PASSIVE; + + hdev->ssp_debug_mode = 0; + + hci_bdaddr_list_clear(&hdev->le_accept_list); + hci_bdaddr_list_clear(&hdev->le_resolv_list); + + return rp->status; +} + +static u8 hci_cc_read_stored_link_key(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_stored_link_key *rp = data; + struct hci_cp_read_stored_link_key *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_READ_STORED_LINK_KEY); + if (!sent) + return rp->status; + + if (!rp->status && sent->read_all == 0x01) { + hdev->stored_max_keys = le16_to_cpu(rp->max_keys); + hdev->stored_num_keys = le16_to_cpu(rp->num_keys); + } + + return rp->status; +} + +static u8 hci_cc_delete_stored_link_key(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_delete_stored_link_key *rp = data; + u16 num_keys; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + num_keys = le16_to_cpu(rp->num_keys); + + if (num_keys <= hdev->stored_num_keys) + hdev->stored_num_keys -= num_keys; + else + hdev->stored_num_keys = 0; + + return rp->status; +} + +static u8 hci_cc_write_local_name(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LOCAL_NAME); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_set_local_name_complete(hdev, sent, rp->status); + else if (!rp->status) + memcpy(hdev->dev_name, sent, HCI_MAX_NAME_LENGTH); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_read_local_name(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_local_name *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + if (hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG)) + memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH); + + return rp->status; +} + +static u8 hci_cc_write_auth_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_ENABLE); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + if (!rp->status) { + __u8 param = *((__u8 *) sent); + + if (param == AUTH_ENABLED) + set_bit(HCI_AUTH, &hdev->flags); + else + clear_bit(HCI_AUTH, &hdev->flags); + } + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_auth_enable_complete(hdev, rp->status); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_write_encrypt_mode(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + __u8 param; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_ENCRYPT_MODE); + if (!sent) + return rp->status; + + param = *((__u8 *) sent); + + if 
(param) + set_bit(HCI_ENCRYPT, &hdev->flags); + else + clear_bit(HCI_ENCRYPT, &hdev->flags); + + return rp->status; +} + +static u8 hci_cc_write_scan_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + __u8 param; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE); + if (!sent) + return rp->status; + + param = *((__u8 *) sent); + + hci_dev_lock(hdev); + + if (rp->status) { + hdev->discov_timeout = 0; + goto done; + } + + if (param & SCAN_INQUIRY) + set_bit(HCI_ISCAN, &hdev->flags); + else + clear_bit(HCI_ISCAN, &hdev->flags); + + if (param & SCAN_PAGE) + set_bit(HCI_PSCAN, &hdev->flags); + else + clear_bit(HCI_PSCAN, &hdev->flags); + +done: + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_set_event_filter(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_set_event_filter *cp; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_SET_EVENT_FLT); + if (!sent) + return rp->status; + + cp = (struct hci_cp_set_event_filter *)sent; + + if (cp->flt_type == HCI_FLT_CLEAR_ALL) + hci_dev_clear_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); + else + hci_dev_set_flag(hdev, HCI_EVENT_FILTER_CONFIGURED); + + return rp->status; +} + +static u8 hci_cc_read_class_of_dev(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_class_of_dev *rp = data; + + if (WARN_ON(!hdev)) + return HCI_ERROR_UNSPECIFIED; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + memcpy(hdev->dev_class, rp->dev_class, 3); + + bt_dev_dbg(hdev, "class 0x%.2x%.2x%.2x", hdev->dev_class[2], + hdev->dev_class[1], hdev->dev_class[0]); + + return rp->status; +} + +static u8 hci_cc_write_class_of_dev(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_CLASS_OF_DEV); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + if (!rp->status) + memcpy(hdev->dev_class, sent, 3); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_set_class_of_dev_complete(hdev, sent, rp->status); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_read_voice_setting(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_voice_setting *rp = data; + __u16 setting; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + setting = __le16_to_cpu(rp->voice_setting); + + if (hdev->voice_setting == setting) + return rp->status; + + hdev->voice_setting = setting; + + bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); + + if (hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); + + return rp->status; +} + +static u8 hci_cc_write_voice_setting(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + __u16 setting; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_VOICE_SETTING); + if (!sent) + return rp->status; + + setting = get_unaligned_le16(sent); + + if (hdev->voice_setting == setting) + return rp->status; + + hdev->voice_setting = setting; + + bt_dev_dbg(hdev, "voice setting 0x%4.4x", setting); + + if 
(hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_VOICE_SETTING); + + return rp->status; +} + +static u8 hci_cc_read_num_supported_iac(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_num_supported_iac *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->num_iac = rp->num_iac; + + bt_dev_dbg(hdev, "num iac %d", hdev->num_iac); + + return rp->status; +} + +static u8 hci_cc_write_ssp_mode(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_write_ssp_mode *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_MODE); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + if (!rp->status) { + if (sent->mode) + hdev->features[1][0] |= LMP_HOST_SSP; + else + hdev->features[1][0] &= ~LMP_HOST_SSP; + } + + if (!rp->status) { + if (sent->mode) + hci_dev_set_flag(hdev, HCI_SSP_ENABLED); + else + hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); + } + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_write_sc_support(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_write_sc_support *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SC_SUPPORT); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + if (!rp->status) { + if (sent->support) + hdev->features[1][0] |= LMP_HOST_SC; + else + hdev->features[1][0] &= ~LMP_HOST_SC; + } + + if (!hci_dev_test_flag(hdev, HCI_MGMT) && !rp->status) { + if (sent->support) + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + else + hci_dev_clear_flag(hdev, HCI_SC_ENABLED); + } + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_local_version *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + if (hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG)) { + hdev->hci_ver = rp->hci_ver; + hdev->hci_rev = __le16_to_cpu(rp->hci_rev); + hdev->lmp_ver = rp->lmp_ver; + hdev->manufacturer = __le16_to_cpu(rp->manufacturer); + hdev->lmp_subver = __le16_to_cpu(rp->lmp_subver); + } + + return rp->status; +} + +static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_enc_key_size *rp = data; + struct hci_conn *conn; + u16 handle; + u8 status = rp->status; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + handle = le16_to_cpu(rp->handle); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (!conn) { + status = 0xFF; + goto done; + } + + /* While unexpected, the read_enc_key_size command may fail. The most + * secure approach is to then assume the key size is 0 to force a + * disconnection. + */ + if (status) { + bt_dev_err(hdev, "failed to read key size for handle %u", + handle); + conn->enc_key_size = 0; + } else { + conn->enc_key_size = rp->key_size; + status = 0; + + if (conn->enc_key_size < hdev->min_enc_key_size) { + /* In the slave role, conn->state has already been set + * to BT_CONNECTED while the l2cap conn req might not + * have been received yet; at this moment the l2cap + * layer does almost nothing with the non-zero status. + * So we also clear the encrypt related bits, so that + * the handler of the l2cap conn req will see the right + * secure state at a later time.
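+ * Either way, hci_encrypt_cfm() below propagates the resulting + * status to the upper layers.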
+ */ + status = HCI_ERROR_AUTH_FAILURE; + clear_bit(HCI_CONN_ENCRYPT, &conn->flags); + clear_bit(HCI_CONN_AES_CCM, &conn->flags); + } + } + + hci_encrypt_cfm(conn, status); + +done: + hci_dev_unlock(hdev); + + return status; +} + +static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_local_commands *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + if (hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG)) + memcpy(hdev->commands, rp->commands, sizeof(hdev->commands)); + + return rp->status; +} + +static u8 hci_cc_read_auth_payload_timeout(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_auth_payload_to *rp = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) + conn->auth_payload_timeout = __le16_to_cpu(rp->timeout); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_write_auth_payload_timeout(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_write_auth_payload_to *rp = data; + struct hci_conn *conn; + void *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) + conn->auth_payload_timeout = get_unaligned_le16(sent + 2); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_read_local_features(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_local_features *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + memcpy(hdev->features, rp->features, 8); + + /* Adjust default settings according to features + * supported by device. 
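+ * Each LMP feature bit checked below enables the matching ACL packet + * types in hdev->pkt_type or (e)SCO air modes in hdev->esco_type.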
*/ + + if (hdev->features[0][0] & LMP_3SLOT) + hdev->pkt_type |= (HCI_DM3 | HCI_DH3); + + if (hdev->features[0][0] & LMP_5SLOT) + hdev->pkt_type |= (HCI_DM5 | HCI_DH5); + + if (hdev->features[0][1] & LMP_HV2) { + hdev->pkt_type |= (HCI_HV2); + hdev->esco_type |= (ESCO_HV2); + } + + if (hdev->features[0][1] & LMP_HV3) { + hdev->pkt_type |= (HCI_HV3); + hdev->esco_type |= (ESCO_HV3); + } + + if (lmp_esco_capable(hdev)) + hdev->esco_type |= (ESCO_EV3); + + if (hdev->features[0][4] & LMP_EV4) + hdev->esco_type |= (ESCO_EV4); + + if (hdev->features[0][4] & LMP_EV5) + hdev->esco_type |= (ESCO_EV5); + + if (hdev->features[0][5] & LMP_EDR_ESCO_2M) + hdev->esco_type |= (ESCO_2EV3); + + if (hdev->features[0][5] & LMP_EDR_ESCO_3M) + hdev->esco_type |= (ESCO_3EV3); + + if (hdev->features[0][5] & LMP_EDR_3S_ESCO) + hdev->esco_type |= (ESCO_2EV5 | ESCO_3EV5); + + return rp->status; +} + +static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_local_ext_features *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + if (hdev->max_page < rp->max_page) { + if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2, + &hdev->quirks)) + bt_dev_warn(hdev, "broken local ext features page 2"); + else + hdev->max_page = rp->max_page; + } + + if (rp->page < HCI_MAX_PAGES) + memcpy(hdev->features[rp->page], rp->features, 8); + + return rp->status; +} + +static u8 hci_cc_read_flow_control_mode(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_flow_control_mode *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->flow_ctl_mode = rp->mode; + + return rp->status; +} + +static u8 hci_cc_read_buffer_size(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_buffer_size *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->acl_mtu = __le16_to_cpu(rp->acl_mtu); + hdev->sco_mtu = rp->sco_mtu; + hdev->acl_pkts = __le16_to_cpu(rp->acl_max_pkt); + hdev->sco_pkts = __le16_to_cpu(rp->sco_max_pkt); + + if (test_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks)) { + hdev->sco_mtu = 64; + hdev->sco_pkts = 8; + } + + hdev->acl_cnt = hdev->acl_pkts; + hdev->sco_cnt = hdev->sco_pkts; + + BT_DBG("%s acl mtu %d:%d sco mtu %d:%d", hdev->name, hdev->acl_mtu, + hdev->acl_pkts, hdev->sco_mtu, hdev->sco_pkts); + + return rp->status; +} + +static u8 hci_cc_read_bd_addr(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_bd_addr *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + if (test_bit(HCI_INIT, &hdev->flags)) + bacpy(&hdev->bdaddr, &rp->bdaddr); + + if (hci_dev_test_flag(hdev, HCI_SETUP)) + bacpy(&hdev->setup_addr, &rp->bdaddr); + + return rp->status; +} + +static u8 hci_cc_read_local_pairing_opts(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_local_pairing_opts *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + if (hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG)) { + hdev->pairing_opts = rp->pairing_opts; + hdev->max_enc_key_size = rp->max_key_size; + } + + return rp->status; +} + +static u8 hci_cc_read_page_scan_activity(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_page_scan_activity *rp = data; + + 
bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + if (test_bit(HCI_INIT, &hdev->flags)) { + hdev->page_scan_interval = __le16_to_cpu(rp->interval); + hdev->page_scan_window = __le16_to_cpu(rp->window); + } + + return rp->status; +} + +static u8 hci_cc_write_page_scan_activity(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_write_page_scan_activity *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY); + if (!sent) + return rp->status; + + hdev->page_scan_interval = __le16_to_cpu(sent->interval); + hdev->page_scan_window = __le16_to_cpu(sent->window); + + return rp->status; +} + +static u8 hci_cc_read_page_scan_type(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_page_scan_type *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + if (test_bit(HCI_INIT, &hdev->flags)) + hdev->page_scan_type = rp->type; + + return rp->status; +} + +static u8 hci_cc_write_page_scan_type(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + u8 *type; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + type = hci_sent_cmd_data(hdev, HCI_OP_WRITE_PAGE_SCAN_TYPE); + if (type) + hdev->page_scan_type = *type; + + return rp->status; +} + +static u8 hci_cc_read_data_block_size(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_data_block_size *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->block_mtu = __le16_to_cpu(rp->max_acl_len); + hdev->block_len = __le16_to_cpu(rp->block_len); + hdev->num_blocks = __le16_to_cpu(rp->num_blocks); + + hdev->block_cnt = hdev->num_blocks; + + BT_DBG("%s blk mtu %d cnt %d len %d", hdev->name, hdev->block_mtu, + hdev->block_cnt, hdev->block_len); + + return rp->status; +} + +static u8 hci_cc_read_clock(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_clock *rp = data; + struct hci_cp_read_clock *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hci_dev_lock(hdev); + + cp = hci_sent_cmd_data(hdev, HCI_OP_READ_CLOCK); + if (!cp) + goto unlock; + + if (cp->which == 0x00) { + hdev->clock = le32_to_cpu(rp->clock); + goto unlock; + } + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) { + conn->clock = le32_to_cpu(rp->clock); + conn->clock_accuracy = le16_to_cpu(rp->accuracy); + } + +unlock: + hci_dev_unlock(hdev); + return rp->status; +} + +static u8 hci_cc_read_local_amp_info(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_local_amp_info *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->amp_status = rp->amp_status; + hdev->amp_total_bw = __le32_to_cpu(rp->total_bw); + hdev->amp_max_bw = __le32_to_cpu(rp->max_bw); + hdev->amp_min_latency = __le32_to_cpu(rp->min_latency); + hdev->amp_max_pdu = __le32_to_cpu(rp->max_pdu); + hdev->amp_type = rp->amp_type; + hdev->amp_pal_cap = __le16_to_cpu(rp->pal_cap); + hdev->amp_assoc_size = __le16_to_cpu(rp->max_assoc_size); + hdev->amp_be_flush_to = __le32_to_cpu(rp->be_flush_to); + hdev->amp_max_flush_to = 
__le32_to_cpu(rp->max_flush_to); + + return rp->status; +} + +static u8 hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_inq_rsp_tx_power *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->inq_tx_power = rp->tx_power; + + return rp->status; +} + +static u8 hci_cc_read_def_err_data_reporting(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_def_err_data_reporting *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->err_data_reporting = rp->err_data_reporting; + + return rp->status; +} + +static u8 hci_cc_write_def_err_data_reporting(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_write_def_err_data_reporting *cp; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING); + if (!cp) + return rp->status; + + hdev->err_data_reporting = cp->err_data_reporting; + + return rp->status; +} + +static u8 hci_cc_pin_code_reply(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_pin_code_reply *rp = data; + struct hci_cp_pin_code_reply *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + hci_dev_lock(hdev); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_pin_code_reply_complete(hdev, &rp->bdaddr, rp->status); + + if (rp->status) + goto unlock; + + cp = hci_sent_cmd_data(hdev, HCI_OP_PIN_CODE_REPLY); + if (!cp) + goto unlock; + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); + if (conn) + conn->pin_length = cp->pin_len; + +unlock: + hci_dev_unlock(hdev); + return rp->status; +} + +static u8 hci_cc_pin_code_neg_reply(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_pin_code_neg_reply *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + hci_dev_lock(hdev); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_pin_code_neg_reply_complete(hdev, &rp->bdaddr, + rp->status); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_read_buffer_size(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_buffer_size *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->le_mtu = __le16_to_cpu(rp->le_mtu); + hdev->le_pkts = rp->le_max_pkt; + + hdev->le_cnt = hdev->le_pkts; + + BT_DBG("%s le mtu %d:%d", hdev->name, hdev->le_mtu, hdev->le_pkts); + + return rp->status; +} + +static u8 hci_cc_le_read_local_features(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_local_features *rp = data; + + BT_DBG("%s status 0x%2.2x", hdev->name, rp->status); + + if (rp->status) + return rp->status; + + memcpy(hdev->le_features, rp->features, 8); + + return rp->status; +} + +static u8 hci_cc_le_read_adv_tx_power(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_adv_tx_power *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->adv_tx_power = rp->tx_power; + + return rp->status; +} + +static u8 hci_cc_user_confirm_reply(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_user_confirm_reply *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + hci_dev_lock(hdev); + + if 
(hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_user_confirm_reply_complete(hdev, &rp->bdaddr, ACL_LINK, 0, + rp->status); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_user_confirm_neg_reply(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_user_confirm_reply *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + hci_dev_lock(hdev); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_user_confirm_neg_reply_complete(hdev, &rp->bdaddr, + ACL_LINK, 0, rp->status); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_user_passkey_reply(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_user_confirm_reply *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + hci_dev_lock(hdev); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_user_passkey_reply_complete(hdev, &rp->bdaddr, ACL_LINK, + 0, rp->status); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_user_passkey_neg_reply(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_user_confirm_reply *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + hci_dev_lock(hdev); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_user_passkey_neg_reply_complete(hdev, &rp->bdaddr, + ACL_LINK, 0, rp->status); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_read_local_oob_data(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_local_oob_data *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + return rp->status; +} + +static u8 hci_cc_read_local_oob_ext_data(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_local_oob_ext_data *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + return rp->status; +} + +static u8 hci_cc_le_set_random_addr(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + bdaddr_t *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_RANDOM_ADDR); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + bacpy(&hdev->random_addr, sent); + + if (!bacmp(&hdev->rpa, sent)) { + hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); + queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, + secs_to_jiffies(hdev->rpa_timeout)); + } + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_set_default_phy(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_le_set_default_phy *cp; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_DEFAULT_PHY); + if (!cp) + return rp->status; + + hci_dev_lock(hdev); + + hdev->le_tx_def_phys = cp->tx_phys; + hdev->le_rx_def_phys = cp->rx_phys; + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_set_adv_set_random_addr(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_le_set_adv_set_rand_addr *cp; + struct adv_info *adv; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR); + /* Update only if there is an adv instance, since handle 0x00 shall + * use HCI_OP_LE_SET_RANDOM_ADDR instead, as that allows both extended + * and non-extended advertising.
+ */ + if (!cp || !cp->handle) + return rp->status; + + hci_dev_lock(hdev); + + adv = hci_find_adv_instance(hdev, cp->handle); + if (adv) { + bacpy(&adv->random_addr, &cp->bdaddr); + if (!bacmp(&hdev->rpa, &cp->bdaddr)) { + adv->rpa_expired = false; + queue_delayed_work(hdev->workqueue, + &adv->rpa_expired_cb, + secs_to_jiffies(hdev->rpa_timeout)); + } + } + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_remove_adv_set(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + u8 *instance; + int err; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + instance = hci_sent_cmd_data(hdev, HCI_OP_LE_REMOVE_ADV_SET); + if (!instance) + return rp->status; + + hci_dev_lock(hdev); + + err = hci_remove_adv_instance(hdev, *instance); + if (!err) + mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), hdev, + *instance); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_clear_adv_sets(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct adv_info *adv, *n; + int err; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + if (!hci_sent_cmd_data(hdev, HCI_OP_LE_CLEAR_ADV_SETS)) + return rp->status; + + hci_dev_lock(hdev); + + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + u8 instance = adv->instance; + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(hci_skb_sk(hdev->sent_cmd), + hdev, instance); + } + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_read_transmit_power(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_transmit_power *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->min_le_tx_power = rp->min_le_tx_power; + hdev->max_le_tx_power = rp->max_le_tx_power; + + return rp->status; +} + +static u8 hci_cc_le_set_privacy_mode(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_le_set_privacy_mode *cp; + struct hci_conn_params *params; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PRIVACY_MODE); + if (!cp) + return rp->status; + + hci_dev_lock(hdev); + + params = hci_conn_params_lookup(hdev, &cp->bdaddr, cp->bdaddr_type); + if (params) + WRITE_ONCE(params->privacy_mode, cp->mode); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_set_adv_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + __u8 *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_ENABLE); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + /* If we're doing connection initiation as peripheral, set a + * timeout in case something goes wrong.
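+ * The delayed work queued below fires after conn->conn_timeout if + * the connection has not completed by then.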
+ */ + if (*sent) { + struct hci_conn *conn; + + hci_dev_set_flag(hdev, HCI_LE_ADV); + + conn = hci_lookup_le_connect(hdev); + if (conn) + queue_delayed_work(hdev->workqueue, + &conn->le_conn_timeout, + conn->conn_timeout); + } else { + hci_dev_clear_flag(hdev, HCI_LE_ADV); + } + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_set_ext_adv_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_set_ext_adv_enable *cp; + struct hci_cp_ext_adv_set *set; + struct adv_info *adv = NULL, *n; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE); + if (!cp) + return rp->status; + + set = (void *)cp->data; + + hci_dev_lock(hdev); + + if (cp->num_of_sets) + adv = hci_find_adv_instance(hdev, set->handle); + + if (cp->enable) { + struct hci_conn *conn; + + hci_dev_set_flag(hdev, HCI_LE_ADV); + + if (adv) + adv->enabled = true; + + conn = hci_lookup_le_connect(hdev); + if (conn) + queue_delayed_work(hdev->workqueue, + &conn->le_conn_timeout, + conn->conn_timeout); + } else { + if (cp->num_of_sets) { + if (adv) + adv->enabled = false; + + /* If just one instance was disabled check if there are + * any other instance enabled before clearing HCI_LE_ADV + */ + list_for_each_entry_safe(adv, n, &hdev->adv_instances, + list) { + if (adv->enabled) + goto unlock; + } + } else { + /* All instances shall be considered disabled */ + list_for_each_entry_safe(adv, n, &hdev->adv_instances, + list) + adv->enabled = false; + } + + hci_dev_clear_flag(hdev, HCI_LE_ADV); + } + +unlock: + hci_dev_unlock(hdev); + return rp->status; +} + +static u8 hci_cc_le_set_scan_param(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_set_scan_param *cp; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_PARAM); + if (!cp) + return rp->status; + + hci_dev_lock(hdev); + + hdev->le_scan_type = cp->type; + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_set_ext_scan_param(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_set_ext_scan_params *cp; + struct hci_ev_status *rp = data; + struct hci_cp_le_scan_phy_params *phy_param; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS); + if (!cp) + return rp->status; + + phy_param = (void *)cp->data; + + hci_dev_lock(hdev); + + hdev->le_scan_type = phy_param->type; + + hci_dev_unlock(hdev); + + return rp->status; +} + +static bool has_pending_adv_report(struct hci_dev *hdev) +{ + struct discovery_state *d = &hdev->discovery; + + return bacmp(&d->last_adv_addr, BDADDR_ANY); +} + +static void clear_pending_adv_report(struct hci_dev *hdev) +{ + struct discovery_state *d = &hdev->discovery; + + bacpy(&d->last_adv_addr, BDADDR_ANY); + d->last_adv_data_len = 0; +} + +static void store_pending_adv_report(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type, s8 rssi, u32 flags, + u8 *data, u8 len) +{ + struct discovery_state *d = &hdev->discovery; + + if (len > HCI_MAX_AD_LENGTH) + return; + + bacpy(&d->last_adv_addr, bdaddr); + d->last_adv_addr_type = bdaddr_type; + d->last_adv_rssi = rssi; + d->last_adv_flags = flags; + memcpy(d->last_adv_data, data, len); + d->last_adv_data_len = len; +} + +static 
void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable) +{ + hci_dev_lock(hdev); + + switch (enable) { + case LE_SCAN_ENABLE: + hci_dev_set_flag(hdev, HCI_LE_SCAN); + if (hdev->le_scan_type == LE_SCAN_ACTIVE) + clear_pending_adv_report(hdev); + if (hci_dev_test_flag(hdev, HCI_MESH)) + hci_discovery_set_state(hdev, DISCOVERY_FINDING); + break; + + case LE_SCAN_DISABLE: + /* We do this here instead of when setting DISCOVERY_STOPPED + * since the latter would potentially require waiting for + * inquiry to stop too. + */ + if (has_pending_adv_report(hdev)) { + struct discovery_state *d = &hdev->discovery; + + mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, + d->last_adv_addr_type, NULL, + d->last_adv_rssi, d->last_adv_flags, + d->last_adv_data, + d->last_adv_data_len, NULL, 0, 0); + } + + /* Cancel this timer so that we don't try to disable scanning + * when it's already disabled. + */ + cancel_delayed_work(&hdev->le_scan_disable); + + hci_dev_clear_flag(hdev, HCI_LE_SCAN); + + /* The HCI_LE_SCAN_INTERRUPTED flag indicates that we + * interrupted scanning due to a connect request. Mark + * therefore discovery as stopped. + */ + if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED)) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && + hdev->discovery.state == DISCOVERY_FINDING) + queue_work(hdev->workqueue, &hdev->reenable_adv_work); + + break; + + default: + bt_dev_err(hdev, "use of reserved LE_Scan_Enable param %d", + enable); + break; + } + + hci_dev_unlock(hdev); +} + +static u8 hci_cc_le_set_scan_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_set_scan_enable *cp; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_SCAN_ENABLE); + if (!cp) + return rp->status; + + le_set_scan_enable_complete(hdev, cp->enable); + + return rp->status; +} + +static u8 hci_cc_le_set_ext_scan_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_set_ext_scan_enable *cp; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE); + if (!cp) + return rp->status; + + le_set_scan_enable_complete(hdev, cp->enable); + + return rp->status; +} + +static u8 hci_cc_le_read_num_adv_sets(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_num_supported_adv_sets *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x No of Adv sets %u", rp->status, + rp->num_of_sets); + + if (rp->status) + return rp->status; + + hdev->le_num_of_adv_sets = rp->num_of_sets; + + return rp->status; +} + +static u8 hci_cc_le_read_accept_list_size(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_accept_list_size *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); + + if (rp->status) + return rp->status; + + hdev->le_accept_list_size = rp->size; + + return rp->status; +} + +static u8 hci_cc_le_clear_accept_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hci_dev_lock(hdev); + hci_bdaddr_list_clear(&hdev->le_accept_list); + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_add_to_accept_list(struct 
hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_add_to_accept_list *sent; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + hci_bdaddr_list_add(&hdev->le_accept_list, &sent->bdaddr, + sent->bdaddr_type); + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_del_from_accept_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_del_from_accept_list *sent; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + hci_bdaddr_list_del(&hdev->le_accept_list, &sent->bdaddr, + sent->bdaddr_type); + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_read_supported_states(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_supported_states *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + memcpy(hdev->le_states, rp->le_states, 8); + + return rp->status; +} + +static u8 hci_cc_le_read_def_data_len(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_def_data_len *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->le_def_tx_len = le16_to_cpu(rp->tx_len); + hdev->le_def_tx_time = le16_to_cpu(rp->tx_time); + + return rp->status; +} + +static u8 hci_cc_le_write_def_data_len(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_write_def_data_len *sent; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN); + if (!sent) + return rp->status; + + hdev->le_def_tx_len = le16_to_cpu(sent->tx_len); + hdev->le_def_tx_time = le16_to_cpu(sent->tx_time); + + return rp->status; +} + +static u8 hci_cc_le_add_to_resolv_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_add_to_resolv_list *sent; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + hci_bdaddr_list_add_with_irk(&hdev->le_resolv_list, &sent->bdaddr, + sent->bdaddr_type, sent->peer_irk, + sent->local_irk); + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_del_from_resolv_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_del_from_resolv_list *sent; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + hci_bdaddr_list_del_with_irk(&hdev->le_resolv_list, &sent->bdaddr, + sent->bdaddr_type); + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_clear_resolv_list(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + 
return rp->status; + + hci_dev_lock(hdev); + hci_bdaddr_list_clear(&hdev->le_resolv_list); + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_read_resolv_list_size(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_resolv_list_size *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x size %u", rp->status, rp->size); + + if (rp->status) + return rp->status; + + hdev->le_resolv_list_size = rp->size; + + return rp->status; +} + +static u8 hci_cc_le_set_addr_resolution_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + __u8 *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + if (*sent) + hci_dev_set_flag(hdev, HCI_LL_RPA_RESOLUTION); + else + hci_dev_clear_flag(hdev, HCI_LL_RPA_RESOLUTION); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_read_max_data_len(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_max_data_len *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->le_max_tx_len = le16_to_cpu(rp->tx_len); + hdev->le_max_tx_time = le16_to_cpu(rp->tx_time); + hdev->le_max_rx_len = le16_to_cpu(rp->rx_len); + hdev->le_max_rx_time = le16_to_cpu(rp->rx_time); + + return rp->status; +} + +static u8 hci_cc_write_le_host_supported(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_write_le_host_supported *sent; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + if (sent->le) { + hdev->features[1][0] |= LMP_HOST_LE; + hci_dev_set_flag(hdev, HCI_LE_ENABLED); + } else { + hdev->features[1][0] &= ~LMP_HOST_LE; + hci_dev_clear_flag(hdev, HCI_LE_ENABLED); + hci_dev_clear_flag(hdev, HCI_ADVERTISING); + } + + if (sent->simul) + hdev->features[1][0] |= LMP_HOST_LE_BREDR; + else + hdev->features[1][0] &= ~LMP_HOST_LE_BREDR; + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_set_adv_param(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_le_set_adv_param *cp; + struct hci_ev_status *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_ADV_PARAM); + if (!cp) + return rp->status; + + hci_dev_lock(hdev); + hdev->adv_addr_type = cp->own_address_type; + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_set_ext_adv_params *rp = data; + struct hci_cp_le_set_ext_adv_params *cp; + struct adv_info *adv_instance; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS); + if (!cp) + return rp->status; + + hci_dev_lock(hdev); + hdev->adv_addr_type = cp->own_addr_type; + if (!cp->handle) { + /* Store in hdev for instance 0 */ + hdev->adv_tx_power = rp->tx_power; + } else { + adv_instance = hci_find_adv_instance(hdev, cp->handle); + if (adv_instance) + adv_instance->tx_power = rp->tx_power; + } + /* Update adv data as tx power is known now */ + 
hci_update_adv_data(hdev, cp->handle); + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_read_rssi(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_read_rssi *rp = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (conn) + conn->rssi = rp->rssi; + + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_read_tx_power(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_cp_read_tx_power *sent; + struct hci_rp_read_tx_power *rp = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_READ_TX_POWER); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(rp->handle)); + if (!conn) + goto unlock; + + switch (sent->type) { + case 0x00: + conn->tx_power = rp->tx_power; + break; + case 0x01: + conn->max_tx_power = rp->tx_power; + break; + } + +unlock: + hci_dev_unlock(hdev); + return rp->status; +} + +static u8 hci_cc_write_ssp_debug_mode(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + u8 *mode; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + mode = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE); + if (mode) + hdev->ssp_debug_mode = *mode; + + return rp->status; +} + +static void hci_cs_inquiry(struct hci_dev *hdev, __u8 status) +{ + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (status) { + hci_conn_check_pending(hdev); + return; + } + + if (hci_sent_cmd_data(hdev, HCI_OP_INQUIRY)) + set_bit(HCI_INQUIRY, &hdev->flags); +} + +static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_create_conn *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + cp = hci_sent_cmd_data(hdev, HCI_OP_CREATE_CONN); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); + + bt_dev_dbg(hdev, "bdaddr %pMR hcon %p", &cp->bdaddr, conn); + + if (status) { + if (conn && conn->state == BT_CONNECT) { + if (status != 0x0c || conn->attempt > 2) { + conn->state = BT_CLOSED; + hci_connect_cfm(conn, status); + hci_conn_del(conn); + } else + conn->state = BT_CONNECT2; + } + } else { + if (!conn) { + conn = hci_conn_add(hdev, ACL_LINK, &cp->bdaddr, + HCI_ROLE_MASTER); + if (!conn) + bt_dev_err(hdev, "no memory for new connection"); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_add_sco *cp; + struct hci_conn *acl, *sco; + __u16 handle; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_ADD_SCO); + if (!cp) + return; + + handle = __le16_to_cpu(cp->handle); + + bt_dev_dbg(hdev, "handle 0x%4.4x", handle); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl) { + sco = acl->link; + if (sco) { + sco->state = BT_CLOSED; + + hci_connect_cfm(sco, status); + hci_conn_del(sco); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_auth_requested(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_auth_requested *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = 
hci_sent_cmd_data(hdev, HCI_OP_AUTH_REQUESTED); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + if (conn->state == BT_CONFIG) { + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_set_conn_encrypt(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_set_conn_encrypt *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_SET_CONN_ENCRYPT); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + if (conn->state == BT_CONFIG) { + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + } + } + + hci_dev_unlock(hdev); +} + +static int hci_outgoing_auth_needed(struct hci_dev *hdev, + struct hci_conn *conn) +{ + if (conn->state != BT_CONFIG || !conn->out) + return 0; + + if (conn->pending_sec_level == BT_SECURITY_SDP) + return 0; + + /* Only request authentication for SSP connections or non-SSP + * devices with sec_level MEDIUM or HIGH or if MITM protection + * is requested. + */ + if (!hci_conn_ssp_enabled(conn) && !(conn->auth_type & 0x01) && + conn->pending_sec_level != BT_SECURITY_FIPS && + conn->pending_sec_level != BT_SECURITY_HIGH && + conn->pending_sec_level != BT_SECURITY_MEDIUM) + return 0; + + return 1; +} + +static int hci_resolve_name(struct hci_dev *hdev, + struct inquiry_entry *e) +{ + struct hci_cp_remote_name_req cp; + + memset(&cp, 0, sizeof(cp)); + + bacpy(&cp.bdaddr, &e->data.bdaddr); + cp.pscan_rep_mode = e->data.pscan_rep_mode; + cp.pscan_mode = e->data.pscan_mode; + cp.clock_offset = e->data.clock_offset; + + return hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); +} + +static bool hci_resolve_next_name(struct hci_dev *hdev) +{ + struct discovery_state *discov = &hdev->discovery; + struct inquiry_entry *e; + + if (list_empty(&discov->resolve)) + return false; + + /* We should stop if we already spent too much time resolving names. */ + if (time_after(jiffies, discov->name_resolve_timeout)) { + bt_dev_warn_ratelimited(hdev, "Name resolve takes too long."); + return false; + } + + e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); + if (!e) + return false; + + if (hci_resolve_name(hdev, e) == 0) { + e->name_state = NAME_PENDING; + return true; + } + + return false; +} + +static void hci_check_pending_name(struct hci_dev *hdev, struct hci_conn *conn, + bdaddr_t *bdaddr, u8 *name, u8 name_len) +{ + struct discovery_state *discov = &hdev->discovery; + struct inquiry_entry *e; + + /* Update the mgmt connected state if necessary. Be careful, however, + * with conn objects that exist but are not (yet) connected. + * Only those in BT_CONFIG or BT_CONNECTED states can be + * considered connected. + */ + if (conn && + (conn->state == BT_CONFIG || conn->state == BT_CONNECTED) && + !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + mgmt_device_connected(hdev, conn, name, name_len); + + if (discov->state == DISCOVERY_STOPPED) + return; + + if (discov->state == DISCOVERY_STOPPING) + goto discov_complete; + + if (discov->state != DISCOVERY_RESOLVING) + return; + + e = hci_inquiry_cache_lookup_resolve(hdev, bdaddr, NAME_PENDING); + /* If the device was not found in the list of devices whose names + * are pending,
there is no need to continue resolving the next name, + * as that will be done upon receiving another Remote Name Request + * Complete event. */ + if (!e) + return; + + list_del(&e->list); + + e->name_state = name ? NAME_KNOWN : NAME_NOT_KNOWN; + mgmt_remote_name(hdev, bdaddr, ACL_LINK, 0x00, e->data.rssi, + name, name_len); + + if (hci_resolve_next_name(hdev)) + return; + +discov_complete: + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); +} + +static void hci_cs_remote_name_req(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_remote_name_req *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + /* If successful, wait for the name req complete event before + * checking for the need to do authentication */ + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_REMOTE_NAME_REQ); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + hci_check_pending_name(hdev, conn, &cp->bdaddr, NULL, 0); + + if (!conn) + goto unlock; + + if (!hci_outgoing_auth_needed(hdev, conn)) + goto unlock; + + if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { + struct hci_cp_auth_requested auth_cp; + + set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); + + auth_cp.handle = __cpu_to_le16(conn->handle); + hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, + sizeof(auth_cp), &auth_cp); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_cs_read_remote_features(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_read_remote_features *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_FEATURES); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + if (conn->state == BT_CONFIG) { + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_read_remote_ext_features *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + if (conn->state == BT_CONFIG) { + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_setup_sync_conn *cp; + struct hci_conn *acl, *sco; + __u16 handle; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); + if (!cp) + return; + + handle = __le16_to_cpu(cp->handle); + + bt_dev_dbg(hdev, "handle 0x%4.4x", handle); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl) { + sco = acl->link; + if (sco) { + sco->state = BT_CLOSED; + + hci_connect_cfm(sco, status); + hci_conn_del(sco); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_enhanced_setup_sync_conn *cp; + struct hci_conn *acl, *sco; + __u16 handle; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); + if (!cp) + return; + + handle =
__le16_to_cpu(cp->handle); + + bt_dev_dbg(hdev, "handle 0x%4.4x", handle); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, handle); + if (acl) { + sco = acl->link; + if (sco) { + sco->state = BT_CLOSED; + + hci_connect_cfm(sco, status); + hci_conn_del(sco); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_sniff_mode *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_SNIFF_MODE); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); + + if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) + hci_sco_setup(conn, status); + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_exit_sniff_mode(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_exit_sniff_mode *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_EXIT_SNIFF_MODE); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + clear_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags); + + if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) + hci_sco_setup(conn, status); + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_disconnect(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_disconnect *cp; + struct hci_conn_params *params; + struct hci_conn *conn; + bool mgmt_conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + /* Wait for HCI_EV_DISCONN_COMPLETE if status 0x00 and not suspended + * otherwise cleanup the connection immediately. + */ + if (!status && !hdev->suspended) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_DISCONNECT); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (!conn) + goto unlock; + + if (status) { + mgmt_disconnect_failed(hdev, &conn->dst, conn->type, + conn->dst_type, status); + + if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { + hdev->cur_adv_instance = conn->adv_instance; + hci_enable_advertising(hdev); + } + + /* Inform sockets conn is gone before we delete it */ + hci_disconn_cfm(conn, HCI_ERROR_UNSPECIFIED); + + goto done; + } + + mgmt_conn = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); + + if (conn->type == ACL_LINK) { + if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + hci_remove_link_key(hdev, &conn->dst); + } + + params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); + if (params) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_LINK_LOSS: + if (cp->reason != HCI_ERROR_CONNECTION_TIMEOUT) + break; + fallthrough; + + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + hci_pend_le_list_del_init(params); + hci_pend_le_list_add(params, &hdev->pend_le_conns); + break; + + default: + break; + } + } + + mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, + cp->reason, mgmt_conn); + + hci_disconn_cfm(conn, cp->reason); + +done: + /* If the disconnection failed for any reason, the upper layer + * does not retry to disconnect in current implementation. + * Hence, we need to do some basic cleanup here and re-enable + * advertising if necessary. 
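+ * + * Note that hci_conn_del() also removes the connection from the + * connection hash, so the conn pointer must not be used after + * this call.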
+ */ + hci_conn_del(conn); +unlock: + hci_dev_unlock(hdev); +} + +static u8 ev_bdaddr_type(struct hci_dev *hdev, u8 type, bool *resolved) +{ + /* When using controller based address resolution, then the new + * address types 0x02 and 0x03 are used. These types need to be + * converted back into either public address or random address type + */ + switch (type) { + case ADDR_LE_DEV_PUBLIC_RESOLVED: + if (resolved) + *resolved = true; + return ADDR_LE_DEV_PUBLIC; + case ADDR_LE_DEV_RANDOM_RESOLVED: + if (resolved) + *resolved = true; + return ADDR_LE_DEV_RANDOM; + } + + if (resolved) + *resolved = false; + return type; +} + +static void cs_le_create_conn(struct hci_dev *hdev, bdaddr_t *peer_addr, + u8 peer_addr_type, u8 own_address_type, + u8 filter_policy) +{ + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_le(hdev, peer_addr, + peer_addr_type); + if (!conn) + return; + + own_address_type = ev_bdaddr_type(hdev, own_address_type, NULL); + + /* Store the initiator and responder address information which + * is needed for SMP. These values will not change during the + * lifetime of the connection. + */ + conn->init_addr_type = own_address_type; + if (own_address_type == ADDR_LE_DEV_RANDOM) + bacpy(&conn->init_addr, &hdev->random_addr); + else + bacpy(&conn->init_addr, &hdev->bdaddr); + + conn->resp_addr_type = peer_addr_type; + bacpy(&conn->resp_addr, peer_addr); +} + +static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_create_conn *cp; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + /* All connection failure handling is taken care of by the + * hci_conn_failed function which is triggered by the HCI + * request completion callbacks used for connecting. + */ + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CONN); + if (!cp) + return; + + hci_dev_lock(hdev); + + cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, + cp->own_address_type, cp->filter_policy); + + hci_dev_unlock(hdev); +} + +static void hci_cs_le_ext_create_conn(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_ext_create_conn *cp; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + /* All connection failure handling is taken care of by the + * hci_conn_failed function which is triggered by the HCI + * request completion callbacks used for connecting. 
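+ * + * On success, only the initiator/responder address information + * needed later by SMP is recorded here (see cs_le_create_conn()).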
+ */ + if (status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_EXT_CREATE_CONN); + if (!cp) + return; + + hci_dev_lock(hdev); + + cs_le_create_conn(hdev, &cp->peer_addr, cp->peer_addr_type, + cp->own_addr_type, cp->filter_policy); + + hci_dev_unlock(hdev); +} + +static void hci_cs_le_read_remote_features(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_read_remote_features *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_READ_REMOTE_FEATURES); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (conn) { + if (conn->state == BT_CONFIG) { + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_cs_le_start_enc(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_start_enc *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + hci_dev_lock(hdev); + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_START_ENC); + if (!cp) + goto unlock; + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (!conn) + goto unlock; + + if (conn->state != BT_CONNECTED) + goto unlock; + + hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); + hci_conn_drop(conn); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_cs_switch_role(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_switch_role *cp; + struct hci_conn *conn; + + BT_DBG("%s status 0x%2.2x", hdev->name, status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_SWITCH_ROLE); + if (!cp) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->bdaddr); + if (conn) + clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); + + hci_dev_unlock(hdev); +} + +static void hci_inquiry_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *ev = data; + struct discovery_state *discov = &hdev->discovery; + struct inquiry_entry *e; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_conn_check_pending(hdev); + + if (!test_and_clear_bit(HCI_INQUIRY, &hdev->flags)) + return; + + smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ + wake_up_bit(&hdev->flags, HCI_INQUIRY); + + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + return; + + hci_dev_lock(hdev); + + if (discov->state != DISCOVERY_FINDING) + goto unlock; + + if (list_empty(&discov->resolve)) { + /* When BR/EDR inquiry is active and no LE scanning is in + * progress, then change discovery state to indicate completion. + * + * When running LE scanning and BR/EDR inquiry simultaneously + * and the LE scan already finished, then change the discovery + * state to indicate completion. + */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || + !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + goto unlock; + } + + e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, NAME_NEEDED); + if (e && hci_resolve_name(hdev, e) == 0) { + e->name_state = NAME_PENDING; + hci_discovery_set_state(hdev, DISCOVERY_RESOLVING); + discov->name_resolve_timeout = jiffies + NAME_RESOLVE_DURATION; + } else { + /* When BR/EDR inquiry is active and no LE scanning is in + * progress, then change discovery state to indicate completion. 
+ * + * When running LE scanning and BR/EDR inquiry simultaneously + * and the LE scan already finished, then change the discovery + * state to indicate completion. + */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN) || + !test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata, + struct sk_buff *skb) +{ + struct hci_ev_inquiry_result *ev = edata; + struct inquiry_data data; + int i; + + if (!hci_ev_skb_pull(hdev, skb, HCI_EV_INQUIRY_RESULT, + flex_array_size(ev, info, ev->num))) + return; + + bt_dev_dbg(hdev, "num %d", ev->num); + + if (!ev->num) + return; + + if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) + return; + + hci_dev_lock(hdev); + + for (i = 0; i < ev->num; i++) { + struct inquiry_info *info = &ev->info[i]; + u32 flags; + + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = info->pscan_mode; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = HCI_RSSI_INVALID; + data.ssp_mode = 0x00; + + flags = hci_inquiry_cache_update(hdev, &data, false); + + mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, + info->dev_class, HCI_RSSI_INVALID, + flags, NULL, 0, NULL, 0, 0); + } + + hci_dev_unlock(hdev); +} + +static void hci_conn_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_conn_complete *ev = data; + struct hci_conn *conn; + u8 status = ev->status; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); + if (!conn) { + /* In case of error status and there is no connection pending + * just unlock as there is nothing to cleanup. + */ + if (ev->status) + goto unlock; + + /* Connection may not exist if auto-connected. Check the bredr + * allowlist to see if this device is allowed to auto connect. + * If link is an ACL type, create a connection class + * automatically. + * + * Auto-connect will only occur if the event filter is + * programmed with a given address. Right now, event filter is + * only used during suspend. + */ + if (ev->link_type == ACL_LINK && + hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, + &ev->bdaddr, + BDADDR_BREDR)) { + conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, + HCI_ROLE_SLAVE); + if (!conn) { + bt_dev_err(hdev, "no memory for new conn"); + goto unlock; + } + } else { + if (ev->link_type != SCO_LINK) + goto unlock; + + conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, + &ev->bdaddr); + if (!conn) + goto unlock; + + conn->type = SCO_LINK; + } + } + + /* The HCI_Connection_Complete event is only sent once per connection. + * Processing it more than once per connection can corrupt kernel memory. + * + * As the connection handle is set here for the first time, it indicates + * whether the connection is already set up. 
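+ * + * conn->handle is initialized to HCI_CONN_HANDLE_UNSET when the + * connection object is created, so any other value means this + * event has already been processed for this connection.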
+ */ + if (conn->handle != HCI_CONN_HANDLE_UNSET) { + bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); + goto unlock; + } + + if (!status) { + conn->handle = __le16_to_cpu(ev->handle); + if (conn->handle > HCI_CONN_HANDLE_MAX) { + bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", + conn->handle, HCI_CONN_HANDLE_MAX); + status = HCI_ERROR_INVALID_PARAMETERS; + goto done; + } + + if (conn->type == ACL_LINK) { + conn->state = BT_CONFIG; + hci_conn_hold(conn); + + if (!conn->out && !hci_conn_ssp_enabled(conn) && + !hci_find_link_key(hdev, &ev->bdaddr)) + conn->disc_timeout = HCI_PAIRING_TIMEOUT; + else + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + } else + conn->state = BT_CONNECTED; + + hci_debugfs_create_conn(conn); + hci_conn_add_sysfs(conn); + + if (test_bit(HCI_AUTH, &hdev->flags)) + set_bit(HCI_CONN_AUTH, &conn->flags); + + if (test_bit(HCI_ENCRYPT, &hdev->flags)) + set_bit(HCI_CONN_ENCRYPT, &conn->flags); + + /* Get remote features */ + if (conn->type == ACL_LINK) { + struct hci_cp_read_remote_features cp; + cp.handle = ev->handle; + hci_send_cmd(hdev, HCI_OP_READ_REMOTE_FEATURES, + sizeof(cp), &cp); + + hci_update_scan(hdev); + } + + /* Set packet type for incoming connection */ + if (!conn->out && hdev->hci_ver < BLUETOOTH_VER_2_0) { + struct hci_cp_change_conn_ptype cp; + cp.handle = ev->handle; + cp.pkt_type = cpu_to_le16(conn->pkt_type); + hci_send_cmd(hdev, HCI_OP_CHANGE_CONN_PTYPE, sizeof(cp), + &cp); + } + } + + if (conn->type == ACL_LINK) + hci_sco_setup(conn, ev->status); + +done: + if (status) { + hci_conn_failed(conn, status); + } else if (ev->link_type == SCO_LINK) { + switch (conn->setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_CVSD: + if (hdev->notify) + hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); + break; + } + + hci_connect_cfm(conn, status); + } + +unlock: + hci_dev_unlock(hdev); + + hci_conn_check_pending(hdev); +} + +static void hci_reject_conn(struct hci_dev *hdev, bdaddr_t *bdaddr) +{ + struct hci_cp_reject_conn_req cp; + + bacpy(&cp.bdaddr, bdaddr); + cp.reason = HCI_ERROR_REJ_BAD_ADDR; + hci_send_cmd(hdev, HCI_OP_REJECT_CONN_REQ, sizeof(cp), &cp); +} + +static void hci_conn_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_conn_request *ev = data; + int mask = hdev->link_mode; + struct inquiry_entry *ie; + struct hci_conn *conn; + __u8 flags = 0; + + bt_dev_dbg(hdev, "bdaddr %pMR type 0x%x", &ev->bdaddr, ev->link_type); + + /* Reject incoming connection from device with same BD ADDR against + * CVE-2020-26555 + */ + if (hdev && !bacmp(&hdev->bdaddr, &ev->bdaddr)) { + bt_dev_dbg(hdev, "Reject connection with same BD_ADDR %pMR\n", + &ev->bdaddr); + hci_reject_conn(hdev, &ev->bdaddr); + return; + } + + mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ev->link_type, + &flags); + + if (!(mask & HCI_LM_ACCEPT)) { + hci_reject_conn(hdev, &ev->bdaddr); + return; + } + + hci_dev_lock(hdev); + + if (hci_bdaddr_list_lookup(&hdev->reject_list, &ev->bdaddr, + BDADDR_BREDR)) { + hci_reject_conn(hdev, &ev->bdaddr); + goto unlock; + } + + /* Require HCI_CONNECTABLE or an accept list entry to accept the + * connection. These features are only touched through mgmt so + * only do the checks if HCI_MGMT is set. 
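+ * + * Without HCI_MGMT the accept decision has already been made by + * the HCI_LM_ACCEPT check above.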
+ */ + if (hci_dev_test_flag(hdev, HCI_MGMT) && + !hci_dev_test_flag(hdev, HCI_CONNECTABLE) && + !hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, &ev->bdaddr, + BDADDR_BREDR)) { + hci_reject_conn(hdev, &ev->bdaddr); + goto unlock; + } + + /* Connection accepted */ + + ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); + if (ie) + memcpy(ie->data.dev_class, ev->dev_class, 3); + + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, + &ev->bdaddr); + if (!conn) { + conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr, + HCI_ROLE_SLAVE); + if (!conn) { + bt_dev_err(hdev, "no memory for new connection"); + goto unlock; + } + } + + memcpy(conn->dev_class, ev->dev_class, 3); + + hci_dev_unlock(hdev); + + if (ev->link_type == ACL_LINK || + (!(flags & HCI_PROTO_DEFER) && !lmp_esco_capable(hdev))) { + struct hci_cp_accept_conn_req cp; + conn->state = BT_CONNECT; + + bacpy(&cp.bdaddr, &ev->bdaddr); + + if (lmp_rswitch_capable(hdev) && (mask & HCI_LM_MASTER)) + cp.role = 0x00; /* Become central */ + else + cp.role = 0x01; /* Remain peripheral */ + + hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); + } else if (!(flags & HCI_PROTO_DEFER)) { + struct hci_cp_accept_sync_conn_req cp; + conn->state = BT_CONNECT; + + bacpy(&cp.bdaddr, &ev->bdaddr); + cp.pkt_type = cpu_to_le16(conn->pkt_type); + + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.max_latency = cpu_to_le16(0xffff); + cp.content_format = cpu_to_le16(hdev->voice_setting); + cp.retrans_effort = 0xff; + + hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, sizeof(cp), + &cp); + } else { + conn->state = BT_CONNECT2; + hci_connect_cfm(conn, 0); + } + + return; +unlock: + hci_dev_unlock(hdev); +} + +static u8 hci_to_mgmt_reason(u8 err) +{ + switch (err) { + case HCI_ERROR_CONNECTION_TIMEOUT: + return MGMT_DEV_DISCONN_TIMEOUT; + case HCI_ERROR_REMOTE_USER_TERM: + case HCI_ERROR_REMOTE_LOW_RESOURCES: + case HCI_ERROR_REMOTE_POWER_OFF: + return MGMT_DEV_DISCONN_REMOTE; + case HCI_ERROR_LOCAL_HOST_TERM: + return MGMT_DEV_DISCONN_LOCAL_HOST; + default: + return MGMT_DEV_DISCONN_UNKNOWN; + } +} + +static void hci_disconn_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_disconn_complete *ev = data; + u8 reason; + struct hci_conn_params *params; + struct hci_conn *conn; + bool mgmt_connected; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (!conn) + goto unlock; + + if (ev->status) { + mgmt_disconnect_failed(hdev, &conn->dst, conn->type, + conn->dst_type, ev->status); + goto unlock; + } + + conn->state = BT_CLOSED; + + mgmt_connected = test_and_clear_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags); + + if (test_bit(HCI_CONN_AUTH_FAILURE, &conn->flags)) + reason = MGMT_DEV_DISCONN_AUTH_FAILURE; + else + reason = hci_to_mgmt_reason(ev->reason); + + mgmt_device_disconnected(hdev, &conn->dst, conn->type, conn->dst_type, + reason, mgmt_connected); + + if (conn->type == ACL_LINK) { + if (test_and_clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags)) + hci_remove_link_key(hdev, &conn->dst); + + hci_update_scan(hdev); + } + + params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type); + if (params) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_LINK_LOSS: + if (ev->reason != HCI_ERROR_CONNECTION_TIMEOUT) + break; + fallthrough; + + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + hci_pend_le_list_del_init(params); + hci_pend_le_list_add(params, 
&hdev->pend_le_conns); + hci_update_passive_scan(hdev); + break; + + default: + break; + } + } + + hci_disconn_cfm(conn, ev->reason); + + /* Re-enable advertising if necessary, since it might + * have been disabled by the connection. From the + * HCI_LE_Set_Advertise_Enable command description in + * the core specification (v4.0): + * "The Controller shall continue advertising until the Host + * issues an LE_Set_Advertise_Enable command with + * Advertising_Enable set to 0x00 (Advertising is disabled) + * or until a connection is created or until the Advertising + * is timed out due to Directed Advertising." + */ + if (conn->type == LE_LINK && conn->role == HCI_ROLE_SLAVE) { + hdev->cur_adv_instance = conn->adv_instance; + hci_enable_advertising(hdev); + } + + hci_conn_del(conn); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_auth_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_auth_complete *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (!conn) + goto unlock; + + if (!ev->status) { + clear_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); + set_bit(HCI_CONN_AUTH, &conn->flags); + conn->sec_level = conn->pending_sec_level; + } else { + if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) + set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); + + mgmt_auth_failed(conn, ev->status); + } + + clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); + + if (conn->state == BT_CONFIG) { + if (!ev->status && hci_conn_ssp_enabled(conn)) { + struct hci_cp_set_conn_encrypt cp; + cp.handle = ev->handle; + cp.encrypt = 0x01; + hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), + &cp); + } else { + conn->state = BT_CONNECTED; + hci_connect_cfm(conn, ev->status); + hci_conn_drop(conn); + } + } else { + hci_auth_cfm(conn, ev->status); + + hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + hci_conn_drop(conn); + } + + if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) { + if (!ev->status) { + struct hci_cp_set_conn_encrypt cp; + cp.handle = ev->handle; + cp.encrypt = 0x01; + hci_send_cmd(hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp), + &cp); + } else { + clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + hci_encrypt_cfm(conn, ev->status); + } + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_remote_name_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_remote_name *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_conn_check_pending(hdev); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + goto check_auth; + + if (ev->status == 0) + hci_check_pending_name(hdev, conn, &ev->bdaddr, ev->name, + strnlen(ev->name, HCI_MAX_NAME_LENGTH)); + else + hci_check_pending_name(hdev, conn, &ev->bdaddr, NULL, 0); + +check_auth: + if (!conn) + goto unlock; + + if (!hci_outgoing_auth_needed(hdev, conn)) + goto unlock; + + if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { + struct hci_cp_auth_requested cp; + + set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags); + + cp.handle = __cpu_to_le16(conn->handle); + hci_send_cmd(hdev, HCI_OP_AUTH_REQUESTED, sizeof(cp), &cp); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_encrypt_change *ev = data; + struct hci_conn *conn; + + 
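/* Encryption Change: update the connection's authentication and + * encryption state, re-check the link security requirements and, + * for encrypted ACL links, read the encryption key size. + */ +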
bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (!conn) + goto unlock; + + if (!ev->status) { + if (ev->encrypt) { + /* Encryption implies authentication */ + set_bit(HCI_CONN_AUTH, &conn->flags); + set_bit(HCI_CONN_ENCRYPT, &conn->flags); + conn->sec_level = conn->pending_sec_level; + + /* P-256 authentication key implies FIPS */ + if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256) + set_bit(HCI_CONN_FIPS, &conn->flags); + + if ((conn->type == ACL_LINK && ev->encrypt == 0x02) || + conn->type == LE_LINK) + set_bit(HCI_CONN_AES_CCM, &conn->flags); + } else { + clear_bit(HCI_CONN_ENCRYPT, &conn->flags); + clear_bit(HCI_CONN_AES_CCM, &conn->flags); + } + } + + /* We should disregard the current RPA and generate a new one + * whenever the encryption procedure fails. + */ + if (ev->status && conn->type == LE_LINK) { + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, true); + } + + clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + + /* Check link security requirements are met */ + if (!hci_conn_check_link_mode(conn)) + ev->status = HCI_ERROR_AUTH_FAILURE; + + if (ev->status && conn->state == BT_CONNECTED) { + if (ev->status == HCI_ERROR_PIN_OR_KEY_MISSING) + set_bit(HCI_CONN_AUTH_FAILURE, &conn->flags); + + /* Notify upper layers so they can cleanup before + * disconnecting. + */ + hci_encrypt_cfm(conn, ev->status); + hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); + hci_conn_drop(conn); + goto unlock; + } + + /* Try reading the encryption key size for encrypted ACL links */ + if (!ev->status && ev->encrypt && conn->type == ACL_LINK) { + struct hci_cp_read_enc_key_size cp; + + /* Only send HCI_Read_Encryption_Key_Size if the + * controller really supports it. If it doesn't, assume + * the default size (16). + */ + if (!(hdev->commands[20] & 0x10)) { + conn->enc_key_size = HCI_LINK_KEY_SIZE; + goto notify; + } + + cp.handle = cpu_to_le16(conn->handle); + if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, + sizeof(cp), &cp)) { + bt_dev_err(hdev, "sending read key size failed"); + conn->enc_key_size = HCI_LINK_KEY_SIZE; + goto notify; + } + + goto unlock; + } + + /* Set the default Authenticated Payload Timeout after + * an LE Link is established. As per Core Spec v5.0, Vol 2, Part B + * Section 3.3, the HCI command WRITE_AUTH_PAYLOAD_TIMEOUT should be + * sent when the link is active and Encryption is enabled, the conn + * type can be either LE or ACL and controller must support LMP Ping. + * Ensure for AES-CCM encryption as well. 
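+ * + * In practice: the link must be encrypted with AES-CCM and the + * controller must support the ping feature for the given link + * type (LMP ping for ACL, LE ping for LE).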
+ */ + if (test_bit(HCI_CONN_ENCRYPT, &conn->flags) && + test_bit(HCI_CONN_AES_CCM, &conn->flags) && + ((conn->type == ACL_LINK && lmp_ping_capable(hdev)) || + (conn->type == LE_LINK && (hdev->le_features[0] & HCI_LE_PING)))) { + struct hci_cp_write_auth_payload_to cp; + + cp.handle = cpu_to_le16(conn->handle); + cp.timeout = cpu_to_le16(hdev->auth_payload_timeout); + hci_send_cmd(conn->hdev, HCI_OP_WRITE_AUTH_PAYLOAD_TO, + sizeof(cp), &cp); + } + +notify: + hci_encrypt_cfm(conn, ev->status); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_change_link_key_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_change_link_key_complete *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + if (!ev->status) + set_bit(HCI_CONN_SECURE, &conn->flags); + + clear_bit(HCI_CONN_AUTH_PEND, &conn->flags); + + hci_key_change_cfm(conn, ev->status); + } + + hci_dev_unlock(hdev); +} + +static void hci_remote_features_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_remote_features *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (!conn) + goto unlock; + + if (!ev->status) + memcpy(conn->features[0], ev->features, 8); + + if (conn->state != BT_CONFIG) + goto unlock; + + if (!ev->status && lmp_ext_feat_capable(hdev) && + lmp_ext_feat_capable(conn)) { + struct hci_cp_read_remote_ext_features cp; + cp.handle = ev->handle; + cp.page = 0x01; + hci_send_cmd(hdev, HCI_OP_READ_REMOTE_EXT_FEATURES, + sizeof(cp), &cp); + goto unlock; + } + + if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { + struct hci_cp_remote_name_req cp; + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.pscan_rep_mode = 0x02; + hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); + } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + mgmt_device_connected(hdev, conn, NULL, 0); + + if (!hci_outgoing_auth_needed(hdev, conn)) { + conn->state = BT_CONNECTED; + hci_connect_cfm(conn, ev->status); + hci_conn_drop(conn); + } + +unlock: + hci_dev_unlock(hdev); +} + +static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd) +{ + cancel_delayed_work(&hdev->cmd_timer); + + rcu_read_lock(); + if (!test_bit(HCI_RESET, &hdev->flags)) { + if (ncmd) { + cancel_delayed_work(&hdev->ncmd_timer); + atomic_set(&hdev->cmd_cnt, 1); + } else { + if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) + queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer, + HCI_NCMD_TIMEOUT); + } + } + rcu_read_unlock(); +} + +static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_read_buffer_size_v2 *rp = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + hdev->le_mtu = __le16_to_cpu(rp->acl_mtu); + hdev->le_pkts = rp->acl_max_pkt; + hdev->iso_mtu = __le16_to_cpu(rp->iso_mtu); + hdev->iso_pkts = rp->iso_max_pkt; + + hdev->le_cnt = hdev->le_pkts; + hdev->iso_cnt = hdev->iso_pkts; + + BT_DBG("%s acl mtu %d:%d iso mtu %d:%d", hdev->name, hdev->acl_mtu, + hdev->acl_pkts, hdev->iso_mtu, hdev->iso_pkts); + + return rp->status; +} + +static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct 
hci_rp_le_set_cig_params *rp = data; + struct hci_conn *conn; + int i = 0; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + hci_dev_lock(hdev); + + if (rp->status) { + while ((conn = hci_conn_hash_lookup_cig(hdev, rp->cig_id))) { + conn->state = BT_CLOSED; + hci_connect_cfm(conn, rp->status); + hci_conn_del(conn); + } + goto unlock; + } + + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id || + conn->state == BT_CONNECTED) + continue; + + conn->handle = __le16_to_cpu(rp->handle[i++]); + + bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn, + conn->handle, conn->link); + + /* Create CIS if LE is already connected */ + if (conn->link && conn->link->state == BT_CONNECTED) { + rcu_read_unlock(); + hci_le_create_cis(conn->link); + rcu_read_lock(); + } + + if (i == rp->num_handles) + break; + } + + rcu_read_unlock(); + +unlock: + hci_dev_unlock(hdev); + + return rp->status; +} + +static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_rp_le_setup_iso_path *rp = data; + struct hci_cp_le_setup_iso_path *cp; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SETUP_ISO_PATH); + if (!cp) + return rp->status; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle)); + if (!conn) + goto unlock; + + if (rp->status) { + hci_connect_cfm(conn, rp->status); + hci_conn_del(conn); + goto unlock; + } + + switch (cp->direction) { + /* Input (Host to Controller) */ + case 0x00: + /* Only confirm connection if output only */ + if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu) + hci_connect_cfm(conn, rp->status); + break; + /* Output (Controller to Host) */ + case 0x01: + /* Confirm connection since conn->iso_qos is always configured + * last. 
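+ * (a bidirectional CIS is set up with one LE Setup ISO Path + * command per direction)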
+ */ + hci_connect_cfm(conn, rp->status); + break; + } + +unlock: + hci_dev_unlock(hdev); + return rp->status; +} + +static void hci_cs_le_create_big(struct hci_dev *hdev, u8 status) +{ + bt_dev_dbg(hdev, "status 0x%2.2x", status); +} + +static u8 hci_cc_set_per_adv_param(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + struct hci_cp_le_set_per_adv_params *cp; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS); + if (!cp) + return rp->status; + + /* TODO: set the conn state */ + return rp->status; +} + +static u8 hci_cc_le_set_per_adv_enable(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_status *rp = data; + __u8 *sent; + + bt_dev_dbg(hdev, "status 0x%2.2x", rp->status); + + if (rp->status) + return rp->status; + + sent = hci_sent_cmd_data(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE); + if (!sent) + return rp->status; + + hci_dev_lock(hdev); + + if (*sent) + hci_dev_set_flag(hdev, HCI_LE_PER_ADV); + else + hci_dev_clear_flag(hdev, HCI_LE_PER_ADV); + + hci_dev_unlock(hdev); + + return rp->status; +} + +#define HCI_CC_VL(_op, _func, _min, _max) \ +{ \ + .op = _op, \ + .func = _func, \ + .min_len = _min, \ + .max_len = _max, \ +} + +#define HCI_CC(_op, _func, _len) \ + HCI_CC_VL(_op, _func, _len, _len) + +#define HCI_CC_STATUS(_op, _func) \ + HCI_CC(_op, _func, sizeof(struct hci_ev_status)) + +static const struct hci_cc { + u16 op; + u8 (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); + u16 min_len; + u16 max_len; +} hci_cc_table[] = { + HCI_CC_STATUS(HCI_OP_INQUIRY_CANCEL, hci_cc_inquiry_cancel), + HCI_CC_STATUS(HCI_OP_PERIODIC_INQ, hci_cc_periodic_inq), + HCI_CC_STATUS(HCI_OP_EXIT_PERIODIC_INQ, hci_cc_exit_periodic_inq), + HCI_CC_STATUS(HCI_OP_REMOTE_NAME_REQ_CANCEL, + hci_cc_remote_name_req_cancel), + HCI_CC(HCI_OP_ROLE_DISCOVERY, hci_cc_role_discovery, + sizeof(struct hci_rp_role_discovery)), + HCI_CC(HCI_OP_READ_LINK_POLICY, hci_cc_read_link_policy, + sizeof(struct hci_rp_read_link_policy)), + HCI_CC(HCI_OP_WRITE_LINK_POLICY, hci_cc_write_link_policy, + sizeof(struct hci_rp_write_link_policy)), + HCI_CC(HCI_OP_READ_DEF_LINK_POLICY, hci_cc_read_def_link_policy, + sizeof(struct hci_rp_read_def_link_policy)), + HCI_CC_STATUS(HCI_OP_WRITE_DEF_LINK_POLICY, + hci_cc_write_def_link_policy), + HCI_CC_STATUS(HCI_OP_RESET, hci_cc_reset), + HCI_CC(HCI_OP_READ_STORED_LINK_KEY, hci_cc_read_stored_link_key, + sizeof(struct hci_rp_read_stored_link_key)), + HCI_CC(HCI_OP_DELETE_STORED_LINK_KEY, hci_cc_delete_stored_link_key, + sizeof(struct hci_rp_delete_stored_link_key)), + HCI_CC_STATUS(HCI_OP_WRITE_LOCAL_NAME, hci_cc_write_local_name), + HCI_CC(HCI_OP_READ_LOCAL_NAME, hci_cc_read_local_name, + sizeof(struct hci_rp_read_local_name)), + HCI_CC_STATUS(HCI_OP_WRITE_AUTH_ENABLE, hci_cc_write_auth_enable), + HCI_CC_STATUS(HCI_OP_WRITE_ENCRYPT_MODE, hci_cc_write_encrypt_mode), + HCI_CC_STATUS(HCI_OP_WRITE_SCAN_ENABLE, hci_cc_write_scan_enable), + HCI_CC_STATUS(HCI_OP_SET_EVENT_FLT, hci_cc_set_event_filter), + HCI_CC(HCI_OP_READ_CLASS_OF_DEV, hci_cc_read_class_of_dev, + sizeof(struct hci_rp_read_class_of_dev)), + HCI_CC_STATUS(HCI_OP_WRITE_CLASS_OF_DEV, hci_cc_write_class_of_dev), + HCI_CC(HCI_OP_READ_VOICE_SETTING, hci_cc_read_voice_setting, + sizeof(struct hci_rp_read_voice_setting)), + HCI_CC_STATUS(HCI_OP_WRITE_VOICE_SETTING, hci_cc_write_voice_setting), + HCI_CC(HCI_OP_READ_NUM_SUPPORTED_IAC, 
hci_cc_read_num_supported_iac, + sizeof(struct hci_rp_read_num_supported_iac)), + HCI_CC_STATUS(HCI_OP_WRITE_SSP_MODE, hci_cc_write_ssp_mode), + HCI_CC_STATUS(HCI_OP_WRITE_SC_SUPPORT, hci_cc_write_sc_support), + HCI_CC(HCI_OP_READ_AUTH_PAYLOAD_TO, hci_cc_read_auth_payload_timeout, + sizeof(struct hci_rp_read_auth_payload_to)), + HCI_CC(HCI_OP_WRITE_AUTH_PAYLOAD_TO, hci_cc_write_auth_payload_timeout, + sizeof(struct hci_rp_write_auth_payload_to)), + HCI_CC(HCI_OP_READ_LOCAL_VERSION, hci_cc_read_local_version, + sizeof(struct hci_rp_read_local_version)), + HCI_CC(HCI_OP_READ_LOCAL_COMMANDS, hci_cc_read_local_commands, + sizeof(struct hci_rp_read_local_commands)), + HCI_CC(HCI_OP_READ_LOCAL_FEATURES, hci_cc_read_local_features, + sizeof(struct hci_rp_read_local_features)), + HCI_CC(HCI_OP_READ_LOCAL_EXT_FEATURES, hci_cc_read_local_ext_features, + sizeof(struct hci_rp_read_local_ext_features)), + HCI_CC(HCI_OP_READ_BUFFER_SIZE, hci_cc_read_buffer_size, + sizeof(struct hci_rp_read_buffer_size)), + HCI_CC(HCI_OP_READ_BD_ADDR, hci_cc_read_bd_addr, + sizeof(struct hci_rp_read_bd_addr)), + HCI_CC(HCI_OP_READ_LOCAL_PAIRING_OPTS, hci_cc_read_local_pairing_opts, + sizeof(struct hci_rp_read_local_pairing_opts)), + HCI_CC(HCI_OP_READ_PAGE_SCAN_ACTIVITY, hci_cc_read_page_scan_activity, + sizeof(struct hci_rp_read_page_scan_activity)), + HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, + hci_cc_write_page_scan_activity), + HCI_CC(HCI_OP_READ_PAGE_SCAN_TYPE, hci_cc_read_page_scan_type, + sizeof(struct hci_rp_read_page_scan_type)), + HCI_CC_STATUS(HCI_OP_WRITE_PAGE_SCAN_TYPE, hci_cc_write_page_scan_type), + HCI_CC(HCI_OP_READ_DATA_BLOCK_SIZE, hci_cc_read_data_block_size, + sizeof(struct hci_rp_read_data_block_size)), + HCI_CC(HCI_OP_READ_FLOW_CONTROL_MODE, hci_cc_read_flow_control_mode, + sizeof(struct hci_rp_read_flow_control_mode)), + HCI_CC(HCI_OP_READ_LOCAL_AMP_INFO, hci_cc_read_local_amp_info, + sizeof(struct hci_rp_read_local_amp_info)), + HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock, + sizeof(struct hci_rp_read_clock)), + HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, + sizeof(struct hci_rp_read_enc_key_size)), + HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power, + sizeof(struct hci_rp_read_inq_rsp_tx_power)), + HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, + hci_cc_read_def_err_data_reporting, + sizeof(struct hci_rp_read_def_err_data_reporting)), + HCI_CC_STATUS(HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, + hci_cc_write_def_err_data_reporting), + HCI_CC(HCI_OP_PIN_CODE_REPLY, hci_cc_pin_code_reply, + sizeof(struct hci_rp_pin_code_reply)), + HCI_CC(HCI_OP_PIN_CODE_NEG_REPLY, hci_cc_pin_code_neg_reply, + sizeof(struct hci_rp_pin_code_neg_reply)), + HCI_CC(HCI_OP_READ_LOCAL_OOB_DATA, hci_cc_read_local_oob_data, + sizeof(struct hci_rp_read_local_oob_data)), + HCI_CC(HCI_OP_READ_LOCAL_OOB_EXT_DATA, hci_cc_read_local_oob_ext_data, + sizeof(struct hci_rp_read_local_oob_ext_data)), + HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE, hci_cc_le_read_buffer_size, + sizeof(struct hci_rp_le_read_buffer_size)), + HCI_CC(HCI_OP_LE_READ_LOCAL_FEATURES, hci_cc_le_read_local_features, + sizeof(struct hci_rp_le_read_local_features)), + HCI_CC(HCI_OP_LE_READ_ADV_TX_POWER, hci_cc_le_read_adv_tx_power, + sizeof(struct hci_rp_le_read_adv_tx_power)), + HCI_CC(HCI_OP_USER_CONFIRM_REPLY, hci_cc_user_confirm_reply, + sizeof(struct hci_rp_user_confirm_reply)), + HCI_CC(HCI_OP_USER_CONFIRM_NEG_REPLY, hci_cc_user_confirm_neg_reply, + sizeof(struct hci_rp_user_confirm_reply)), + HCI_CC(HCI_OP_USER_PASSKEY_REPLY, 
hci_cc_user_passkey_reply, + sizeof(struct hci_rp_user_confirm_reply)), + HCI_CC(HCI_OP_USER_PASSKEY_NEG_REPLY, hci_cc_user_passkey_neg_reply, + sizeof(struct hci_rp_user_confirm_reply)), + HCI_CC_STATUS(HCI_OP_LE_SET_RANDOM_ADDR, hci_cc_le_set_random_addr), + HCI_CC_STATUS(HCI_OP_LE_SET_ADV_ENABLE, hci_cc_le_set_adv_enable), + HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_PARAM, hci_cc_le_set_scan_param), + HCI_CC_STATUS(HCI_OP_LE_SET_SCAN_ENABLE, hci_cc_le_set_scan_enable), + HCI_CC(HCI_OP_LE_READ_ACCEPT_LIST_SIZE, + hci_cc_le_read_accept_list_size, + sizeof(struct hci_rp_le_read_accept_list_size)), + HCI_CC_STATUS(HCI_OP_LE_CLEAR_ACCEPT_LIST, hci_cc_le_clear_accept_list), + HCI_CC_STATUS(HCI_OP_LE_ADD_TO_ACCEPT_LIST, + hci_cc_le_add_to_accept_list), + HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_ACCEPT_LIST, + hci_cc_le_del_from_accept_list), + HCI_CC(HCI_OP_LE_READ_SUPPORTED_STATES, hci_cc_le_read_supported_states, + sizeof(struct hci_rp_le_read_supported_states)), + HCI_CC(HCI_OP_LE_READ_DEF_DATA_LEN, hci_cc_le_read_def_data_len, + sizeof(struct hci_rp_le_read_def_data_len)), + HCI_CC_STATUS(HCI_OP_LE_WRITE_DEF_DATA_LEN, + hci_cc_le_write_def_data_len), + HCI_CC_STATUS(HCI_OP_LE_ADD_TO_RESOLV_LIST, + hci_cc_le_add_to_resolv_list), + HCI_CC_STATUS(HCI_OP_LE_DEL_FROM_RESOLV_LIST, + hci_cc_le_del_from_resolv_list), + HCI_CC_STATUS(HCI_OP_LE_CLEAR_RESOLV_LIST, + hci_cc_le_clear_resolv_list), + HCI_CC(HCI_OP_LE_READ_RESOLV_LIST_SIZE, hci_cc_le_read_resolv_list_size, + sizeof(struct hci_rp_le_read_resolv_list_size)), + HCI_CC_STATUS(HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, + hci_cc_le_set_addr_resolution_enable), + HCI_CC(HCI_OP_LE_READ_MAX_DATA_LEN, hci_cc_le_read_max_data_len, + sizeof(struct hci_rp_le_read_max_data_len)), + HCI_CC_STATUS(HCI_OP_WRITE_LE_HOST_SUPPORTED, + hci_cc_write_le_host_supported), + HCI_CC_STATUS(HCI_OP_LE_SET_ADV_PARAM, hci_cc_set_adv_param), + HCI_CC(HCI_OP_READ_RSSI, hci_cc_read_rssi, + sizeof(struct hci_rp_read_rssi)), + HCI_CC(HCI_OP_READ_TX_POWER, hci_cc_read_tx_power, + sizeof(struct hci_rp_read_tx_power)), + HCI_CC_STATUS(HCI_OP_WRITE_SSP_DEBUG_MODE, hci_cc_write_ssp_debug_mode), + HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_PARAMS, + hci_cc_le_set_ext_scan_param), + HCI_CC_STATUS(HCI_OP_LE_SET_EXT_SCAN_ENABLE, + hci_cc_le_set_ext_scan_enable), + HCI_CC_STATUS(HCI_OP_LE_SET_DEFAULT_PHY, hci_cc_le_set_default_phy), + HCI_CC(HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, + hci_cc_le_read_num_adv_sets, + sizeof(struct hci_rp_le_read_num_supported_adv_sets)), + HCI_CC(HCI_OP_LE_SET_EXT_ADV_PARAMS, hci_cc_set_ext_adv_param, + sizeof(struct hci_rp_le_set_ext_adv_params)), + HCI_CC_STATUS(HCI_OP_LE_SET_EXT_ADV_ENABLE, + hci_cc_le_set_ext_adv_enable), + HCI_CC_STATUS(HCI_OP_LE_SET_ADV_SET_RAND_ADDR, + hci_cc_le_set_adv_set_random_addr), + HCI_CC_STATUS(HCI_OP_LE_REMOVE_ADV_SET, hci_cc_le_remove_adv_set), + HCI_CC_STATUS(HCI_OP_LE_CLEAR_ADV_SETS, hci_cc_le_clear_adv_sets), + HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_PARAMS, hci_cc_set_per_adv_param), + HCI_CC_STATUS(HCI_OP_LE_SET_PER_ADV_ENABLE, + hci_cc_le_set_per_adv_enable), + HCI_CC(HCI_OP_LE_READ_TRANSMIT_POWER, hci_cc_le_read_transmit_power, + sizeof(struct hci_rp_le_read_transmit_power)), + HCI_CC_STATUS(HCI_OP_LE_SET_PRIVACY_MODE, hci_cc_le_set_privacy_mode), + HCI_CC(HCI_OP_LE_READ_BUFFER_SIZE_V2, hci_cc_le_read_buffer_size_v2, + sizeof(struct hci_rp_le_read_buffer_size_v2)), + HCI_CC_VL(HCI_OP_LE_SET_CIG_PARAMS, hci_cc_le_set_cig_params, + sizeof(struct hci_rp_le_set_cig_params), HCI_MAX_EVENT_SIZE), + HCI_CC(HCI_OP_LE_SETUP_ISO_PATH, 
hci_cc_le_setup_iso_path, + sizeof(struct hci_rp_le_setup_iso_path)), +}; + +static u8 hci_cc_func(struct hci_dev *hdev, const struct hci_cc *cc, + struct sk_buff *skb) +{ + void *data; + + if (skb->len < cc->min_len) { + bt_dev_err(hdev, "unexpected cc 0x%4.4x length: %u < %u", + cc->op, skb->len, cc->min_len); + return HCI_ERROR_UNSPECIFIED; + } + + /* Just warn if the length is over max_len; it may still be possible + * to partially parse the cc, so leave it to the callback to decide + * whether that is acceptable. + */ + if (skb->len > cc->max_len) + bt_dev_warn(hdev, "unexpected cc 0x%4.4x length: %u > %u", + cc->op, skb->len, cc->max_len); + + data = hci_cc_skb_pull(hdev, skb, cc->op, cc->min_len); + if (!data) + return HCI_ERROR_UNSPECIFIED; + + return cc->func(hdev, data, skb); +} + +static void hci_cmd_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb, u16 *opcode, u8 *status, + hci_req_complete_t *req_complete, + hci_req_complete_skb_t *req_complete_skb) +{ + struct hci_ev_cmd_complete *ev = data; + int i; + + *opcode = __le16_to_cpu(ev->opcode); + + bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); + + for (i = 0; i < ARRAY_SIZE(hci_cc_table); i++) { + if (hci_cc_table[i].op == *opcode) { + *status = hci_cc_func(hdev, &hci_cc_table[i], skb); + break; + } + } + + if (i == ARRAY_SIZE(hci_cc_table)) { + /* Unknown opcode, assume byte 0 contains the status, so + * that e.g. __hci_cmd_sync() properly returns errors + * for vendor specific commands sent by HCI drivers. + * If a vendor doesn't actually follow this convention we may + * need to introduce a vendor CC table in order to properly set + * the status. + */ + *status = skb->data[0]; + } + + handle_cmd_cnt_and_timer(hdev, ev->ncmd); + + hci_req_cmd_complete(hdev, *opcode, *status, req_complete, + req_complete_skb); + + if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { + bt_dev_err(hdev, + "unexpected event for opcode 0x%4.4x", *opcode); + return; + } + + if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) + queue_work(hdev->workqueue, &hdev->cmd_work); +} + +static void hci_cs_le_create_cis(struct hci_dev *hdev, u8 status) +{ + struct hci_cp_le_create_cis *cp; + int i; + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + if (!status) + return; + + cp = hci_sent_cmd_data(hdev, HCI_OP_LE_CREATE_CIS); + if (!cp) + return; + + hci_dev_lock(hdev); + + /* Remove connection if command failed */ + for (i = 0; cp->num_cis; cp->num_cis--, i++) { + struct hci_conn *conn; + u16 handle; + + handle = __le16_to_cpu(cp->cis[i].cis_handle); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (conn) { + conn->state = BT_CLOSED; + hci_connect_cfm(conn, status); + hci_conn_del(conn); + } + } + + hci_dev_unlock(hdev); +} + +#define HCI_CS(_op, _func) \ +{ \ + .op = _op, \ + .func = _func, \ +} + +static const struct hci_cs { + u16 op; + void (*func)(struct hci_dev *hdev, __u8 status); +} hci_cs_table[] = { + HCI_CS(HCI_OP_INQUIRY, hci_cs_inquiry), + HCI_CS(HCI_OP_CREATE_CONN, hci_cs_create_conn), + HCI_CS(HCI_OP_DISCONNECT, hci_cs_disconnect), + HCI_CS(HCI_OP_ADD_SCO, hci_cs_add_sco), + HCI_CS(HCI_OP_AUTH_REQUESTED, hci_cs_auth_requested), + HCI_CS(HCI_OP_SET_CONN_ENCRYPT, hci_cs_set_conn_encrypt), + HCI_CS(HCI_OP_REMOTE_NAME_REQ, hci_cs_remote_name_req), + HCI_CS(HCI_OP_READ_REMOTE_FEATURES, hci_cs_read_remote_features), + HCI_CS(HCI_OP_READ_REMOTE_EXT_FEATURES, + hci_cs_read_remote_ext_features), + HCI_CS(HCI_OP_SETUP_SYNC_CONN, hci_cs_setup_sync_conn), + HCI_CS(HCI_OP_ENHANCED_SETUP_SYNC_CONN, + 
hci_cs_enhanced_setup_sync_conn), + HCI_CS(HCI_OP_SNIFF_MODE, hci_cs_sniff_mode), + HCI_CS(HCI_OP_EXIT_SNIFF_MODE, hci_cs_exit_sniff_mode), + HCI_CS(HCI_OP_SWITCH_ROLE, hci_cs_switch_role), + HCI_CS(HCI_OP_LE_CREATE_CONN, hci_cs_le_create_conn), + HCI_CS(HCI_OP_LE_READ_REMOTE_FEATURES, hci_cs_le_read_remote_features), + HCI_CS(HCI_OP_LE_START_ENC, hci_cs_le_start_enc), + HCI_CS(HCI_OP_LE_EXT_CREATE_CONN, hci_cs_le_ext_create_conn), + HCI_CS(HCI_OP_LE_CREATE_CIS, hci_cs_le_create_cis), + HCI_CS(HCI_OP_LE_CREATE_BIG, hci_cs_le_create_big), +}; + +static void hci_cmd_status_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb, u16 *opcode, u8 *status, + hci_req_complete_t *req_complete, + hci_req_complete_skb_t *req_complete_skb) +{ + struct hci_ev_cmd_status *ev = data; + int i; + + *opcode = __le16_to_cpu(ev->opcode); + *status = ev->status; + + bt_dev_dbg(hdev, "opcode 0x%4.4x", *opcode); + + for (i = 0; i < ARRAY_SIZE(hci_cs_table); i++) { + if (hci_cs_table[i].op == *opcode) { + hci_cs_table[i].func(hdev, ev->status); + break; + } + } + + handle_cmd_cnt_and_timer(hdev, ev->ncmd); + + /* Indicate request completion if the command failed. Also, if + * we're not waiting for a special event and we get a success + * command status we should try to flag the request as completed + * (since for this kind of commands there will not be a command + * complete event). + */ + if (ev->status || (hdev->sent_cmd && !hci_skb_event(hdev->sent_cmd))) { + hci_req_cmd_complete(hdev, *opcode, ev->status, req_complete, + req_complete_skb); + if (hci_dev_test_flag(hdev, HCI_CMD_PENDING)) { + bt_dev_err(hdev, "unexpected event for opcode 0x%4.4x", + *opcode); + return; + } + } + + if (atomic_read(&hdev->cmd_cnt) && !skb_queue_empty(&hdev->cmd_q)) + queue_work(hdev->workqueue, &hdev->cmd_work); +} + +static void hci_hardware_error_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_hardware_error *ev = data; + + bt_dev_dbg(hdev, "code 0x%2.2x", ev->code); + + hdev->hw_error_code = ev->code; + + queue_work(hdev->req_workqueue, &hdev->error_reset); +} + +static void hci_role_change_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_role_change *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) { + if (!ev->status) + conn->role = ev->role; + + clear_bit(HCI_CONN_RSWITCH_PEND, &conn->flags); + + hci_role_switch_cfm(conn, ev->status, ev->role); + } + + hci_dev_unlock(hdev); +} + +static void hci_num_comp_pkts_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_num_comp_pkts *ev = data; + int i; + + if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_PKTS, + flex_array_size(ev, handles, ev->num))) + return; + + if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_PACKET_BASED) { + bt_dev_err(hdev, "wrong event for mode %d", hdev->flow_ctl_mode); + return; + } + + bt_dev_dbg(hdev, "num %d", ev->num); + + for (i = 0; i < ev->num; i++) { + struct hci_comp_pkts_info *info = &ev->handles[i]; + struct hci_conn *conn; + __u16 handle, count; + + handle = __le16_to_cpu(info->handle); + count = __le16_to_cpu(info->count); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (!conn) + continue; + + conn->sent -= count; + + switch (conn->type) { + case ACL_LINK: + hdev->acl_cnt += count; + if (hdev->acl_cnt > hdev->acl_pkts) + hdev->acl_cnt = hdev->acl_pkts; + break; + + case LE_LINK: + if (hdev->le_pkts) { + 
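/* The controller has a dedicated LE buffer pool */ +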
hdev->le_cnt += count; + if (hdev->le_cnt > hdev->le_pkts) + hdev->le_cnt = hdev->le_pkts; + } else { + hdev->acl_cnt += count; + if (hdev->acl_cnt > hdev->acl_pkts) + hdev->acl_cnt = hdev->acl_pkts; + } + break; + + case SCO_LINK: + hdev->sco_cnt += count; + if (hdev->sco_cnt > hdev->sco_pkts) + hdev->sco_cnt = hdev->sco_pkts; + break; + + case ISO_LINK: + if (hdev->iso_pkts) { + hdev->iso_cnt += count; + if (hdev->iso_cnt > hdev->iso_pkts) + hdev->iso_cnt = hdev->iso_pkts; + } else if (hdev->le_pkts) { + hdev->le_cnt += count; + if (hdev->le_cnt > hdev->le_pkts) + hdev->le_cnt = hdev->le_pkts; + } else { + hdev->acl_cnt += count; + if (hdev->acl_cnt > hdev->acl_pkts) + hdev->acl_cnt = hdev->acl_pkts; + } + break; + + default: + bt_dev_err(hdev, "unknown type %d conn %p", + conn->type, conn); + break; + } + } + + queue_work(hdev->workqueue, &hdev->tx_work); +} + +static struct hci_conn *__hci_conn_lookup_handle(struct hci_dev *hdev, + __u16 handle) +{ + struct hci_chan *chan; + + switch (hdev->dev_type) { + case HCI_PRIMARY: + return hci_conn_hash_lookup_handle(hdev, handle); + case HCI_AMP: + chan = hci_chan_lookup_handle(hdev, handle); + if (chan) + return chan->conn; + break; + default: + bt_dev_err(hdev, "unknown dev_type %d", hdev->dev_type); + break; + } + + return NULL; +} + +static void hci_num_comp_blocks_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_num_comp_blocks *ev = data; + int i; + + if (!hci_ev_skb_pull(hdev, skb, HCI_EV_NUM_COMP_BLOCKS, + flex_array_size(ev, handles, ev->num_hndl))) + return; + + if (hdev->flow_ctl_mode != HCI_FLOW_CTL_MODE_BLOCK_BASED) { + bt_dev_err(hdev, "wrong event for mode %d", + hdev->flow_ctl_mode); + return; + } + + bt_dev_dbg(hdev, "num_blocks %d num_hndl %d", ev->num_blocks, + ev->num_hndl); + + for (i = 0; i < ev->num_hndl; i++) { + struct hci_comp_blocks_info *info = &ev->handles[i]; + struct hci_conn *conn = NULL; + __u16 handle, block_count; + + handle = __le16_to_cpu(info->handle); + block_count = __le16_to_cpu(info->blocks); + + conn = __hci_conn_lookup_handle(hdev, handle); + if (!conn) + continue; + + conn->sent -= block_count; + + switch (conn->type) { + case ACL_LINK: + case AMP_LINK: + hdev->block_cnt += block_count; + if (hdev->block_cnt > hdev->num_blocks) + hdev->block_cnt = hdev->num_blocks; + break; + + default: + bt_dev_err(hdev, "unknown type %d conn %p", + conn->type, conn); + break; + } + } + + queue_work(hdev->workqueue, &hdev->tx_work); +} + +static void hci_mode_change_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_mode_change *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + conn->mode = ev->mode; + + if (!test_and_clear_bit(HCI_CONN_MODE_CHANGE_PEND, + &conn->flags)) { + if (conn->mode == HCI_CM_ACTIVE) + set_bit(HCI_CONN_POWER_SAVE, &conn->flags); + else + clear_bit(HCI_CONN_POWER_SAVE, &conn->flags); + } + + if (test_and_clear_bit(HCI_CONN_SCO_SETUP_PEND, &conn->flags)) + hci_sco_setup(conn, ev->status); + } + + hci_dev_unlock(hdev); +} + +static void hci_pin_code_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_pin_code_req *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (!conn) + goto unlock; + + if (conn->state == BT_CONNECTED) { + hci_conn_hold(conn); + 
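/* Extend the disconnect timeout so that the pairing + * procedure has time to complete + */ +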
conn->disc_timeout = HCI_PAIRING_TIMEOUT; + hci_conn_drop(conn); + } + + if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && + !test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags)) { + hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, + sizeof(ev->bdaddr), &ev->bdaddr); + } else if (hci_dev_test_flag(hdev, HCI_MGMT)) { + u8 secure; + + if (conn->pending_sec_level == BT_SECURITY_HIGH) + secure = 1; + else + secure = 0; + + mgmt_pin_code_request(hdev, &ev->bdaddr, secure); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void conn_set_key(struct hci_conn *conn, u8 key_type, u8 pin_len) +{ + if (key_type == HCI_LK_CHANGED_COMBINATION) + return; + + conn->pin_length = pin_len; + conn->key_type = key_type; + + switch (key_type) { + case HCI_LK_LOCAL_UNIT: + case HCI_LK_REMOTE_UNIT: + case HCI_LK_DEBUG_COMBINATION: + return; + case HCI_LK_COMBINATION: + if (pin_len == 16) + conn->pending_sec_level = BT_SECURITY_HIGH; + else + conn->pending_sec_level = BT_SECURITY_MEDIUM; + break; + case HCI_LK_UNAUTH_COMBINATION_P192: + case HCI_LK_UNAUTH_COMBINATION_P256: + conn->pending_sec_level = BT_SECURITY_MEDIUM; + break; + case HCI_LK_AUTH_COMBINATION_P192: + conn->pending_sec_level = BT_SECURITY_HIGH; + break; + case HCI_LK_AUTH_COMBINATION_P256: + conn->pending_sec_level = BT_SECURITY_FIPS; + break; + } +} + +static void hci_link_key_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_link_key_req *ev = data; + struct hci_cp_link_key_reply cp; + struct hci_conn *conn; + struct link_key *key; + + bt_dev_dbg(hdev, ""); + + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + return; + + hci_dev_lock(hdev); + + key = hci_find_link_key(hdev, &ev->bdaddr); + if (!key) { + bt_dev_dbg(hdev, "link key not found for %pMR", &ev->bdaddr); + goto not_found; + } + + bt_dev_dbg(hdev, "found key type %u for %pMR", key->type, &ev->bdaddr); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) { + clear_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); + + if ((key->type == HCI_LK_UNAUTH_COMBINATION_P192 || + key->type == HCI_LK_UNAUTH_COMBINATION_P256) && + conn->auth_type != 0xff && (conn->auth_type & 0x01)) { + bt_dev_dbg(hdev, "ignoring unauthenticated key"); + goto not_found; + } + + if (key->type == HCI_LK_COMBINATION && key->pin_len < 16 && + (conn->pending_sec_level == BT_SECURITY_HIGH || + conn->pending_sec_level == BT_SECURITY_FIPS)) { + bt_dev_dbg(hdev, "ignoring key unauthenticated for high security"); + goto not_found; + } + + conn_set_key(conn, key->type, key->pin_len); + } + + bacpy(&cp.bdaddr, &ev->bdaddr); + memcpy(cp.link_key, key->val, HCI_LINK_KEY_SIZE); + + hci_send_cmd(hdev, HCI_OP_LINK_KEY_REPLY, sizeof(cp), &cp); + + hci_dev_unlock(hdev); + + return; + +not_found: + hci_send_cmd(hdev, HCI_OP_LINK_KEY_NEG_REPLY, 6, &ev->bdaddr); + hci_dev_unlock(hdev); +} + +static void hci_link_key_notify_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_link_key_notify *ev = data; + struct hci_conn *conn; + struct link_key *key; + bool persistent; + u8 pin_len = 0; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (!conn) + goto unlock; + + /* Ignore NULL link key against CVE-2020-26555 */ + if (!crypto_memneq(ev->link_key, ZERO_KEY, HCI_LINK_KEY_SIZE)) { + bt_dev_dbg(hdev, "Ignore NULL link key (ZERO KEY) for %pMR", + &ev->bdaddr); + hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); + hci_conn_drop(conn); + goto unlock; + } + + hci_conn_hold(conn); + conn->disc_timeout = 
HCI_DISCONN_TIMEOUT; + hci_conn_drop(conn); + + set_bit(HCI_CONN_NEW_LINK_KEY, &conn->flags); + conn_set_key(conn, ev->key_type, conn->pin_length); + + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + goto unlock; + + key = hci_add_link_key(hdev, conn, &ev->bdaddr, ev->link_key, + ev->key_type, pin_len, &persistent); + if (!key) + goto unlock; + + /* Update connection information since adding the key will have + * fixed up the type in the case of changed combination keys. + */ + if (ev->key_type == HCI_LK_CHANGED_COMBINATION) + conn_set_key(conn, key->type, key->pin_len); + + mgmt_new_link_key(hdev, key, persistent); + + /* Keep debug keys around only if the HCI_KEEP_DEBUG_KEYS flag + * is set. If it's not set simply remove the key from the kernel + * list (we've still notified user space about it but with + * store_hint being 0). + */ + if (key->type == HCI_LK_DEBUG_COMBINATION && + !hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) { + list_del_rcu(&key->list); + kfree_rcu(key, rcu); + goto unlock; + } + + if (persistent) + clear_bit(HCI_CONN_FLUSH_KEY, &conn->flags); + else + set_bit(HCI_CONN_FLUSH_KEY, &conn->flags); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_clock_offset_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_clock_offset *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn && !ev->status) { + struct inquiry_entry *ie; + + ie = hci_inquiry_cache_lookup(hdev, &conn->dst); + if (ie) { + ie->data.clock_offset = ev->clock_offset; + ie->timestamp = jiffies; + } + } + + hci_dev_unlock(hdev); +} + +static void hci_pkt_type_change_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_pkt_type_change *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn && !ev->status) + conn->pkt_type = __le16_to_cpu(ev->pkt_type); + + hci_dev_unlock(hdev); +} + +static void hci_pscan_rep_mode_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_pscan_rep_mode *ev = data; + struct inquiry_entry *ie; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); + if (ie) { + ie->data.pscan_rep_mode = ev->pscan_rep_mode; + ie->timestamp = jiffies; + } + + hci_dev_unlock(hdev); +} + +static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata, + struct sk_buff *skb) +{ + struct hci_ev_inquiry_result_rssi *ev = edata; + struct inquiry_data data; + int i; + + bt_dev_dbg(hdev, "num_rsp %d", ev->num); + + if (!ev->num) + return; + + if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) + return; + + hci_dev_lock(hdev); + + if (skb->len == array_size(ev->num, + sizeof(struct inquiry_info_rssi_pscan))) { + struct inquiry_info_rssi_pscan *info; + + for (i = 0; i < ev->num; i++) { + u32 flags; + + info = hci_ev_skb_pull(hdev, skb, + HCI_EV_INQUIRY_RESULT_WITH_RSSI, + sizeof(*info)); + if (!info) { + bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", + HCI_EV_INQUIRY_RESULT_WITH_RSSI); + goto unlock; + } + + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = info->pscan_mode; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = info->rssi; + 
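+ /* Editor's note: legacy Inquiry Result with RSSI events carry no SSP field, which is why ssp_mode is hard-coded to 0x00 in both layout variants handled here; only the Extended Inquiry Result handler further below reports ssp_mode = 0x01. */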
data.ssp_mode = 0x00; + + flags = hci_inquiry_cache_update(hdev, &data, false); + + mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, + info->dev_class, info->rssi, + flags, NULL, 0, NULL, 0, 0); + } + } else if (skb->len == array_size(ev->num, + sizeof(struct inquiry_info_rssi))) { + struct inquiry_info_rssi *info; + + for (i = 0; i < ev->num; i++) { + u32 flags; + + info = hci_ev_skb_pull(hdev, skb, + HCI_EV_INQUIRY_RESULT_WITH_RSSI, + sizeof(*info)); + if (!info) { + bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", + HCI_EV_INQUIRY_RESULT_WITH_RSSI); + goto unlock; + } + + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = 0x00; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = info->rssi; + data.ssp_mode = 0x00; + + flags = hci_inquiry_cache_update(hdev, &data, false); + + mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, + info->dev_class, info->rssi, + flags, NULL, 0, NULL, 0, 0); + } + } else { + bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", + HCI_EV_INQUIRY_RESULT_WITH_RSSI); + } +unlock: + hci_dev_unlock(hdev); +} + +static void hci_remote_ext_features_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_remote_ext_features *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (!conn) + goto unlock; + + if (ev->page < HCI_MAX_PAGES) + memcpy(conn->features[ev->page], ev->features, 8); + + if (!ev->status && ev->page == 0x01) { + struct inquiry_entry *ie; + + ie = hci_inquiry_cache_lookup(hdev, &conn->dst); + if (ie) + ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); + + if (ev->features[0] & LMP_HOST_SSP) { + set_bit(HCI_CONN_SSP_ENABLED, &conn->flags); + } else { + /* It is mandatory by the Bluetooth specification that + * Extended Inquiry Results are only used when Secure + * Simple Pairing is enabled, but some devices violate + * this. 
+ * + * To make these devices work, the internal SSP + * enabled flag needs to be cleared if the remote host + * features do not indicate SSP support */ + clear_bit(HCI_CONN_SSP_ENABLED, &conn->flags); + } + + if (ev->features[0] & LMP_HOST_SC) + set_bit(HCI_CONN_SC_ENABLED, &conn->flags); + } + + if (conn->state != BT_CONFIG) + goto unlock; + + if (!ev->status && !test_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) { + struct hci_cp_remote_name_req cp; + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.pscan_rep_mode = 0x02; + hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp); + } else if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + mgmt_device_connected(hdev, conn, NULL, 0); + + if (!hci_outgoing_auth_needed(hdev, conn)) { + conn->state = BT_CONNECTED; + hci_connect_cfm(conn, ev->status); + hci_conn_drop(conn); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_sync_conn_complete *ev = data; + struct hci_conn *conn; + u8 status = ev->status; + + switch (ev->link_type) { + case SCO_LINK: + case ESCO_LINK: + break; + default: + /* As per Core 5.3 Vol 4 Part E 7.7.35 (p.2219), Link_Type + * for HCI_Synchronous_Connection_Complete is limited to + * either SCO or eSCO + */ + bt_dev_err(hdev, "Ignoring connect complete event for invalid link type"); + return; + } + + bt_dev_dbg(hdev, "status 0x%2.2x", status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr); + if (!conn) { + if (ev->link_type == ESCO_LINK) + goto unlock; + + /* When the link type in the event indicates SCO connection + * and lookup of the connection object fails, then check + * if an eSCO connection object exists. + * + * The core limits the synchronous connections to either + * SCO or eSCO. The eSCO connection is preferred and tried + * to be setup first and until successfully established, + * the link type will be hinted as eSCO. + */ + conn = hci_conn_hash_lookup_ba(hdev, ESCO_LINK, &ev->bdaddr); + if (!conn) + goto unlock; + } + + /* The HCI_Synchronous_Connection_Complete event is only sent once per connection. + * Processing it more than once per connection can corrupt kernel memory. + * + * As the connection handle is set here for the first time, it indicates + * whether the connection is already set up. 
+ */ + if (conn->handle != HCI_CONN_HANDLE_UNSET) { + bt_dev_err(hdev, "Ignoring HCI_Sync_Conn_Complete event for existing connection"); + goto unlock; + } + + switch (status) { + case 0x00: + conn->handle = __le16_to_cpu(ev->handle); + if (conn->handle > HCI_CONN_HANDLE_MAX) { + bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", + conn->handle, HCI_CONN_HANDLE_MAX); + status = HCI_ERROR_INVALID_PARAMETERS; + conn->state = BT_CLOSED; + break; + } + + conn->state = BT_CONNECTED; + conn->type = ev->link_type; + + hci_debugfs_create_conn(conn); + hci_conn_add_sysfs(conn); + break; + + case 0x10: /* Connection Accept Timeout */ + case 0x0d: /* Connection Rejected due to Limited Resources */ + case 0x11: /* Unsupported Feature or Parameter Value */ + case 0x1c: /* SCO interval rejected */ + case 0x1a: /* Unsupported Remote Feature */ + case 0x1e: /* Invalid LMP Parameters */ + case 0x1f: /* Unspecified error */ + case 0x20: /* Unsupported LMP Parameter value */ + if (conn->out) { + conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | + (hdev->esco_type & EDR_ESCO_MASK); + if (hci_setup_sync(conn, conn->link->handle)) + goto unlock; + } + fallthrough; + + default: + conn->state = BT_CLOSED; + break; + } + + bt_dev_dbg(hdev, "SCO connected with air mode: %02x", ev->air_mode); + /* Notify only in case of SCO over HCI transport data path which + * is zero and non-zero value shall be non-HCI transport data path + */ + if (conn->codec.data_path == 0 && hdev->notify) { + switch (ev->air_mode) { + case 0x02: + hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_CVSD); + break; + case 0x03: + hdev->notify(hdev, HCI_NOTIFY_ENABLE_SCO_TRANSP); + break; + } + } + + hci_connect_cfm(conn, status); + if (status) + hci_conn_del(conn); + +unlock: + hci_dev_unlock(hdev); +} + +static inline size_t eir_get_length(u8 *eir, size_t eir_len) +{ + size_t parsed = 0; + + while (parsed < eir_len) { + u8 field_len = eir[0]; + + if (field_len == 0) + return parsed; + + parsed += field_len + 1; + eir += field_len + 1; + } + + return eir_len; +} + +static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata, + struct sk_buff *skb) +{ + struct hci_ev_ext_inquiry_result *ev = edata; + struct inquiry_data data; + size_t eir_len; + int i; + + if (!hci_ev_skb_pull(hdev, skb, HCI_EV_EXTENDED_INQUIRY_RESULT, + flex_array_size(ev, info, ev->num))) + return; + + bt_dev_dbg(hdev, "num %d", ev->num); + + if (!ev->num) + return; + + if (hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) + return; + + hci_dev_lock(hdev); + + for (i = 0; i < ev->num; i++) { + struct extended_inquiry_info *info = &ev->info[i]; + u32 flags; + bool name_known; + + bacpy(&data.bdaddr, &info->bdaddr); + data.pscan_rep_mode = info->pscan_rep_mode; + data.pscan_period_mode = info->pscan_period_mode; + data.pscan_mode = 0x00; + memcpy(data.dev_class, info->dev_class, 3); + data.clock_offset = info->clock_offset; + data.rssi = info->rssi; + data.ssp_mode = 0x01; + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + name_known = eir_get_data(info->data, + sizeof(info->data), + EIR_NAME_COMPLETE, NULL); + else + name_known = true; + + flags = hci_inquiry_cache_update(hdev, &data, name_known); + + eir_len = eir_get_length(info->data, sizeof(info->data)); + + mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00, + info->dev_class, info->rssi, + flags, info->data, eir_len, NULL, 0, 0); + } + + hci_dev_unlock(hdev); +} + +static void hci_key_refresh_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_key_refresh_complete *ev = data; + 
struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x handle 0x%4.4x", ev->status, + __le16_to_cpu(ev->handle)); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (!conn) + goto unlock; + + /* For BR/EDR the necessary steps are taken through the + * auth_complete event. + */ + if (conn->type != LE_LINK) + goto unlock; + + if (!ev->status) + conn->sec_level = conn->pending_sec_level; + + clear_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); + + if (ev->status && conn->state == BT_CONNECTED) { + hci_disconnect(conn, HCI_ERROR_AUTH_FAILURE); + hci_conn_drop(conn); + goto unlock; + } + + if (conn->state == BT_CONFIG) { + if (!ev->status) + conn->state = BT_CONNECTED; + + hci_connect_cfm(conn, ev->status); + hci_conn_drop(conn); + } else { + hci_auth_cfm(conn, ev->status); + + hci_conn_hold(conn); + conn->disc_timeout = HCI_DISCONN_TIMEOUT; + hci_conn_drop(conn); + } + +unlock: + hci_dev_unlock(hdev); +} + +static u8 hci_get_auth_req(struct hci_conn *conn) +{ + /* If remote requests no-bonding follow that lead */ + if (conn->remote_auth == HCI_AT_NO_BONDING || + conn->remote_auth == HCI_AT_NO_BONDING_MITM) + return conn->remote_auth | (conn->auth_type & 0x01); + + /* If both remote and local have enough IO capabilities, require + * MITM protection + */ + if (conn->remote_cap != HCI_IO_NO_INPUT_OUTPUT && + conn->io_capability != HCI_IO_NO_INPUT_OUTPUT) + return conn->remote_auth | 0x01; + + /* No MITM protection possible so ignore remote requirement */ + return (conn->remote_auth & ~0x01) | (conn->auth_type & 0x01); +} + +static u8 bredr_oob_data_present(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct oob_data *data; + + data = hci_find_remote_oob_data(hdev, &conn->dst, BDADDR_BREDR); + if (!data) + return 0x00; + + if (bredr_sc_enabled(hdev)) { + /* When Secure Connections is enabled, then just + * return the present value stored with the OOB + * data. The stored value contains the right present + * information. However it can only be trusted when + * not in Secure Connection Only mode. + */ + if (!hci_dev_test_flag(hdev, HCI_SC_ONLY)) + return data->present; + + /* When Secure Connections Only mode is enabled, then + * the P-256 values are required. If they are not + * available, then do not declare that OOB data is + * present. + */ + if (!crypto_memneq(data->rand256, ZERO_KEY, 16) || + !crypto_memneq(data->hash256, ZERO_KEY, 16)) + return 0x00; + + return 0x02; + } + + /* When Secure Connections is not enabled or actually + * not supported by the hardware, then check that if + * P-192 data values are present. + */ + if (!crypto_memneq(data->rand192, ZERO_KEY, 16) || + !crypto_memneq(data->hash192, ZERO_KEY, 16)) + return 0x00; + + return 0x01; +} + +static void hci_io_capa_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_io_capa_request *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (!conn || !hci_conn_ssp_enabled(conn)) + goto unlock; + + hci_conn_hold(conn); + + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + goto unlock; + + /* Allow pairing if we're pairable, the initiators of the + * pairing or if the remote is not requesting bonding. 
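+ * (In code terms below: HCI_BONDABLE covers "pairable", the HCI_CONN_AUTH_INITIATOR flag covers "initiators of the pairing", and a remote_auth of no-bonding, with the MITM bit masked off, covers the last case.)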
+ */ + if (hci_dev_test_flag(hdev, HCI_BONDABLE) || + test_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags) || + (conn->remote_auth & ~0x01) == HCI_AT_NO_BONDING) { + struct hci_cp_io_capability_reply cp; + + bacpy(&cp.bdaddr, &ev->bdaddr); + /* Change the IO capability from KeyboardDisplay + * to DisplayYesNo as it is not supported by BT spec. */ + cp.capability = (conn->io_capability == 0x04) ? + HCI_IO_DISPLAY_YESNO : conn->io_capability; + + /* If we are initiators, there is no remote information yet */ + if (conn->remote_auth == 0xff) { + /* Request MITM protection if our IO caps allow it + * except for the no-bonding case. + */ + if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && + conn->auth_type != HCI_AT_NO_BONDING) + conn->auth_type |= 0x01; + } else { + conn->auth_type = hci_get_auth_req(conn); + } + + /* If we're not bondable, force one of the non-bondable + * authentication requirement values. + */ + if (!hci_dev_test_flag(hdev, HCI_BONDABLE)) + conn->auth_type &= HCI_AT_NO_BONDING_MITM; + + cp.authentication = conn->auth_type; + cp.oob_data = bredr_oob_data_present(conn); + + hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_REPLY, + sizeof(cp), &cp); + } else { + struct hci_cp_io_capability_neg_reply cp; + + bacpy(&cp.bdaddr, &ev->bdaddr); + cp.reason = HCI_ERROR_PAIRING_NOT_ALLOWED; + + hci_send_cmd(hdev, HCI_OP_IO_CAPABILITY_NEG_REPLY, + sizeof(cp), &cp); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_io_capa_reply_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_io_capa_reply *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (!conn) + goto unlock; + + conn->remote_cap = ev->capability; + conn->remote_auth = ev->authentication; + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_user_confirm_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_user_confirm_req *ev = data; + int loc_mitm, rem_mitm, confirm_hint = 0; + struct hci_conn *conn; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + goto unlock; + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (!conn) + goto unlock; + + loc_mitm = (conn->auth_type & 0x01); + rem_mitm = (conn->remote_auth & 0x01); + + /* If we require MITM but the remote device can't provide that + * (it has NoInputNoOutput) then reject the confirmation + * request. We check the security level here since it doesn't + * necessarily match conn->auth_type. + */ + if (conn->pending_sec_level > BT_SECURITY_MEDIUM && + conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) { + bt_dev_dbg(hdev, "Rejecting request: remote device can't provide MITM"); + hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_NEG_REPLY, + sizeof(ev->bdaddr), &ev->bdaddr); + goto unlock; + } + + /* If no side requires MITM protection; auto-accept */ + if ((!loc_mitm || conn->remote_cap == HCI_IO_NO_INPUT_OUTPUT) && + (!rem_mitm || conn->io_capability == HCI_IO_NO_INPUT_OUTPUT)) { + + /* If we're not the initiators request authorization to + * proceed from user space (mgmt_user_confirm with + * confirm_hint set to 1). 
The exception is if neither + * side had MITM or if the local IO capability is + * NoInputNoOutput, in which case we do auto-accept + */ + if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && + conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && + (loc_mitm || rem_mitm)) { + bt_dev_dbg(hdev, "Confirming auto-accept as acceptor"); + confirm_hint = 1; + goto confirm; + } + + /* If there already exists link key in local host, leave the + * decision to user space since the remote device could be + * legitimate or malicious. + */ + if (hci_find_link_key(hdev, &ev->bdaddr)) { + bt_dev_dbg(hdev, "Local host already has link key"); + confirm_hint = 1; + goto confirm; + } + + BT_DBG("Auto-accept of user confirmation with %ums delay", + hdev->auto_accept_delay); + + if (hdev->auto_accept_delay > 0) { + int delay = msecs_to_jiffies(hdev->auto_accept_delay); + queue_delayed_work(conn->hdev->workqueue, + &conn->auto_accept_work, delay); + goto unlock; + } + + hci_send_cmd(hdev, HCI_OP_USER_CONFIRM_REPLY, + sizeof(ev->bdaddr), &ev->bdaddr); + goto unlock; + } + +confirm: + mgmt_user_confirm_request(hdev, &ev->bdaddr, ACL_LINK, 0, + le32_to_cpu(ev->passkey), confirm_hint); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_user_passkey_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_user_passkey_req *ev = data; + + bt_dev_dbg(hdev, ""); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_user_passkey_request(hdev, &ev->bdaddr, ACL_LINK, 0); +} + +static void hci_user_passkey_notify_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_user_passkey_notify *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, ""); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (!conn) + return; + + conn->passkey_notify = __le32_to_cpu(ev->passkey); + conn->passkey_entered = 0; + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, + conn->dst_type, conn->passkey_notify, + conn->passkey_entered); +} + +static void hci_keypress_notify_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_keypress_notify *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, ""); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (!conn) + return; + + switch (ev->type) { + case HCI_KEYPRESS_STARTED: + conn->passkey_entered = 0; + return; + + case HCI_KEYPRESS_ENTERED: + conn->passkey_entered++; + break; + + case HCI_KEYPRESS_ERASED: + conn->passkey_entered--; + break; + + case HCI_KEYPRESS_CLEARED: + conn->passkey_entered = 0; + break; + + case HCI_KEYPRESS_COMPLETED: + return; + } + + if (hci_dev_test_flag(hdev, HCI_MGMT)) + mgmt_user_passkey_notify(hdev, &conn->dst, conn->type, + conn->dst_type, conn->passkey_notify, + conn->passkey_entered); +} + +static void hci_simple_pair_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_simple_pair_complete *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (!conn || !hci_conn_ssp_enabled(conn)) + goto unlock; + + /* Reset the authentication requirement to unknown */ + conn->remote_auth = 0xff; + + /* To avoid duplicate auth_failed events to user space we check + * the HCI_CONN_AUTH_PEND flag which will be set if we + * initiated the authentication. 
A traditional auth_complete + * event gets always produced as initiator and is also mapped to + * the mgmt_auth_failed event */ + if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) && ev->status) + mgmt_auth_failed(conn, ev->status); + + hci_conn_drop(conn); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_remote_host_features_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_remote_host_features *ev = data; + struct inquiry_entry *ie; + struct hci_conn *conn; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr); + if (conn) + memcpy(conn->features[1], ev->features, 8); + + ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr); + if (ie) + ie->data.ssp_mode = (ev->features[0] & LMP_HOST_SSP); + + hci_dev_unlock(hdev); +} + +static void hci_remote_oob_data_request_evt(struct hci_dev *hdev, void *edata, + struct sk_buff *skb) +{ + struct hci_ev_remote_oob_data_request *ev = edata; + struct oob_data *data; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + goto unlock; + + data = hci_find_remote_oob_data(hdev, &ev->bdaddr, BDADDR_BREDR); + if (!data) { + struct hci_cp_remote_oob_data_neg_reply cp; + + bacpy(&cp.bdaddr, &ev->bdaddr); + hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_NEG_REPLY, + sizeof(cp), &cp); + goto unlock; + } + + if (bredr_sc_enabled(hdev)) { + struct hci_cp_remote_oob_ext_data_reply cp; + + bacpy(&cp.bdaddr, &ev->bdaddr); + if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { + memset(cp.hash192, 0, sizeof(cp.hash192)); + memset(cp.rand192, 0, sizeof(cp.rand192)); + } else { + memcpy(cp.hash192, data->hash192, sizeof(cp.hash192)); + memcpy(cp.rand192, data->rand192, sizeof(cp.rand192)); + } + memcpy(cp.hash256, data->hash256, sizeof(cp.hash256)); + memcpy(cp.rand256, data->rand256, sizeof(cp.rand256)); + + hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_EXT_DATA_REPLY, + sizeof(cp), &cp); + } else { + struct hci_cp_remote_oob_data_reply cp; + + bacpy(&cp.bdaddr, &ev->bdaddr); + memcpy(cp.hash, data->hash192, sizeof(cp.hash)); + memcpy(cp.rand, data->rand192, sizeof(cp.rand)); + + hci_send_cmd(hdev, HCI_OP_REMOTE_OOB_DATA_REPLY, + sizeof(cp), &cp); + } + +unlock: + hci_dev_unlock(hdev); +} + +#if IS_ENABLED(CONFIG_BT_HS) +static void hci_chan_selected_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_channel_selected *ev = data; + struct hci_conn *hcon; + + bt_dev_dbg(hdev, "handle 0x%2.2x", ev->phy_handle); + + hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); + if (!hcon) + return; + + amp_read_loc_assoc_final_data(hdev, hcon); +} + +static void hci_phy_link_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_phy_link_complete *ev = data; + struct hci_conn *hcon, *bredr_hcon; + + bt_dev_dbg(hdev, "handle 0x%2.2x status 0x%2.2x", ev->phy_handle, + ev->status); + + hci_dev_lock(hdev); + + hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); + if (!hcon) + goto unlock; + + if (!hcon->amp_mgr) + goto unlock; + + if (ev->status) { + hci_conn_del(hcon); + goto unlock; + } + + bredr_hcon = hcon->amp_mgr->l2cap_conn->hcon; + + hcon->state = BT_CONNECTED; + bacpy(&hcon->dst, &bredr_hcon->dst); + + hci_conn_hold(hcon); + hcon->disc_timeout = HCI_DISCONN_TIMEOUT; + hci_conn_drop(hcon); + + hci_debugfs_create_conn(hcon); + hci_conn_add_sysfs(hcon); + + amp_physical_cfm(bredr_hcon, hcon); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_loglink_complete_evt(struct hci_dev *hdev, void 
*data, + struct sk_buff *skb) +{ + struct hci_ev_logical_link_complete *ev = data; + struct hci_conn *hcon; + struct hci_chan *hchan; + struct amp_mgr *mgr; + + bt_dev_dbg(hdev, "log_handle 0x%4.4x phy_handle 0x%2.2x status 0x%2.2x", + le16_to_cpu(ev->handle), ev->phy_handle, ev->status); + + hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); + if (!hcon) + return; + + /* Create AMP hchan */ + hchan = hci_chan_create(hcon); + if (!hchan) + return; + + hchan->handle = le16_to_cpu(ev->handle); + hchan->amp = true; + + BT_DBG("hcon %p mgr %p hchan %p", hcon, hcon->amp_mgr, hchan); + + mgr = hcon->amp_mgr; + if (mgr && mgr->bredr_chan) { + struct l2cap_chan *bredr_chan = mgr->bredr_chan; + + l2cap_chan_lock(bredr_chan); + + bredr_chan->conn->mtu = hdev->block_mtu; + l2cap_logical_cfm(bredr_chan, hchan, 0); + hci_conn_hold(hcon); + + l2cap_chan_unlock(bredr_chan); + } +} + +static void hci_disconn_loglink_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_disconn_logical_link_complete *ev = data; + struct hci_chan *hchan; + + bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", + le16_to_cpu(ev->handle), ev->status); + + if (ev->status) + return; + + hci_dev_lock(hdev); + + hchan = hci_chan_lookup_handle(hdev, le16_to_cpu(ev->handle)); + if (!hchan || !hchan->amp) + goto unlock; + + amp_destroy_logical_link(hchan, ev->reason); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_disconn_phylink_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_disconn_phy_link_complete *ev = data; + struct hci_conn *hcon; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + if (ev->status) + return; + + hci_dev_lock(hdev); + + hcon = hci_conn_hash_lookup_handle(hdev, ev->phy_handle); + if (hcon && hcon->type == AMP_LINK) { + hcon->state = BT_CLOSED; + hci_disconn_cfm(hcon, ev->reason); + hci_conn_del(hcon); + } + + hci_dev_unlock(hdev); +} +#endif + +static void le_conn_update_addr(struct hci_conn *conn, bdaddr_t *bdaddr, + u8 bdaddr_type, bdaddr_t *local_rpa) +{ + if (conn->out) { + conn->dst_type = bdaddr_type; + conn->resp_addr_type = bdaddr_type; + bacpy(&conn->resp_addr, bdaddr); + + /* If the controller has set a Local RPA, then it must be + * used instead of hdev->rpa. + */ + if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { + conn->init_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(&conn->init_addr, local_rpa); + } else if (hci_dev_test_flag(conn->hdev, HCI_PRIVACY)) { + conn->init_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(&conn->init_addr, &conn->hdev->rpa); + } else { + hci_copy_identity_address(conn->hdev, &conn->init_addr, + &conn->init_addr_type); + } + } else { + conn->resp_addr_type = conn->hdev->adv_addr_type; + /* If the controller has set a Local RPA, then it must be + * used instead of hdev->rpa. + */ + if (local_rpa && bacmp(local_rpa, BDADDR_ANY)) { + conn->resp_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(&conn->resp_addr, local_rpa); + } else if (conn->hdev->adv_addr_type == ADDR_LE_DEV_RANDOM) { + /* In case of ext adv, resp_addr will be updated in + * Adv Terminated event. + */ + if (!ext_adv_capable(conn->hdev)) + bacpy(&conn->resp_addr, + &conn->hdev->random_addr); + } else { + bacpy(&conn->resp_addr, &conn->hdev->bdaddr); + } + + conn->init_addr_type = bdaddr_type; + bacpy(&conn->init_addr, bdaddr); + + /* For incoming connections, set the default minimum + * and maximum connection interval. 
They will be used + * to check if the parameters are in range and if not + * trigger the connection update procedure. + */ + conn->le_conn_min_interval = conn->hdev->le_conn_min_interval; + conn->le_conn_max_interval = conn->hdev->le_conn_max_interval; + } +} + +static void le_conn_complete_evt(struct hci_dev *hdev, u8 status, + bdaddr_t *bdaddr, u8 bdaddr_type, + bdaddr_t *local_rpa, u8 role, u16 handle, + u16 interval, u16 latency, + u16 supervision_timeout) +{ + struct hci_conn_params *params; + struct hci_conn *conn; + struct smp_irk *irk; + u8 addr_type; + + hci_dev_lock(hdev); + + /* All controllers implicitly stop advertising in the event of a + * connection, so ensure that the state bit is cleared. + */ + hci_dev_clear_flag(hdev, HCI_LE_ADV); + + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, bdaddr); + if (!conn) { + /* In case of error status and there is no connection pending + * just unlock as there is nothing to cleanup. + */ + if (status) + goto unlock; + + conn = hci_conn_add(hdev, LE_LINK, bdaddr, role); + if (!conn) { + bt_dev_err(hdev, "no memory for new connection"); + goto unlock; + } + + conn->dst_type = bdaddr_type; + + /* If we didn't have a hci_conn object previously + * but we're in central role this must be something + * initiated using an accept list. Since accept list based + * connections are not "first class citizens" we don't + * have full tracking of them. Therefore, we go ahead + * with a "best effort" approach of determining the + * initiator address based on the HCI_PRIVACY flag. + */ + if (conn->out) { + conn->resp_addr_type = bdaddr_type; + bacpy(&conn->resp_addr, bdaddr); + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { + conn->init_addr_type = ADDR_LE_DEV_RANDOM; + bacpy(&conn->init_addr, &hdev->rpa); + } else { + hci_copy_identity_address(hdev, + &conn->init_addr, + &conn->init_addr_type); + } + } + } else { + cancel_delayed_work(&conn->le_conn_timeout); + } + + /* The HCI_LE_Connection_Complete event is only sent once per connection. + * Processing it more than once per connection can corrupt kernel memory. + * + * As the connection handle is set here for the first time, it indicates + * whether the connection is already set up. + */ + if (conn->handle != HCI_CONN_HANDLE_UNSET) { + bt_dev_err(hdev, "Ignoring HCI_Connection_Complete for existing connection"); + goto unlock; + } + + le_conn_update_addr(conn, bdaddr, bdaddr_type, local_rpa); + + /* Lookup the identity address from the stored connection + * address and address type. + * + * When establishing connections to an identity address, the + * connection procedure will store the resolvable random + * address first. Now if it can be converted back into the + * identity address, start using the identity address from + * now on. + */ + irk = hci_get_irk(hdev, &conn->dst, conn->dst_type); + if (irk) { + bacpy(&conn->dst, &irk->bdaddr); + conn->dst_type = irk->addr_type; + } + + conn->dst_type = ev_bdaddr_type(hdev, conn->dst_type, NULL); + + if (handle > HCI_CONN_HANDLE_MAX) { + bt_dev_err(hdev, "Invalid handle: 0x%4.4x > 0x%4.4x", handle, + HCI_CONN_HANDLE_MAX); + status = HCI_ERROR_INVALID_PARAMETERS; + } + + /* All connection failure handling is taken care of by the + * hci_conn_failed function which is triggered by the HCI + * request completion callbacks used for connecting. 
+ */ + if (status) + goto unlock; + + /* Drop the connection if it has been aborted */ + if (test_bit(HCI_CONN_CANCEL, &conn->flags)) { + hci_conn_drop(conn); + goto unlock; + } + + if (conn->dst_type == ADDR_LE_DEV_PUBLIC) + addr_type = BDADDR_LE_PUBLIC; + else + addr_type = BDADDR_LE_RANDOM; + + /* Drop the connection if the device is blocked */ + if (hci_bdaddr_list_lookup(&hdev->reject_list, &conn->dst, addr_type)) { + hci_conn_drop(conn); + goto unlock; + } + + if (!test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags)) + mgmt_device_connected(hdev, conn, NULL, 0); + + conn->sec_level = BT_SECURITY_LOW; + conn->handle = handle; + conn->state = BT_CONFIG; + + /* Store current advertising instance as connection advertising instance + * when software rotation is in use so it can be re-enabled when + * disconnected. + */ + if (!ext_adv_capable(hdev)) + conn->adv_instance = hdev->cur_adv_instance; + + conn->le_conn_interval = interval; + conn->le_conn_latency = latency; + conn->le_supv_timeout = supervision_timeout; + + hci_debugfs_create_conn(conn); + hci_conn_add_sysfs(conn); + + /* The remote features procedure is defined for central + * role only. So the remote features are only requested for + * an initiated connection. + * + * If the local controller supports peripheral-initiated features + * exchange, then requesting the remote features in peripheral + * role is possible. Otherwise just transition into the + * connected state without requesting the remote features. + */ + if (conn->out || + (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) { + struct hci_cp_le_read_remote_features cp; + + cp.handle = __cpu_to_le16(conn->handle); + + hci_send_cmd(hdev, HCI_OP_LE_READ_REMOTE_FEATURES, + sizeof(cp), &cp); + + hci_conn_hold(conn); + } else { + conn->state = BT_CONNECTED; + hci_connect_cfm(conn, status); + } + + params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst, + conn->dst_type); + if (params) { + hci_pend_le_list_del_init(params); + if (params->conn) { + hci_conn_drop(params->conn); + hci_conn_put(params->conn); + params->conn = NULL; + } + } + +unlock: + hci_update_passive_scan(hdev); + hci_dev_unlock(hdev); +} + +static void hci_le_conn_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_conn_complete *ev = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, + NULL, ev->role, le16_to_cpu(ev->handle), + le16_to_cpu(ev->interval), + le16_to_cpu(ev->latency), + le16_to_cpu(ev->supervision_timeout)); +} + +static void hci_le_enh_conn_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_enh_conn_complete *ev = data; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + le_conn_complete_evt(hdev, ev->status, &ev->bdaddr, ev->bdaddr_type, + &ev->local_rpa, ev->role, le16_to_cpu(ev->handle), + le16_to_cpu(ev->interval), + le16_to_cpu(ev->latency), + le16_to_cpu(ev->supervision_timeout)); +} + +static void hci_le_ext_adv_term_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_ext_adv_set_term *ev = data; + struct hci_conn *conn; + struct adv_info *adv, *n; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + /* The Bluetooth Core 5.3 specification clearly states that this event + * shall not be sent when the Host disables the advertising set. So in + * case of HCI_ERROR_CANCELLED_BY_HOST, just ignore the event. 
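+ * (In the Core specification error code list this is 0x44, "Operation Cancelled by Host".)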
+ * + * When the Host disables an advertising set, all cleanup is done via + * its command callback and does not need to be duplicated here. + */ + if (ev->status == HCI_ERROR_CANCELLED_BY_HOST) { + bt_dev_warn_ratelimited(hdev, "Unexpected advertising set terminated event"); + return; + } + + hci_dev_lock(hdev); + + adv = hci_find_adv_instance(hdev, ev->handle); + + if (ev->status) { + if (!adv) + goto unlock; + + /* Remove advertising as it has been terminated */ + hci_remove_adv_instance(hdev, ev->handle); + mgmt_advertising_removed(NULL, hdev, ev->handle); + + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + if (adv->enabled) + goto unlock; + } + + /* We are no longer advertising, clear HCI_LE_ADV */ + hci_dev_clear_flag(hdev, HCI_LE_ADV); + goto unlock; + } + + if (adv) + adv->enabled = false; + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->conn_handle)); + if (conn) { + /* Store handle in the connection so the correct advertising + * instance can be re-enabled when disconnected. + */ + conn->adv_instance = ev->handle; + + if (hdev->adv_addr_type != ADDR_LE_DEV_RANDOM || + bacmp(&conn->resp_addr, BDADDR_ANY)) + goto unlock; + + if (!ev->handle) { + bacpy(&conn->resp_addr, &hdev->random_addr); + goto unlock; + } + + if (adv) + bacpy(&conn->resp_addr, &adv->random_addr); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_conn_update_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_conn_update_complete *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + if (ev->status) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + conn->le_conn_interval = le16_to_cpu(ev->interval); + conn->le_conn_latency = le16_to_cpu(ev->latency); + conn->le_supv_timeout = le16_to_cpu(ev->supervision_timeout); + } + + hci_dev_unlock(hdev); +} + +/* This function requires the caller holds hdev->lock */ +static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev, + bdaddr_t *addr, + u8 addr_type, bool addr_resolved, + u8 adv_type) +{ + struct hci_conn *conn; + struct hci_conn_params *params; + + /* If the event is not connectable don't proceed further */ + if (adv_type != LE_ADV_IND && adv_type != LE_ADV_DIRECT_IND) + return NULL; + + /* Ignore if the device is blocked or hdev is suspended */ + if (hci_bdaddr_list_lookup(&hdev->reject_list, addr, addr_type) || + hdev->suspended) + return NULL; + + /* Most controllers will fail if we try to create new connections + * while we have an existing one in peripheral role. + */ + if (hdev->conn_hash.le_num_peripheral > 0 && + (!test_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks) || + !(hdev->le_states[3] & 0x10))) + return NULL; + + /* If we're not connectable only connect devices that we have in + * our pend_le_conns list. + */ + params = hci_pend_le_action_lookup(&hdev->pend_le_conns, addr, + addr_type); + if (!params) + return NULL; + + if (!params->explicit_connect) { + switch (params->auto_connect) { + case HCI_AUTO_CONN_DIRECT: + /* Only devices advertising with ADV_DIRECT_IND trigger + * a connection attempt. This allows incoming connections + * from peripheral devices. + */ + if (adv_type != LE_ADV_DIRECT_IND) + return NULL; + break; + case HCI_AUTO_CONN_ALWAYS: + /* Devices advertising with ADV_IND or ADV_DIRECT_IND + * trigger a connection attempt. 
This means + * that incoming connections from peripheral device are + * accepted and also outgoing connections to peripheral + * devices are established when found. + */ + break; + default: + return NULL; + } + } + + conn = hci_connect_le(hdev, addr, addr_type, addr_resolved, + BT_SECURITY_LOW, hdev->def_le_autoconnect_timeout, + HCI_ROLE_MASTER); + if (!IS_ERR(conn)) { + /* If HCI_AUTO_CONN_EXPLICIT is set, conn is already owned + * by higher layer that tried to connect, if no then + * store the pointer since we don't really have any + * other owner of the object besides the params that + * triggered it. This way we can abort the connection if + * the parameters get removed and keep the reference + * count consistent once the connection is established. + */ + + if (!params->explicit_connect) + params->conn = hci_conn_get(conn); + + return conn; + } + + switch (PTR_ERR(conn)) { + case -EBUSY: + /* If hci_connect() returns -EBUSY it means there is already + * an LE connection attempt going on. Since controllers don't + * support more than one connection attempt at the time, we + * don't consider this an error case. + */ + break; + default: + BT_DBG("Failed to connect: err %ld", PTR_ERR(conn)); + return NULL; + } + + return NULL; +} + +static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr, + u8 bdaddr_type, bdaddr_t *direct_addr, + u8 direct_addr_type, s8 rssi, u8 *data, u8 len, + bool ext_adv, bool ctl_time, u64 instant) +{ + struct discovery_state *d = &hdev->discovery; + struct smp_irk *irk; + struct hci_conn *conn; + bool match, bdaddr_resolved; + u32 flags; + u8 *ptr; + + switch (type) { + case LE_ADV_IND: + case LE_ADV_DIRECT_IND: + case LE_ADV_SCAN_IND: + case LE_ADV_NONCONN_IND: + case LE_ADV_SCAN_RSP: + break; + default: + bt_dev_err_ratelimited(hdev, "unknown advertising packet " + "type: 0x%02x", type); + return; + } + + if (!ext_adv && len > HCI_MAX_AD_LENGTH) { + bt_dev_err_ratelimited(hdev, "legacy adv larger than 31 bytes"); + return; + } + + /* Find the end of the data in case the report contains padded zero + * bytes at the end causing an invalid length value. + * + * When data is NULL, len is 0 so there is no need for extra ptr + * check as 'ptr < data + 0' is already false in such case. + */ + for (ptr = data; ptr < data + len && *ptr; ptr += *ptr + 1) { + if (ptr + 1 + *ptr > data + len) + break; + } + + /* Adjust for actual length. This handles the case when remote + * device is advertising with incorrect data length. + */ + len = ptr - data; + + /* If the direct address is present, then this report is from + * a LE Direct Advertising Report event. In that case it is + * important to see if the address is matching the local + * controller address. + */ + if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) { + direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type, + &bdaddr_resolved); + + /* Only resolvable random addresses are valid for these + * kind of reports and others can be ignored. + */ + if (!hci_bdaddr_is_rpa(direct_addr, direct_addr_type)) + return; + + /* If the controller is not using resolvable random + * addresses, then this report can be ignored. + */ + if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) + return; + + /* If the local IRK of the controller does not match + * with the resolvable random address provided, then + * this report can be ignored. 
+ */ + if (!smp_irk_matches(hdev, hdev->irk, direct_addr)) + return; + } + + /* Check if we need to convert to identity address */ + irk = hci_get_irk(hdev, bdaddr, bdaddr_type); + if (irk) { + bdaddr = &irk->bdaddr; + bdaddr_type = irk->addr_type; + } + + bdaddr_type = ev_bdaddr_type(hdev, bdaddr_type, &bdaddr_resolved); + + /* Check if we have been requested to connect to this device. + * + * direct_addr is set only for directed advertising reports (it is NULL + * for advertising reports) and is already verified to be RPA above. + */ + conn = check_pending_le_conn(hdev, bdaddr, bdaddr_type, bdaddr_resolved, + type); + if (!ext_adv && conn && type == LE_ADV_IND && len <= HCI_MAX_AD_LENGTH) { + /* Store report for later inclusion by + * mgmt_device_connected + */ + memcpy(conn->le_adv_data, data, len); + conn->le_adv_data_len = len; + } + + if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) + flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; + else + flags = 0; + + /* All scan results should be sent up for Mesh systems */ + if (hci_dev_test_flag(hdev, HCI_MESH)) { + mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, + rssi, flags, data, len, NULL, 0, instant); + return; + } + + /* Passive scanning shouldn't trigger any device found events, + * except for devices marked as CONN_REPORT for which we do send + * device found events, or advertisement monitoring requested. + */ + if (hdev->le_scan_type == LE_SCAN_PASSIVE) { + if (type == LE_ADV_DIRECT_IND) + return; + + if (!hci_pend_le_action_lookup(&hdev->pend_le_reports, + bdaddr, bdaddr_type) && + idr_is_empty(&hdev->adv_monitors_idr)) + return; + + mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, + rssi, flags, data, len, NULL, 0, 0); + return; + } + + /* When receiving a scan response, then there is no way to + * know if the remote device is connectable or not. However + * since scan responses are merged with a previously seen + * advertising report, the flags field from that report + * will be used. + * + * In the unlikely case that a controller just sends a scan + * response event that doesn't match the pending report, then + * it is marked as a standalone SCAN_RSP. + */ + if (type == LE_ADV_SCAN_RSP) + flags = MGMT_DEV_FOUND_SCAN_RSP; + + /* If there's nothing pending either store the data from this + * event or send an immediate device found event if the data + * should not be stored for later. + */ + if (!ext_adv && !has_pending_adv_report(hdev)) { + /* If the report will trigger a SCAN_REQ store it for + * later merging. + */ + if (type == LE_ADV_IND || type == LE_ADV_SCAN_IND) { + store_pending_adv_report(hdev, bdaddr, bdaddr_type, + rssi, flags, data, len); + return; + } + + mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, + rssi, flags, data, len, NULL, 0, 0); + return; + } + + /* Check if the pending report is for the same device as the new one */ + match = (!bacmp(bdaddr, &d->last_adv_addr) && + bdaddr_type == d->last_adv_addr_type); + + /* If the pending data doesn't match this report or this isn't a + * scan response (e.g. we got a duplicate ADV_IND) then force + * sending of the pending data. + */ + if (type != LE_ADV_SCAN_RSP || !match) { + /* Send out whatever is in the cache, but skip duplicates */ + if (!match) + mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, + d->last_adv_addr_type, NULL, + d->last_adv_rssi, d->last_adv_flags, + d->last_adv_data, + d->last_adv_data_len, NULL, 0, 0); + + /* If the new report will trigger a SCAN_REQ store it for + * later merging. 
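+ * (ADV_IND and ADV_SCAN_IND are the scannable PDU types, so a SCAN_RSP for the same device may still follow and be merged.)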
+ */ + if (!ext_adv && (type == LE_ADV_IND || + type == LE_ADV_SCAN_IND)) { + store_pending_adv_report(hdev, bdaddr, bdaddr_type, + rssi, flags, data, len); + return; + } + + /* The advertising reports cannot be merged, so clear + * the pending report and send out a device found event. + */ + clear_pending_adv_report(hdev); + mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, + rssi, flags, data, len, NULL, 0, 0); + return; + } + + /* If we get here we've got a pending ADV_IND or ADV_SCAN_IND and + * the new event is a SCAN_RSP. We can therefore proceed with + * sending a merged device found event. + */ + mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK, + d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, + d->last_adv_data, d->last_adv_data_len, data, len, 0); + clear_pending_adv_report(hdev); +} + +static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_advertising_report *ev = data; + u64 instant = jiffies; + + if (!ev->num) + return; + + hci_dev_lock(hdev); + + while (ev->num--) { + struct hci_ev_le_advertising_info *info; + s8 rssi; + + info = hci_le_ev_skb_pull(hdev, skb, + HCI_EV_LE_ADVERTISING_REPORT, + sizeof(*info)); + if (!info) + break; + + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_ADVERTISING_REPORT, + info->length + 1)) + break; + + if (info->length <= HCI_MAX_AD_LENGTH) { + rssi = info->data[info->length]; + process_adv_report(hdev, info->type, &info->bdaddr, + info->bdaddr_type, NULL, 0, rssi, + info->data, info->length, false, + false, instant); + } else { + bt_dev_err(hdev, "Dropping invalid advertising data"); + } + } + + hci_dev_unlock(hdev); +} + +static u8 ext_evt_type_to_legacy(struct hci_dev *hdev, u16 evt_type) +{ + if (evt_type & LE_EXT_ADV_LEGACY_PDU) { + switch (evt_type) { + case LE_LEGACY_ADV_IND: + return LE_ADV_IND; + case LE_LEGACY_ADV_DIRECT_IND: + return LE_ADV_DIRECT_IND; + case LE_LEGACY_ADV_SCAN_IND: + return LE_ADV_SCAN_IND; + case LE_LEGACY_NONCONN_IND: + return LE_ADV_NONCONN_IND; + case LE_LEGACY_SCAN_RSP_ADV: + case LE_LEGACY_SCAN_RSP_ADV_SCAN: + return LE_ADV_SCAN_RSP; + } + + goto invalid; + } + + if (evt_type & LE_EXT_ADV_CONN_IND) { + if (evt_type & LE_EXT_ADV_DIRECT_IND) + return LE_ADV_DIRECT_IND; + + return LE_ADV_IND; + } + + if (evt_type & LE_EXT_ADV_SCAN_RSP) + return LE_ADV_SCAN_RSP; + + if (evt_type & LE_EXT_ADV_SCAN_IND) + return LE_ADV_SCAN_IND; + + if (evt_type == LE_EXT_ADV_NON_CONN_IND || + evt_type & LE_EXT_ADV_DIRECT_IND) + return LE_ADV_NONCONN_IND; + +invalid: + bt_dev_err_ratelimited(hdev, "Unknown advertising packet type: 0x%02x", + evt_type); + + return LE_ADV_INVALID; +} + +static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_ext_adv_report *ev = data; + u64 instant = jiffies; + + if (!ev->num) + return; + + hci_dev_lock(hdev); + + while (ev->num--) { + struct hci_ev_le_ext_adv_info *info; + u8 legacy_evt_type; + u16 evt_type; + + info = hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, + sizeof(*info)); + if (!info) + break; + + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_EXT_ADV_REPORT, + info->length)) + break; + + evt_type = __le16_to_cpu(info->type); + legacy_evt_type = ext_evt_type_to_legacy(hdev, evt_type); + if (legacy_evt_type != LE_ADV_INVALID) { + process_adv_report(hdev, legacy_evt_type, &info->bdaddr, + info->bdaddr_type, NULL, 0, + info->rssi, info->data, info->length, + !(evt_type & LE_EXT_ADV_LEGACY_PDU), + false, instant); + } + } + + hci_dev_unlock(hdev); +} + 
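As a quick illustration of the mapping performed by ext_evt_type_to_legacy() above, here is a stand-alone sketch; it is not part of this commit, the constant values are mirrored from include/net/bluetooth/hci.h in this tree, and it condenses the non-legacy branches only (the legacy-PDU switch and the LE_ADV_INVALID fallback are omitted):

#include <stdio.h>

#define LE_EXT_ADV_NON_CONN_IND		0x0000
#define LE_EXT_ADV_CONN_IND		0x0001
#define LE_EXT_ADV_SCAN_IND		0x0002
#define LE_EXT_ADV_DIRECT_IND		0x0004
#define LE_EXT_ADV_SCAN_RSP		0x0008
#define LE_EXT_ADV_LEGACY_PDU		0x0010

/* Condensed form of the non-legacy branches of ext_evt_type_to_legacy() */
static const char *decode(unsigned int evt_type)
{
	if (evt_type & LE_EXT_ADV_CONN_IND)
		return (evt_type & LE_EXT_ADV_DIRECT_IND) ?
			"LE_ADV_DIRECT_IND" : "LE_ADV_IND";
	if (evt_type & LE_EXT_ADV_SCAN_RSP)
		return "LE_ADV_SCAN_RSP";
	if (evt_type & LE_EXT_ADV_SCAN_IND)
		return "LE_ADV_SCAN_IND";
	return "LE_ADV_NONCONN_IND";
}

int main(void)
{
	/* connectable directed advertisement -> LE_ADV_DIRECT_IND */
	printf("%s\n", decode(LE_EXT_ADV_CONN_IND | LE_EXT_ADV_DIRECT_IND));
	/* scannable undirected advertisement -> LE_ADV_SCAN_IND */
	printf("%s\n", decode(LE_EXT_ADV_SCAN_IND));
	/* non-connectable, non-scannable -> LE_ADV_NONCONN_IND */
	printf("%s\n", decode(LE_EXT_ADV_NON_CONN_IND));
	return 0;
}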
+static int hci_le_pa_term_sync(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_le_pa_term_sync cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + + return hci_send_cmd(hdev, HCI_OP_LE_PA_TERM_SYNC, sizeof(cp), &cp); +} + +static void hci_le_pa_sync_estabilished_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_pa_sync_established *ev = data; + int mask = hdev->link_mode; + __u8 flags = 0; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + if (ev->status) + return; + + hci_dev_lock(hdev); + + hci_dev_clear_flag(hdev, HCI_PA_SYNC); + + mask |= hci_proto_connect_ind(hdev, &ev->bdaddr, ISO_LINK, &flags); + if (!(mask & HCI_LM_ACCEPT)) + hci_le_pa_term_sync(hdev, ev->handle); + + hci_dev_unlock(hdev); +} + +static void hci_le_remote_feat_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_remote_feat_complete *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn) { + if (!ev->status) + memcpy(conn->features[0], ev->features, 8); + + if (conn->state == BT_CONFIG) { + __u8 status; + + /* If the local controller supports peripheral-initiated + * features exchange, but the remote controller does + * not, then it is possible that the error code 0x1a + * for unsupported remote feature gets returned. + * + * In this specific case, allow the connection to + * transition into connected state and mark it as + * successful. + */ + if (!conn->out && ev->status == 0x1a && + (hdev->le_features[0] & HCI_LE_PERIPHERAL_FEATURES)) + status = 0x00; + else + status = ev->status; + + conn->state = BT_CONNECTED; + hci_connect_cfm(conn, status); + hci_conn_drop(conn); + } + } + + hci_dev_unlock(hdev); +} + +static void hci_le_ltk_request_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_ltk_req *ev = data; + struct hci_cp_le_ltk_reply cp; + struct hci_cp_le_ltk_neg_reply neg; + struct hci_conn *conn; + struct smp_ltk *ltk; + + bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (conn == NULL) + goto not_found; + + ltk = hci_find_ltk(hdev, &conn->dst, conn->dst_type, conn->role); + if (!ltk) + goto not_found; + + if (smp_ltk_is_sc(ltk)) { + /* With SC both EDiv and Rand are set to zero */ + if (ev->ediv || ev->rand) + goto not_found; + } else { + /* For non-SC keys check that EDiv and Rand match */ + if (ev->ediv != ltk->ediv || ev->rand != ltk->rand) + goto not_found; + } + + memcpy(cp.ltk, ltk->val, ltk->enc_size); + memset(cp.ltk + ltk->enc_size, 0, sizeof(cp.ltk) - ltk->enc_size); + cp.handle = cpu_to_le16(conn->handle); + + conn->pending_sec_level = smp_ltk_sec_level(ltk); + + conn->enc_key_size = ltk->enc_size; + + hci_send_cmd(hdev, HCI_OP_LE_LTK_REPLY, sizeof(cp), &cp); + + /* Ref. Bluetooth Core SPEC pages 1975 and 2004. STK is a + * temporary key used to encrypt a connection following + * pairing. It is used during the Encrypted Session Setup to + * distribute the keys. Later, security can be re-established + * using a distributed LTK. 
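+ * That is also why an STK is deleted right after its first use in the code below, while a regular LTK stays on the list for future reconnections.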
+ */ + if (ltk->type == SMP_STK) { + set_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); + list_del_rcu(<k->list); + kfree_rcu(ltk, rcu); + } else { + clear_bit(HCI_CONN_STK_ENCRYPT, &conn->flags); + } + + hci_dev_unlock(hdev); + + return; + +not_found: + neg.handle = ev->handle; + hci_send_cmd(hdev, HCI_OP_LE_LTK_NEG_REPLY, sizeof(neg), &neg); + hci_dev_unlock(hdev); +} + +static void send_conn_param_neg_reply(struct hci_dev *hdev, u16 handle, + u8 reason) +{ + struct hci_cp_le_conn_param_req_neg_reply cp; + + cp.handle = cpu_to_le16(handle); + cp.reason = reason; + + hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_NEG_REPLY, sizeof(cp), + &cp); +} + +static void hci_le_remote_conn_param_req_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_remote_conn_param_req *ev = data; + struct hci_cp_le_conn_param_req_reply cp; + struct hci_conn *hcon; + u16 handle, min, max, latency, timeout; + + bt_dev_dbg(hdev, "handle 0x%4.4x", __le16_to_cpu(ev->handle)); + + handle = le16_to_cpu(ev->handle); + min = le16_to_cpu(ev->interval_min); + max = le16_to_cpu(ev->interval_max); + latency = le16_to_cpu(ev->latency); + timeout = le16_to_cpu(ev->timeout); + + hcon = hci_conn_hash_lookup_handle(hdev, handle); + if (!hcon || hcon->state != BT_CONNECTED) + return send_conn_param_neg_reply(hdev, handle, + HCI_ERROR_UNKNOWN_CONN_ID); + + if (hci_check_conn_params(min, max, latency, timeout)) + return send_conn_param_neg_reply(hdev, handle, + HCI_ERROR_INVALID_LL_PARAMS); + + if (hcon->role == HCI_ROLE_MASTER) { + struct hci_conn_params *params; + u8 store_hint; + + hci_dev_lock(hdev); + + params = hci_conn_params_lookup(hdev, &hcon->dst, + hcon->dst_type); + if (params) { + params->conn_min_interval = min; + params->conn_max_interval = max; + params->conn_latency = latency; + params->supervision_timeout = timeout; + store_hint = 0x01; + } else { + store_hint = 0x00; + } + + hci_dev_unlock(hdev); + + mgmt_new_conn_param(hdev, &hcon->dst, hcon->dst_type, + store_hint, min, max, latency, timeout); + } + + cp.handle = ev->handle; + cp.interval_min = ev->interval_min; + cp.interval_max = ev->interval_max; + cp.latency = ev->latency; + cp.timeout = ev->timeout; + cp.min_ce_len = 0; + cp.max_ce_len = 0; + + hci_send_cmd(hdev, HCI_OP_LE_CONN_PARAM_REQ_REPLY, sizeof(cp), &cp); +} + +static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_direct_adv_report *ev = data; + u64 instant = jiffies; + int i; + + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, + flex_array_size(ev, info, ev->num))) + return; + + if (!ev->num) + return; + + hci_dev_lock(hdev); + + for (i = 0; i < ev->num; i++) { + struct hci_ev_le_direct_adv_info *info = &ev->info[i]; + + process_adv_report(hdev, info->type, &info->bdaddr, + info->bdaddr_type, &info->direct_addr, + info->direct_addr_type, info->rssi, NULL, 0, + false, false, instant); + } + + hci_dev_unlock(hdev); +} + +static void hci_le_phy_update_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_ev_le_phy_update_complete *ev = data; + struct hci_conn *conn; + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + if (ev->status) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(ev->handle)); + if (!conn) + goto unlock; + + conn->le_tx_phy = ev->tx_phy; + conn->le_rx_phy = ev->rx_phy; + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + 
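+ /* Editor's note: when acting as peripheral (HCI_ROLE_SLAVE) the accepted CIS QoS is not known up front, so it is back-filled below from the event's transport-latency, interval, PHY and MTU fields before the connection is reported as established. */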
struct hci_evt_le_cis_established *ev = data; + struct hci_conn *conn; + u16 handle = __le16_to_cpu(ev->handle); + + bt_dev_dbg(hdev, "status 0x%2.2x", ev->status); + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (!conn) { + bt_dev_err(hdev, + "Unable to find connection with handle 0x%4.4x", + handle); + goto unlock; + } + + if (conn->type != ISO_LINK) { + bt_dev_err(hdev, + "Invalid connection link type handle 0x%4.4x", + handle); + goto unlock; + } + + if (conn->role == HCI_ROLE_SLAVE) { + __le32 interval; + + memset(&interval, 0, sizeof(interval)); + + memcpy(&interval, ev->c_latency, sizeof(ev->c_latency)); + conn->iso_qos.in.interval = le32_to_cpu(interval); + memcpy(&interval, ev->p_latency, sizeof(ev->p_latency)); + conn->iso_qos.out.interval = le32_to_cpu(interval); + conn->iso_qos.in.latency = le16_to_cpu(ev->interval); + conn->iso_qos.out.latency = le16_to_cpu(ev->interval); + conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu); + conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu); + conn->iso_qos.in.phy = ev->c_phy; + conn->iso_qos.out.phy = ev->p_phy; + } + + if (!ev->status) { + conn->state = BT_CONNECTED; + hci_debugfs_create_conn(conn); + hci_conn_add_sysfs(conn); + hci_iso_setup_path(conn); + goto unlock; + } + + hci_connect_cfm(conn, ev->status); + hci_conn_del(conn); + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_reject_cis(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_le_reject_cis cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + cp.reason = HCI_ERROR_REJ_BAD_ADDR; + hci_send_cmd(hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); +} + +static void hci_le_accept_cis(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_le_accept_cis cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); +} + +static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_cis_req *ev = data; + u16 acl_handle, cis_handle; + struct hci_conn *acl, *cis; + int mask; + __u8 flags = 0; + + acl_handle = __le16_to_cpu(ev->acl_handle); + cis_handle = __le16_to_cpu(ev->cis_handle); + + bt_dev_dbg(hdev, "acl 0x%4.4x handle 0x%4.4x cig 0x%2.2x cis 0x%2.2x", + acl_handle, cis_handle, ev->cig_id, ev->cis_id); + + hci_dev_lock(hdev); + + acl = hci_conn_hash_lookup_handle(hdev, acl_handle); + if (!acl) + goto unlock; + + mask = hci_proto_connect_ind(hdev, &acl->dst, ISO_LINK, &flags); + if (!(mask & HCI_LM_ACCEPT)) { + hci_le_reject_cis(hdev, ev->cis_handle); + goto unlock; + } + + cis = hci_conn_hash_lookup_handle(hdev, cis_handle); + if (!cis) { + cis = hci_conn_add(hdev, ISO_LINK, &acl->dst, HCI_ROLE_SLAVE); + if (!cis) { + hci_le_reject_cis(hdev, ev->cis_handle); + goto unlock; + } + cis->handle = cis_handle; + } + + cis->iso_qos.cig = ev->cig_id; + cis->iso_qos.cis = ev->cis_id; + + if (!(flags & HCI_PROTO_DEFER)) { + hci_le_accept_cis(hdev, ev->cis_handle); + } else { + cis->state = BT_CONNECT2; + hci_connect_cfm(cis, 0); + } + +unlock: + hci_dev_unlock(hdev); +} + +static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) +{ + struct hci_evt_le_create_big_complete *ev = data; + struct hci_conn *conn; + + BT_DBG("%s status 0x%2.2x", hdev->name, ev->status); + + if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_CREATE_BIG_COMPLETE, + flex_array_size(ev, bis_handle, ev->num_bis))) + return; + + hci_dev_lock(hdev); + + conn = hci_conn_hash_lookup_big(hdev, ev->handle); + if (!conn) + goto unlock; + + 
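+ /* Editor's note: the whole BIG is represented by a single ISO_LINK hci_conn; on success the first BIS handle returned by the controller becomes the connection handle below. */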
if (conn->type != ISO_LINK) {
+		bt_dev_err(hdev,
+			   "Invalid connection link type handle 0x%2.2x",
+			   ev->handle);
+		goto unlock;
+	}
+
+	if (ev->num_bis)
+		conn->handle = __le16_to_cpu(ev->bis_handle[0]);
+
+	if (!ev->status) {
+		conn->state = BT_CONNECTED;
+		hci_debugfs_create_conn(conn);
+		hci_conn_add_sysfs(conn);
+		hci_iso_setup_path(conn);
+		goto unlock;
+	}
+
+	hci_connect_cfm(conn, ev->status);
+	hci_conn_del(conn);
+
+unlock:
+	hci_dev_unlock(hdev);
+}
+
+static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data,
+					    struct sk_buff *skb)
+{
+	struct hci_evt_le_big_sync_estabilished *ev = data;
+	struct hci_conn *bis;
+	int i;
+
+	bt_dev_dbg(hdev, "status 0x%2.2x", ev->status);
+
+	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EVT_LE_BIG_SYNC_ESTABILISHED,
+				flex_array_size(ev, bis, ev->num_bis)))
+		return;
+
+	if (ev->status)
+		return;
+
+	hci_dev_lock(hdev);
+
+	for (i = 0; i < ev->num_bis; i++) {
+		u16 handle = le16_to_cpu(ev->bis[i]);
+		__le32 interval;
+
+		bis = hci_conn_hash_lookup_handle(hdev, handle);
+		if (!bis) {
+			bis = hci_conn_add(hdev, ISO_LINK, BDADDR_ANY,
+					   HCI_ROLE_SLAVE);
+			if (!bis)
+				continue;
+			bis->handle = handle;
+		}
+
+		bis->iso_qos.big = ev->handle;
+		memset(&interval, 0, sizeof(interval));
+		memcpy(&interval, ev->latency, sizeof(ev->latency));
+		bis->iso_qos.in.interval = le32_to_cpu(interval);
+		/* Convert ISO Interval (1.25 ms slots) to latency (ms) */
+		bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100;
+		bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu);
+
+		hci_iso_setup_path(bis);
+	}
+
+	hci_dev_unlock(hdev);
+}
+
+static void hci_le_big_info_adv_report_evt(struct hci_dev *hdev, void *data,
+					   struct sk_buff *skb)
+{
+	struct hci_evt_le_big_info_adv_report *ev = data;
+	int mask = hdev->link_mode;
+	__u8 flags = 0;
+
+	bt_dev_dbg(hdev, "sync_handle 0x%4.4x", le16_to_cpu(ev->sync_handle));
+
+	hci_dev_lock(hdev);
+
+	mask |= hci_proto_connect_ind(hdev, BDADDR_ANY, ISO_LINK, &flags);
+	if (!(mask & HCI_LM_ACCEPT))
+		hci_le_pa_term_sync(hdev, ev->sync_handle);
+
+	hci_dev_unlock(hdev);
+}
+
+#define HCI_LE_EV_VL(_op, _func, _min_len, _max_len) \
+[_op] = { \
+	.func = _func, \
+	.min_len = _min_len, \
+	.max_len = _max_len, \
+}
+
+#define HCI_LE_EV(_op, _func, _len) \
+	HCI_LE_EV_VL(_op, _func, _len, _len)
+
+#define HCI_LE_EV_STATUS(_op, _func) \
+	HCI_LE_EV(_op, _func, sizeof(struct hci_ev_status))
+
+/* Entries in this table shall be placed at the index matching the subevent
+ * opcode they handle, so use of the macros above is recommended: they
+ * initialize each entry at its proper index with a designated initializer,
+ * and subevents without a callback function can simply be omitted.
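+ *
+ * For example, HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt,
+ * sizeof(struct hci_ev_le_conn_complete)) expands to a designated
+ * initializer at index 0x01 whose min_len and max_len are both fixed to
+ * sizeof(struct hci_ev_le_conn_complete).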
+ */ +static const struct hci_le_ev { + void (*func)(struct hci_dev *hdev, void *data, struct sk_buff *skb); + u16 min_len; + u16 max_len; +} hci_le_ev_table[U8_MAX + 1] = { + /* [0x01 = HCI_EV_LE_CONN_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_CONN_COMPLETE, hci_le_conn_complete_evt, + sizeof(struct hci_ev_le_conn_complete)), + /* [0x02 = HCI_EV_LE_ADVERTISING_REPORT] */ + HCI_LE_EV_VL(HCI_EV_LE_ADVERTISING_REPORT, hci_le_adv_report_evt, + sizeof(struct hci_ev_le_advertising_report), + HCI_MAX_EVENT_SIZE), + /* [0x03 = HCI_EV_LE_CONN_UPDATE_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_CONN_UPDATE_COMPLETE, + hci_le_conn_update_complete_evt, + sizeof(struct hci_ev_le_conn_update_complete)), + /* [0x04 = HCI_EV_LE_REMOTE_FEAT_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_REMOTE_FEAT_COMPLETE, + hci_le_remote_feat_complete_evt, + sizeof(struct hci_ev_le_remote_feat_complete)), + /* [0x05 = HCI_EV_LE_LTK_REQ] */ + HCI_LE_EV(HCI_EV_LE_LTK_REQ, hci_le_ltk_request_evt, + sizeof(struct hci_ev_le_ltk_req)), + /* [0x06 = HCI_EV_LE_REMOTE_CONN_PARAM_REQ] */ + HCI_LE_EV(HCI_EV_LE_REMOTE_CONN_PARAM_REQ, + hci_le_remote_conn_param_req_evt, + sizeof(struct hci_ev_le_remote_conn_param_req)), + /* [0x0a = HCI_EV_LE_ENHANCED_CONN_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_ENHANCED_CONN_COMPLETE, + hci_le_enh_conn_complete_evt, + sizeof(struct hci_ev_le_enh_conn_complete)), + /* [0x0b = HCI_EV_LE_DIRECT_ADV_REPORT] */ + HCI_LE_EV_VL(HCI_EV_LE_DIRECT_ADV_REPORT, hci_le_direct_adv_report_evt, + sizeof(struct hci_ev_le_direct_adv_report), + HCI_MAX_EVENT_SIZE), + /* [0x0c = HCI_EV_LE_PHY_UPDATE_COMPLETE] */ + HCI_LE_EV(HCI_EV_LE_PHY_UPDATE_COMPLETE, hci_le_phy_update_evt, + sizeof(struct hci_ev_le_phy_update_complete)), + /* [0x0d = HCI_EV_LE_EXT_ADV_REPORT] */ + HCI_LE_EV_VL(HCI_EV_LE_EXT_ADV_REPORT, hci_le_ext_adv_report_evt, + sizeof(struct hci_ev_le_ext_adv_report), + HCI_MAX_EVENT_SIZE), + /* [0x0e = HCI_EV_LE_PA_SYNC_ESTABLISHED] */ + HCI_LE_EV(HCI_EV_LE_PA_SYNC_ESTABLISHED, + hci_le_pa_sync_estabilished_evt, + sizeof(struct hci_ev_le_pa_sync_established)), + /* [0x12 = HCI_EV_LE_EXT_ADV_SET_TERM] */ + HCI_LE_EV(HCI_EV_LE_EXT_ADV_SET_TERM, hci_le_ext_adv_term_evt, + sizeof(struct hci_evt_le_ext_adv_set_term)), + /* [0x19 = HCI_EVT_LE_CIS_ESTABLISHED] */ + HCI_LE_EV(HCI_EVT_LE_CIS_ESTABLISHED, hci_le_cis_estabilished_evt, + sizeof(struct hci_evt_le_cis_established)), + /* [0x1a = HCI_EVT_LE_CIS_REQ] */ + HCI_LE_EV(HCI_EVT_LE_CIS_REQ, hci_le_cis_req_evt, + sizeof(struct hci_evt_le_cis_req)), + /* [0x1b = HCI_EVT_LE_CREATE_BIG_COMPLETE] */ + HCI_LE_EV_VL(HCI_EVT_LE_CREATE_BIG_COMPLETE, + hci_le_create_big_complete_evt, + sizeof(struct hci_evt_le_create_big_complete), + HCI_MAX_EVENT_SIZE), + /* [0x1d = HCI_EV_LE_BIG_SYNC_ESTABILISHED] */ + HCI_LE_EV_VL(HCI_EVT_LE_BIG_SYNC_ESTABILISHED, + hci_le_big_sync_established_evt, + sizeof(struct hci_evt_le_big_sync_estabilished), + HCI_MAX_EVENT_SIZE), + /* [0x22 = HCI_EVT_LE_BIG_INFO_ADV_REPORT] */ + HCI_LE_EV_VL(HCI_EVT_LE_BIG_INFO_ADV_REPORT, + hci_le_big_info_adv_report_evt, + sizeof(struct hci_evt_le_big_info_adv_report), + HCI_MAX_EVENT_SIZE), +}; + +static void hci_le_meta_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb, u16 *opcode, u8 *status, + hci_req_complete_t *req_complete, + hci_req_complete_skb_t *req_complete_skb) +{ + struct hci_ev_le_meta *ev = data; + const struct hci_le_ev *subev; + + bt_dev_dbg(hdev, "subevent 0x%2.2x", ev->subevent); + + /* Only match event if command OGF is for LE */ + if (hdev->sent_cmd && + hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) == 0x08 && + 
hci_skb_event(hdev->sent_cmd) == ev->subevent) {
+		*opcode = hci_skb_opcode(hdev->sent_cmd);
+		hci_req_cmd_complete(hdev, *opcode, 0x00, req_complete,
+				     req_complete_skb);
+	}
+
+	subev = &hci_le_ev_table[ev->subevent];
+	if (!subev->func)
+		return;
+
+	if (skb->len < subev->min_len) {
+		bt_dev_err(hdev, "unexpected subevent 0x%2.2x length: %u < %u",
+			   ev->subevent, skb->len, subev->min_len);
+		return;
+	}
+
+	/* Just warn if the length is over max_len: it may still be possible
+	 * to partially parse the event, so leave it to the callback to
+	 * decide whether that is acceptable.
+	 */
+	if (skb->len > subev->max_len)
+		bt_dev_warn(hdev, "unexpected subevent 0x%2.2x length: %u > %u",
+			    ev->subevent, skb->len, subev->max_len);
+	data = hci_le_ev_skb_pull(hdev, skb, ev->subevent, subev->min_len);
+	if (!data)
+		return;
+
+	subev->func(hdev, data, skb);
+}
+
+static bool hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
+				 u8 event, struct sk_buff *skb)
+{
+	struct hci_ev_cmd_complete *ev;
+	struct hci_event_hdr *hdr;
+
+	if (!skb)
+		return false;
+
+	hdr = hci_ev_skb_pull(hdev, skb, event, sizeof(*hdr));
+	if (!hdr)
+		return false;
+
+	if (event) {
+		if (hdr->evt != event)
+			return false;
+		return true;
+	}
+
+	/* Check if request ended in Command Status - no way to retrieve
+	 * any extra parameters in this case.
+	 */
+	if (hdr->evt == HCI_EV_CMD_STATUS)
+		return false;
+
+	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
+		bt_dev_err(hdev, "last event is not cmd complete (0x%2.2x)",
+			   hdr->evt);
+		return false;
+	}
+
+	ev = hci_cc_skb_pull(hdev, skb, opcode, sizeof(*ev));
+	if (!ev)
+		return false;
+
+	if (opcode != __le16_to_cpu(ev->opcode)) {
+		BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
+		       __le16_to_cpu(ev->opcode));
+		return false;
+	}
+
+	return true;
+}
+
+static void hci_store_wake_reason(struct hci_dev *hdev, u8 event,
+				  struct sk_buff *skb)
+{
+	struct hci_ev_le_advertising_info *adv;
+	struct hci_ev_le_direct_adv_info *direct_adv;
+	struct hci_ev_le_ext_adv_info *ext_adv;
+	const struct hci_ev_conn_complete *conn_complete = (void *)skb->data;
+	const struct hci_ev_conn_request *conn_request = (void *)skb->data;
+
+	hci_dev_lock(hdev);
+
+	/* If we are currently suspended and this is the first BT event seen,
+	 * save the wake reason associated with the event.
+	 */
+	if (!hdev->suspended || hdev->wake_reason)
+		goto unlock;
+
+	/* Default to remote wake. Values for wake_reason are documented in
+	 * the BlueZ mgmt API docs.
+	 */
+	hdev->wake_reason = MGMT_WAKE_REASON_REMOTE_WAKE;
+
+	/* Once configured for remote wakeup, we should only wake up for
+	 * reconnections. It's useful to see which device is waking us up, so
+	 * keep track of the bdaddr of the connection event that woke us up.
+	 */
+	if (event == HCI_EV_CONN_REQUEST) {
+		bacpy(&hdev->wake_addr, &conn_request->bdaddr);
+		hdev->wake_addr_type = BDADDR_BREDR;
+	} else if (event == HCI_EV_CONN_COMPLETE) {
+		bacpy(&hdev->wake_addr, &conn_complete->bdaddr);
+		hdev->wake_addr_type = BDADDR_BREDR;
+	} else if (event == HCI_EV_LE_META) {
+		struct hci_ev_le_meta *le_ev = (void *)skb->data;
+		u8 subevent = le_ev->subevent;
+		u8 *ptr = &skb->data[sizeof(*le_ev)];
+		u8 num_reports = *ptr;
+
+		if ((subevent == HCI_EV_LE_ADVERTISING_REPORT ||
+		     subevent == HCI_EV_LE_DIRECT_ADV_REPORT ||
+		     subevent == HCI_EV_LE_EXT_ADV_REPORT) &&
+		    num_reports) {
+			adv = (void *)(ptr + 1);
+			direct_adv = (void *)(ptr + 1);
+			ext_adv = (void *)(ptr + 1);
+
+			switch (subevent) {
+			case HCI_EV_LE_ADVERTISING_REPORT:
+				bacpy(&hdev->wake_addr, &adv->bdaddr);
+				hdev->wake_addr_type = adv->bdaddr_type;
+				break;
+			case HCI_EV_LE_DIRECT_ADV_REPORT:
+				bacpy(&hdev->wake_addr, &direct_adv->bdaddr);
+				hdev->wake_addr_type = direct_adv->bdaddr_type;
+				break;
+			case HCI_EV_LE_EXT_ADV_REPORT:
+				bacpy(&hdev->wake_addr, &ext_adv->bdaddr);
+				hdev->wake_addr_type = ext_adv->bdaddr_type;
+				break;
+			}
+		}
+	} else {
+		hdev->wake_reason = MGMT_WAKE_REASON_UNEXPECTED;
+	}
+
+unlock:
+	hci_dev_unlock(hdev);
+}
+
+#define HCI_EV_VL(_op, _func, _min_len, _max_len) \
+[_op] = { \
+	.req = false, \
+	.func = _func, \
+	.min_len = _min_len, \
+	.max_len = _max_len, \
+}
+
+#define HCI_EV(_op, _func, _len) \
+	HCI_EV_VL(_op, _func, _len, _len)
+
+#define HCI_EV_STATUS(_op, _func) \
+	HCI_EV(_op, _func, sizeof(struct hci_ev_status))
+
+#define HCI_EV_REQ_VL(_op, _func, _min_len, _max_len) \
+[_op] = { \
+	.req = true, \
+	.func_req = _func, \
+	.min_len = _min_len, \
+	.max_len = _max_len, \
+}
+
+#define HCI_EV_REQ(_op, _func, _len) \
+	HCI_EV_REQ_VL(_op, _func, _len, _len)
+
+/* Entries in this table shall be placed at the index matching the event
+ * opcode they handle, so use of the macros above is recommended: they
+ * initialize each entry at its proper index with a designated initializer,
+ * and events without a callback function need no entry at all.
+ */
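+
+/* For example, HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
+ * sizeof(struct hci_ev_conn_complete)) expands to a designated initializer
+ * at index 0x03 with .req = false and min_len == max_len, while the
+ * HCI_EV_REQ*() variants produce .req = true entries whose handlers also
+ * receive the request-completion callbacks.
+ */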
+static const struct hci_ev {
+	bool req;
+	union {
+		void (*func)(struct hci_dev *hdev, void *data,
+			     struct sk_buff *skb);
+		void (*func_req)(struct hci_dev *hdev, void *data,
+				 struct sk_buff *skb, u16 *opcode, u8 *status,
+				 hci_req_complete_t *req_complete,
+				 hci_req_complete_skb_t *req_complete_skb);
+	};
+	u16 min_len;
+	u16 max_len;
+} hci_ev_table[U8_MAX + 1] = {
+	/* [0x01 = HCI_EV_INQUIRY_COMPLETE] */
+	HCI_EV_STATUS(HCI_EV_INQUIRY_COMPLETE, hci_inquiry_complete_evt),
+	/* [0x02 = HCI_EV_INQUIRY_RESULT] */
+	HCI_EV_VL(HCI_EV_INQUIRY_RESULT, hci_inquiry_result_evt,
+		  sizeof(struct hci_ev_inquiry_result), HCI_MAX_EVENT_SIZE),
+	/* [0x03 = HCI_EV_CONN_COMPLETE] */
+	HCI_EV(HCI_EV_CONN_COMPLETE, hci_conn_complete_evt,
+	       sizeof(struct hci_ev_conn_complete)),
+	/* [0x04 = HCI_EV_CONN_REQUEST] */
+	HCI_EV(HCI_EV_CONN_REQUEST, hci_conn_request_evt,
+	       sizeof(struct hci_ev_conn_request)),
+	/* [0x05 = HCI_EV_DISCONN_COMPLETE] */
+	HCI_EV(HCI_EV_DISCONN_COMPLETE, hci_disconn_complete_evt,
+	       sizeof(struct hci_ev_disconn_complete)),
+	/* [0x06 = HCI_EV_AUTH_COMPLETE] */
+	HCI_EV(HCI_EV_AUTH_COMPLETE, hci_auth_complete_evt,
+	       sizeof(struct hci_ev_auth_complete)),
+	/* [0x07 = HCI_EV_REMOTE_NAME] */
+	HCI_EV(HCI_EV_REMOTE_NAME, hci_remote_name_evt,
+	       sizeof(struct hci_ev_remote_name)),
+	/* [0x08 = HCI_EV_ENCRYPT_CHANGE] */
+	HCI_EV(HCI_EV_ENCRYPT_CHANGE, hci_encrypt_change_evt,
+	       sizeof(struct hci_ev_encrypt_change)),
+	/* [0x09 = HCI_EV_CHANGE_LINK_KEY_COMPLETE] */
+	HCI_EV(HCI_EV_CHANGE_LINK_KEY_COMPLETE,
+	       hci_change_link_key_complete_evt,
+	       sizeof(struct hci_ev_change_link_key_complete)),
+	/* [0x0b = HCI_EV_REMOTE_FEATURES] */
+	HCI_EV(HCI_EV_REMOTE_FEATURES, hci_remote_features_evt,
+	       sizeof(struct hci_ev_remote_features)),
+	/* [0x0e = HCI_EV_CMD_COMPLETE] */
+	HCI_EV_REQ_VL(HCI_EV_CMD_COMPLETE, hci_cmd_complete_evt,
+		      sizeof(struct hci_ev_cmd_complete), HCI_MAX_EVENT_SIZE),
+	/* [0x0f = HCI_EV_CMD_STATUS] */
+	HCI_EV_REQ(HCI_EV_CMD_STATUS, hci_cmd_status_evt,
+		   sizeof(struct hci_ev_cmd_status)),
+	/* [0x10 = HCI_EV_HARDWARE_ERROR] */
+	HCI_EV(HCI_EV_HARDWARE_ERROR, hci_hardware_error_evt,
+	       sizeof(struct hci_ev_hardware_error)),
+	/* [0x12 = HCI_EV_ROLE_CHANGE] */
+	HCI_EV(HCI_EV_ROLE_CHANGE, hci_role_change_evt,
+	       sizeof(struct hci_ev_role_change)),
+	/* [0x13 = HCI_EV_NUM_COMP_PKTS] */
+	HCI_EV_VL(HCI_EV_NUM_COMP_PKTS, hci_num_comp_pkts_evt,
+		  sizeof(struct hci_ev_num_comp_pkts), HCI_MAX_EVENT_SIZE),
+	/* [0x14 = HCI_EV_MODE_CHANGE] */
+	HCI_EV(HCI_EV_MODE_CHANGE, hci_mode_change_evt,
+	       sizeof(struct hci_ev_mode_change)),
+	/* [0x16 = HCI_EV_PIN_CODE_REQ] */
+	HCI_EV(HCI_EV_PIN_CODE_REQ, hci_pin_code_request_evt,
+	       sizeof(struct hci_ev_pin_code_req)),
+	/* [0x17 = HCI_EV_LINK_KEY_REQ] */
+	HCI_EV(HCI_EV_LINK_KEY_REQ, hci_link_key_request_evt,
+	       sizeof(struct hci_ev_link_key_req)),
+	/* [0x18 = HCI_EV_LINK_KEY_NOTIFY] */
+	HCI_EV(HCI_EV_LINK_KEY_NOTIFY, hci_link_key_notify_evt,
+	       sizeof(struct hci_ev_link_key_notify)),
+	/* [0x1c = HCI_EV_CLOCK_OFFSET] */
+	HCI_EV(HCI_EV_CLOCK_OFFSET, hci_clock_offset_evt,
+	       sizeof(struct hci_ev_clock_offset)),
+	/* [0x1d = HCI_EV_PKT_TYPE_CHANGE] */
+	HCI_EV(HCI_EV_PKT_TYPE_CHANGE, hci_pkt_type_change_evt,
+	       sizeof(struct hci_ev_pkt_type_change)),
+	/* [0x20 = HCI_EV_PSCAN_REP_MODE] */
+	HCI_EV(HCI_EV_PSCAN_REP_MODE, hci_pscan_rep_mode_evt,
+	       sizeof(struct hci_ev_pscan_rep_mode)),
+	/* [0x22 = HCI_EV_INQUIRY_RESULT_WITH_RSSI] */
+	HCI_EV_VL(HCI_EV_INQUIRY_RESULT_WITH_RSSI,
+		  hci_inquiry_result_with_rssi_evt,
+		  sizeof(struct hci_ev_inquiry_result_rssi),
+		  HCI_MAX_EVENT_SIZE),
+	/* [0x23 = HCI_EV_REMOTE_EXT_FEATURES] */
+	HCI_EV(HCI_EV_REMOTE_EXT_FEATURES, hci_remote_ext_features_evt,
+	       sizeof(struct hci_ev_remote_ext_features)),
+	/* [0x2c = HCI_EV_SYNC_CONN_COMPLETE] */
+	HCI_EV(HCI_EV_SYNC_CONN_COMPLETE, hci_sync_conn_complete_evt,
+	       sizeof(struct hci_ev_sync_conn_complete)),
+	/* [0x2d = HCI_EV_EXTENDED_INQUIRY_RESULT] */
+	HCI_EV_VL(HCI_EV_EXTENDED_INQUIRY_RESULT,
+		  hci_extended_inquiry_result_evt,
+		  sizeof(struct hci_ev_ext_inquiry_result), HCI_MAX_EVENT_SIZE),
+	/* [0x30 = HCI_EV_KEY_REFRESH_COMPLETE] */
+	HCI_EV(HCI_EV_KEY_REFRESH_COMPLETE, hci_key_refresh_complete_evt,
+	       sizeof(struct hci_ev_key_refresh_complete)),
+	/* [0x31 = HCI_EV_IO_CAPA_REQUEST] */
+	HCI_EV(HCI_EV_IO_CAPA_REQUEST, hci_io_capa_request_evt,
+	       sizeof(struct hci_ev_io_capa_request)),
+	/* [0x32 = HCI_EV_IO_CAPA_REPLY] */
+	HCI_EV(HCI_EV_IO_CAPA_REPLY, hci_io_capa_reply_evt,
+	       sizeof(struct hci_ev_io_capa_reply)),
+	/* [0x33 = HCI_EV_USER_CONFIRM_REQUEST] */
+	HCI_EV(HCI_EV_USER_CONFIRM_REQUEST, hci_user_confirm_request_evt,
+	       sizeof(struct hci_ev_user_confirm_req)),
+	/* [0x34 = HCI_EV_USER_PASSKEY_REQUEST] */
+	HCI_EV(HCI_EV_USER_PASSKEY_REQUEST, hci_user_passkey_request_evt,
+	       sizeof(struct hci_ev_user_passkey_req)),
+	/* [0x35 = HCI_EV_REMOTE_OOB_DATA_REQUEST] */
+	HCI_EV(HCI_EV_REMOTE_OOB_DATA_REQUEST, hci_remote_oob_data_request_evt,
+	       sizeof(struct hci_ev_remote_oob_data_request)),
+	/* [0x36 = HCI_EV_SIMPLE_PAIR_COMPLETE] */
+	HCI_EV(HCI_EV_SIMPLE_PAIR_COMPLETE, hci_simple_pair_complete_evt,
+	       sizeof(struct hci_ev_simple_pair_complete)),
+	/* [0x3b = HCI_EV_USER_PASSKEY_NOTIFY] */
+	HCI_EV(HCI_EV_USER_PASSKEY_NOTIFY, hci_user_passkey_notify_evt,
+	       sizeof(struct hci_ev_user_passkey_notify)),
+	/* [0x3c = HCI_EV_KEYPRESS_NOTIFY] */
+	HCI_EV(HCI_EV_KEYPRESS_NOTIFY, hci_keypress_notify_evt,
+	       sizeof(struct hci_ev_keypress_notify)),
+	/* [0x3d = HCI_EV_REMOTE_HOST_FEATURES] */
+	HCI_EV(HCI_EV_REMOTE_HOST_FEATURES, hci_remote_host_features_evt,
+	       sizeof(struct hci_ev_remote_host_features)),
+	/* [0x3e = HCI_EV_LE_META] */
+	HCI_EV_REQ_VL(HCI_EV_LE_META, hci_le_meta_evt,
+		      sizeof(struct hci_ev_le_meta), HCI_MAX_EVENT_SIZE),
+#if IS_ENABLED(CONFIG_BT_HS)
+	/* [0x40 = HCI_EV_PHY_LINK_COMPLETE] */
+	HCI_EV(HCI_EV_PHY_LINK_COMPLETE, hci_phy_link_complete_evt,
+	       sizeof(struct hci_ev_phy_link_complete)),
+	/* [0x41 = HCI_EV_CHANNEL_SELECTED] */
+	HCI_EV(HCI_EV_CHANNEL_SELECTED, hci_chan_selected_evt,
+	       sizeof(struct hci_ev_channel_selected)),
+	/* [0x42 = HCI_EV_DISCONN_PHY_LINK_COMPLETE] */
+	HCI_EV(HCI_EV_DISCONN_PHY_LINK_COMPLETE,
+	       hci_disconn_phylink_complete_evt,
+	       sizeof(struct hci_ev_disconn_phy_link_complete)),
+	/* [0x45 = HCI_EV_LOGICAL_LINK_COMPLETE] */
+	HCI_EV(HCI_EV_LOGICAL_LINK_COMPLETE, hci_loglink_complete_evt,
+	       sizeof(struct hci_ev_logical_link_complete)),
+	/* [0x46 = HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE] */
+	HCI_EV(HCI_EV_DISCONN_LOGICAL_LINK_COMPLETE,
+	       hci_disconn_loglink_complete_evt,
+	       sizeof(struct hci_ev_disconn_logical_link_complete)),
+#endif
+	/* [0x48 = HCI_EV_NUM_COMP_BLOCKS] */
+	HCI_EV(HCI_EV_NUM_COMP_BLOCKS, hci_num_comp_blocks_evt,
+	       sizeof(struct hci_ev_num_comp_blocks)),
+	/* [0xff = HCI_EV_VENDOR] */
+	HCI_EV_VL(HCI_EV_VENDOR, msft_vendor_evt, 0, HCI_MAX_EVENT_SIZE),
+};
+
+static void hci_event_func(struct hci_dev *hdev, u8 event, struct sk_buff *skb,
+			   u16 *opcode, u8 *status,
+			   hci_req_complete_t *req_complete,
+			   hci_req_complete_skb_t *req_complete_skb)
+{
+	const struct hci_ev *ev = &hci_ev_table[event];
+	void *data;
+
+	if (!ev->func)
+		return;
+
+	if (skb->len < ev->min_len) {
+		bt_dev_err(hdev, "unexpected event 0x%2.2x length: %u < %u",
+			   event, skb->len, ev->min_len);
+		return;
+	}
+
+	/* Just warn if the length is over max_len: it may still be possible
+	 * to partially parse the event, so leave it to the callback to
+	 * decide whether that is acceptable.
+	 */
+	if (skb->len > ev->max_len)
+		bt_dev_warn_ratelimited(hdev,
+					"unexpected event 0x%2.2x length: %u > %u",
+					event, skb->len, ev->max_len);
+
+	data = hci_ev_skb_pull(hdev, skb, event, ev->min_len);
+	if (!data)
+		return;
+
+	if (ev->req)
+		ev->func_req(hdev, data, skb, opcode, status, req_complete,
+			     req_complete_skb);
+	else
+		ev->func(hdev, data, skb);
+}
+
+void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	struct hci_event_hdr *hdr = (void *) skb->data;
+	hci_req_complete_t req_complete = NULL;
+	hci_req_complete_skb_t req_complete_skb = NULL;
+	struct sk_buff *orig_skb = NULL;
+	u8 status = 0, event, req_evt = 0;
+	u16 opcode = HCI_OP_NOP;
+
+	if (skb->len < sizeof(*hdr)) {
+		bt_dev_err(hdev, "Malformed HCI Event");
+		goto done;
+	}
+
+	kfree_skb(hdev->recv_event);
+	hdev->recv_event = skb_clone(skb, GFP_KERNEL);
+
+	event = hdr->evt;
+	if (!event) {
+		bt_dev_warn(hdev, "Received unexpected HCI Event 0x%2.2x",
+			    event);
+		goto done;
+	}
+
+	/* Only match event if command OGF is not for LE */
+	if (hdev->sent_cmd &&
+	    hci_opcode_ogf(hci_skb_opcode(hdev->sent_cmd)) != 0x08 &&
+	    hci_skb_event(hdev->sent_cmd) == event) {
+		hci_req_cmd_complete(hdev, hci_skb_opcode(hdev->sent_cmd),
+				     status, &req_complete, &req_complete_skb);
+		req_evt = event;
+	}
+
+	/* If it looks like we might end up having to call
+	 * req_complete_skb, store a pristine copy of the skb since the
+	 * various handlers may modify the original one through
+	 * skb_pull() calls, etc.
+	 */
+	if (req_complete_skb || event == HCI_EV_CMD_STATUS ||
+	    event == HCI_EV_CMD_COMPLETE)
+		orig_skb = skb_clone(skb, GFP_KERNEL);
+
+	skb_pull(skb, HCI_EVENT_HDR_SIZE);
+
+	/* Store wake reason if we're suspended */
+	hci_store_wake_reason(hdev, event, skb);
+
+	bt_dev_dbg(hdev, "event 0x%2.2x", event);
+
+	hci_event_func(hdev, event, skb, &opcode, &status, &req_complete,
+		       &req_complete_skb);
+
+	if (req_complete) {
+		req_complete(hdev, status, opcode);
+	} else if (req_complete_skb) {
+		if (!hci_get_cmd_complete(hdev, opcode, req_evt, orig_skb)) {
+			kfree_skb(orig_skb);
+			orig_skb = NULL;
+		}
+		req_complete_skb(hdev, status, opcode, orig_skb);
+	}
+
+done:
+	kfree_skb(orig_skb);
+	kfree_skb(skb);
+	hdev->stat.evt_rx++;
+}
diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c
new file mode 100644
index 000000000..f7e006a36
--- /dev/null
+++ b/net/bluetooth/hci_request.c
@@ -0,0 +1,922 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+
+ Copyright (C) 2014 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <linux/sched/signal.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/mgmt.h> + +#include "smp.h" +#include "hci_request.h" +#include "msft.h" +#include "eir.h" + +void hci_req_init(struct hci_request *req, struct hci_dev *hdev) +{ + skb_queue_head_init(&req->cmd_q); + req->hdev = hdev; + req->err = 0; +} + +void hci_req_purge(struct hci_request *req) +{ + skb_queue_purge(&req->cmd_q); +} + +bool hci_req_status_pend(struct hci_dev *hdev) +{ + return hdev->req_status == HCI_REQ_PEND; +} + +static int req_run(struct hci_request *req, hci_req_complete_t complete, + hci_req_complete_skb_t complete_skb) +{ + struct hci_dev *hdev = req->hdev; + struct sk_buff *skb; + unsigned long flags; + + bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); + + /* If an error occurred during request building, remove all HCI + * commands queued on the HCI request queue. + */ + if (req->err) { + skb_queue_purge(&req->cmd_q); + return req->err; + } + + /* Do not allow empty requests */ + if (skb_queue_empty(&req->cmd_q)) + return -ENODATA; + + skb = skb_peek_tail(&req->cmd_q); + if (complete) { + bt_cb(skb)->hci.req_complete = complete; + } else if (complete_skb) { + bt_cb(skb)->hci.req_complete_skb = complete_skb; + bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; + } + + spin_lock_irqsave(&hdev->cmd_q.lock, flags); + skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); + spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); + + queue_work(hdev->workqueue, &hdev->cmd_work); + + return 0; +} + +int hci_req_run(struct hci_request *req, hci_req_complete_t complete) +{ + return req_run(req, complete, NULL); +} + +int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) +{ + return req_run(req, NULL, complete); +} + +void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, + struct sk_buff *skb) +{ + bt_dev_dbg(hdev, "result 0x%2.2x", result); + + if (hdev->req_status == HCI_REQ_PEND) { + hdev->req_result = result; + hdev->req_status = HCI_REQ_DONE; + if (skb) + hdev->req_skb = skb_get(skb); + wake_up_interruptible(&hdev->req_wait_q); + } +} + +/* Execute request and wait for completion. */ +int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, + unsigned long opt), + unsigned long opt, u32 timeout, u8 *hci_status) +{ + struct hci_request req; + int err = 0; + + bt_dev_dbg(hdev, "start"); + + hci_req_init(&req, hdev); + + hdev->req_status = HCI_REQ_PEND; + + err = func(&req, opt); + if (err) { + if (hci_status) + *hci_status = HCI_ERROR_UNSPECIFIED; + return err; + } + + err = hci_req_run_skb(&req, hci_req_sync_complete); + if (err < 0) { + hdev->req_status = 0; + + /* ENODATA means the HCI request command queue is empty. + * This can happen when a request with conditionals doesn't + * trigger any commands to be sent. This is normal behavior + * and should not trigger an error return. 
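+		 * For example, an accept list update whose conditionals
+		 * find nothing to add or remove queues no commands, so
+		 * req_run() reports -ENODATA rather than a real failure.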
+		 */
+		if (err == -ENODATA) {
+			if (hci_status)
+				*hci_status = 0;
+			return 0;
+		}
+
+		if (hci_status)
+			*hci_status = HCI_ERROR_UNSPECIFIED;
+
+		return err;
+	}
+
+	err = wait_event_interruptible_timeout(hdev->req_wait_q,
+			hdev->req_status != HCI_REQ_PEND, timeout);
+
+	if (err == -ERESTARTSYS)
+		return -EINTR;
+
+	switch (hdev->req_status) {
+	case HCI_REQ_DONE:
+		err = -bt_to_errno(hdev->req_result);
+		if (hci_status)
+			*hci_status = hdev->req_result;
+		break;
+
+	case HCI_REQ_CANCELED:
+		err = -hdev->req_result;
+		if (hci_status)
+			*hci_status = HCI_ERROR_UNSPECIFIED;
+		break;
+
+	default:
+		err = -ETIMEDOUT;
+		if (hci_status)
+			*hci_status = HCI_ERROR_UNSPECIFIED;
+		break;
+	}
+
+	kfree_skb(hdev->req_skb);
+	hdev->req_skb = NULL;
+	hdev->req_status = hdev->req_result = 0;
+
+	bt_dev_dbg(hdev, "end: err %d", err);
+
+	return err;
+}
+
+int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
+						  unsigned long opt),
+		 unsigned long opt, u32 timeout, u8 *hci_status)
+{
+	int ret;
+
+	/* Serialize all requests */
+	hci_req_sync_lock(hdev);
+	/* Check the state after obtaining the lock to protect HCI_UP
+	 * against any races from hci_dev_do_close when the controller
+	 * gets removed.
+	 */
+	if (test_bit(HCI_UP, &hdev->flags))
+		ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
+	else
+		ret = -ENETDOWN;
+	hci_req_sync_unlock(hdev);
+
+	return ret;
+}
+
+struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
+				const void *param)
+{
+	int len = HCI_COMMAND_HDR_SIZE + plen;
+	struct hci_command_hdr *hdr;
+	struct sk_buff *skb;
+
+	skb = bt_skb_alloc(len, GFP_ATOMIC);
+	if (!skb)
+		return NULL;
+
+	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
+	hdr->opcode = cpu_to_le16(opcode);
+	hdr->plen   = plen;
+
+	if (plen)
+		skb_put_data(skb, param, plen);
+
+	bt_dev_dbg(hdev, "skb len %d", skb->len);
+
+	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
+	hci_skb_opcode(skb) = opcode;
+
+	return skb;
+}
+
+/* Queue a command to an asynchronous HCI request */
+void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
+		    const void *param, u8 event)
+{
+	struct hci_dev *hdev = req->hdev;
+	struct sk_buff *skb;
+
+	bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen);
+
+	/* If an error occurred during request building, there is no point in
+	 * queueing the HCI command. We can simply return.
+	 */
+	if (req->err)
+		return;
+
+	skb = hci_prepare_cmd(hdev, opcode, plen, param);
+	if (!skb) {
+		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
+			   opcode);
+		req->err = -ENOMEM;
+		return;
+	}
+
+	if (skb_queue_empty(&req->cmd_q))
+		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
+
+	hci_skb_event(skb) = event;
+
+	skb_queue_tail(&req->cmd_q, skb);
+}
+
+void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
+		 const void *param)
+{
+	bt_dev_dbg(req->hdev, "HCI_REQ-0x%4.4x", opcode);
+	hci_req_add_ev(req, opcode, plen, param, 0);
+}
+
+static void start_interleave_scan(struct hci_dev *hdev)
+{
+	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+	queue_delayed_work(hdev->req_workqueue,
+			   &hdev->interleave_scan, 0);
+}
+
+static bool is_interleave_scanning(struct hci_dev *hdev)
+{
+	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
+}
+
+static void cancel_interleave_scan(struct hci_dev *hdev)
+{
+	bt_dev_dbg(hdev, "cancelling interleave scan");
+
+	cancel_delayed_work_sync(&hdev->interleave_scan);
+
+	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
+}
+
+/* Return true if this call started interleave scanning, false otherwise. */
+static bool __hci_update_interleaved_scan(struct hci_dev *hdev)
+{
+	/* Do interleaved scan only if all of the following are true:
+	 * - There is at least one ADV monitor
+	 * - At least one pending LE connection or one device to be scanned for
+	 * - Monitor offloading is not supported
+	 * If so, we should alternate between allowlist scan and one without
+	 * any filters to save power.
+	 */
+	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
+				!(list_empty(&hdev->pend_le_conns) &&
+				  list_empty(&hdev->pend_le_reports)) &&
+				hci_get_adv_monitor_offload_ext(hdev) ==
+				    HCI_ADV_MONITOR_EXT_NONE;
+	bool is_interleaving = is_interleave_scanning(hdev);
+
+	if (use_interleaving && !is_interleaving) {
+		start_interleave_scan(hdev);
+		bt_dev_dbg(hdev, "starting interleave scan");
+		return true;
+	}
+
+	if (!use_interleaving && is_interleaving)
+		cancel_interleave_scan(hdev);
+
+	return false;
+}
+
+void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)
+{
+	struct hci_dev *hdev = req->hdev;
+
+	if (hdev->scanning_paused) {
+		bt_dev_dbg(hdev, "Scanning is paused for suspend");
+		return;
+	}
+
+	if (use_ext_scan(hdev)) {
+		struct hci_cp_le_set_ext_scan_enable cp;
+
+		memset(&cp, 0, sizeof(cp));
+		cp.enable = LE_SCAN_DISABLE;
+		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
+			    &cp);
+	} else {
+		struct hci_cp_le_set_scan_enable cp;
+
+		memset(&cp, 0, sizeof(cp));
+		cp.enable = LE_SCAN_DISABLE;
+		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
+	}
+
+	/* Disable address resolution */
+	if (hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION) && !rpa_le_conn) {
+		__u8 enable = 0x00;
+
+		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
+	}
+}
+
+static void del_from_accept_list(struct hci_request *req, bdaddr_t *bdaddr,
+				 u8 bdaddr_type)
+{
+	struct hci_cp_le_del_from_accept_list cp;
+
+	cp.bdaddr_type = bdaddr_type;
+	bacpy(&cp.bdaddr, bdaddr);
+
+	bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from accept list", &cp.bdaddr,
+		   cp.bdaddr_type);
+	hci_req_add(req, HCI_OP_LE_DEL_FROM_ACCEPT_LIST, sizeof(cp), &cp);
+
+	if (use_ll_privacy(req->hdev)) {
+		struct smp_irk *irk;
+
+		irk = hci_find_irk_by_addr(req->hdev, bdaddr, bdaddr_type);
+		if (irk) {
+			struct hci_cp_le_del_from_resolv_list cp;
+
+			cp.bdaddr_type = bdaddr_type;
+			bacpy(&cp.bdaddr, bdaddr);
+
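+			/* Mirror the removal in the controller's resolving
+			 * list so that LL privacy resolution only covers
+			 * peers that are still on the accept list.
+			 */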
hci_req_add(req, HCI_OP_LE_DEL_FROM_RESOLV_LIST, + sizeof(cp), &cp); + } + } +} + +/* Adds connection to accept list if needed. On error, returns -1. */ +static int add_to_accept_list(struct hci_request *req, + struct hci_conn_params *params, u8 *num_entries, + bool allow_rpa) +{ + struct hci_cp_le_add_to_accept_list cp; + struct hci_dev *hdev = req->hdev; + + /* Already in accept list */ + if (hci_bdaddr_list_lookup(&hdev->le_accept_list, ¶ms->addr, + params->addr_type)) + return 0; + + /* Select filter policy to accept all advertising */ + if (*num_entries >= hdev->le_accept_list_size) + return -1; + + /* Accept list can not be used with RPAs */ + if (!allow_rpa && + !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && + hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) { + return -1; + } + + /* During suspend, only wakeable devices can be in accept list */ + if (hdev->suspended && + !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP)) + return 0; + + *num_entries += 1; + cp.bdaddr_type = params->addr_type; + bacpy(&cp.bdaddr, ¶ms->addr); + + bt_dev_dbg(hdev, "Add %pMR (0x%x) to accept list", &cp.bdaddr, + cp.bdaddr_type); + hci_req_add(req, HCI_OP_LE_ADD_TO_ACCEPT_LIST, sizeof(cp), &cp); + + if (use_ll_privacy(hdev)) { + struct smp_irk *irk; + + irk = hci_find_irk_by_addr(hdev, ¶ms->addr, + params->addr_type); + if (irk) { + struct hci_cp_le_add_to_resolv_list cp; + + cp.bdaddr_type = params->addr_type; + bacpy(&cp.bdaddr, ¶ms->addr); + memcpy(cp.peer_irk, irk->val, 16); + + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) + memcpy(cp.local_irk, hdev->irk, 16); + else + memset(cp.local_irk, 0, 16); + + hci_req_add(req, HCI_OP_LE_ADD_TO_RESOLV_LIST, + sizeof(cp), &cp); + } + } + + return 0; +} + +static u8 update_accept_list(struct hci_request *req) +{ + struct hci_dev *hdev = req->hdev; + struct hci_conn_params *params; + struct bdaddr_list *b; + u8 num_entries = 0; + bool pend_conn, pend_report; + /* We allow usage of accept list even with RPAs in suspend. In the worst + * case, we won't be able to wake from devices that use the privacy1.2 + * features. Additionally, once we support privacy1.2 and IRK + * offloading, we can update this to also check for those conditions. + */ + bool allow_rpa = hdev->suspended; + + if (use_ll_privacy(hdev)) + allow_rpa = true; + + /* Go through the current accept list programmed into the + * controller one by one and check if that address is still + * in the list of pending connections or list of devices to + * report. If not present in either list, then queue the + * command to remove it from the controller. + */ + list_for_each_entry(b, &hdev->le_accept_list, list) { + pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns, + &b->bdaddr, + b->bdaddr_type); + pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports, + &b->bdaddr, + b->bdaddr_type); + + /* If the device is not likely to connect or report, + * remove it from the accept list. + */ + if (!pend_conn && !pend_report) { + del_from_accept_list(req, &b->bdaddr, b->bdaddr_type); + continue; + } + + /* Accept list can not be used with RPAs */ + if (!allow_rpa && + !hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY) && + hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) { + return 0x00; + } + + num_entries++; + } + + /* Since all no longer valid accept list entries have been + * removed, walk through the list of pending connections + * and ensure that any new device gets programmed into + * the controller. 
+	 *
+	 * If the list of the devices is larger than the list of
+	 * available accept list entries in the controller, then
+	 * just abort and return the filter policy value to not use
+	 * the accept list.
+	 */
+	list_for_each_entry(params, &hdev->pend_le_conns, action) {
+		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
+			return 0x00;
+	}
+
+	/* After adding all new pending connections, walk through
+	 * the list of pending reports and also add these to the
+	 * accept list if there is still space. Abort if space runs out.
+	 */
+	list_for_each_entry(params, &hdev->pend_le_reports, action) {
+		if (add_to_accept_list(req, params, &num_entries, allow_rpa))
+			return 0x00;
+	}
+
+	/* Use the allowlist unless the following conditions are all true:
+	 * - We are not currently suspending
+	 * - There are 1 or more ADV monitors registered and it's not offloaded
+	 * - Interleaved scanning is not currently using the allowlist
+	 */
+	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
+	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
+	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
+		return 0x00;
+
+	/* Select filter policy to use accept list */
+	return 0x01;
+}
+
+static bool scan_use_rpa(struct hci_dev *hdev)
+{
+	return hci_dev_test_flag(hdev, HCI_PRIVACY);
+}
+
+static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
+			       u16 window, u8 own_addr_type, u8 filter_policy,
+			       bool filter_dup, bool addr_resolv)
+{
+	struct hci_dev *hdev = req->hdev;
+
+	if (hdev->scanning_paused) {
+		bt_dev_dbg(hdev, "Scanning is paused for suspend");
+		return;
+	}
+
+	if (use_ll_privacy(hdev) && addr_resolv) {
+		u8 enable = 0x01;
+
+		hci_req_add(req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable);
+	}
+
+	/* Use ext scanning if set ext scan param and ext scan enable is
+	 * supported
+	 */
+	if (use_ext_scan(hdev)) {
+		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
+		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
+		struct hci_cp_le_scan_phy_params *phy_params;
+		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
+		u32 plen;
+
+		ext_param_cp = (void *)data;
+		phy_params = (void *)ext_param_cp->data;
+
+		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
+		ext_param_cp->own_addr_type = own_addr_type;
+		ext_param_cp->filter_policy = filter_policy;
+
+		plen = sizeof(*ext_param_cp);
+
+		if (scan_1m(hdev) || scan_2m(hdev)) {
+			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;
+
+			memset(phy_params, 0, sizeof(*phy_params));
+			phy_params->type = type;
+			phy_params->interval = cpu_to_le16(interval);
+			phy_params->window = cpu_to_le16(window);
+
+			plen += sizeof(*phy_params);
+			phy_params++;
+		}
+
+		if (scan_coded(hdev)) {
+			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;
+
+			memset(phy_params, 0, sizeof(*phy_params));
+			phy_params->type = type;
+			phy_params->interval = cpu_to_le16(interval);
+			phy_params->window = cpu_to_le16(window);
+
+			plen += sizeof(*phy_params);
+			phy_params++;
+		}
+
+		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
+			    plen, ext_param_cp);
+
+		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
+		ext_enable_cp.enable = LE_SCAN_ENABLE;
+		ext_enable_cp.filter_dup = filter_dup;
+
+		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
+			    sizeof(ext_enable_cp), &ext_enable_cp);
+	} else {
+		struct hci_cp_le_set_scan_param param_cp;
+		struct hci_cp_le_set_scan_enable enable_cp;
+
+		memset(&param_cp, 0, sizeof(param_cp));
+		param_cp.type = type;
+		param_cp.interval = cpu_to_le16(interval);
+		param_cp.window = cpu_to_le16(window);
+		param_cp.own_address_type = own_addr_type;
+		param_cp.filter_policy = filter_policy;
+		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
+			    &param_cp);
+
+		memset(&enable_cp, 0, sizeof(enable_cp));
+		enable_cp.enable = LE_SCAN_ENABLE;
+		enable_cp.filter_dup = filter_dup;
+		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
+			    &enable_cp);
+	}
+}
+
+/* Returns true if an le connection is in the scanning state */
+static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn  *c;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
+		if (c->type == LE_LINK && c->state == BT_CONNECT &&
+		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
+			rcu_read_unlock();
+			return true;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return false;
+}
+
+static void set_random_addr(struct hci_request *req, bdaddr_t *rpa);
+static int hci_update_random_address(struct hci_request *req,
+				     bool require_privacy, bool use_rpa,
+				     u8 *own_addr_type)
+{
+	struct hci_dev *hdev = req->hdev;
+	int err;
+
+	/* If privacy is enabled use a resolvable private address. If
+	 * current RPA has expired or there is something else than
+	 * the current RPA in use, then generate a new one.
+	 */
+	if (use_rpa) {
+		/* If the controller supports LL Privacy, use own address
+		 * type 0x03 (resolvable private address, resolved by the
+		 * controller).
+		 */
+		if (use_ll_privacy(hdev))
+			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
+		else
+			*own_addr_type = ADDR_LE_DEV_RANDOM;
+
+		if (rpa_valid(hdev))
+			return 0;
+
+		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+		if (err < 0) {
+			bt_dev_err(hdev, "failed to generate new RPA");
+			return err;
+		}
+
+		set_random_addr(req, &hdev->rpa);
+
+		return 0;
+	}
+
+	/* In case of required privacy without resolvable private address,
+	 * use a non-resolvable private address. This is useful for active
+	 * scanning and non-connectable advertising.
+	 */
+	if (require_privacy) {
+		bdaddr_t nrpa;
+
+		while (true) {
+			/* The non-resolvable private address is generated
+			 * from random six bytes with the two most significant
+			 * bits cleared.
+			 */
+			get_random_bytes(&nrpa, 6);
+			nrpa.b[5] &= 0x3f;
+
+			/* The non-resolvable private address shall not be
+			 * equal to the public address.
+			 */
+			if (bacmp(&hdev->bdaddr, &nrpa))
+				break;
+		}
+
+		*own_addr_type = ADDR_LE_DEV_RANDOM;
+		set_random_addr(req, &nrpa);
+		return 0;
+	}
+
+	/* If forcing static address is in use or there is no public
+	 * address, use the static address as the random address (but
+	 * skip the HCI command if the current random address is already
+	 * the static one).
+	 *
+	 * In case BR/EDR has been disabled on a dual-mode controller
+	 * and a static address has been configured, then use that
+	 * address instead of the public BR/EDR address.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
+	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
+		*own_addr_type = ADDR_LE_DEV_RANDOM;
+		if (bacmp(&hdev->static_addr, &hdev->random_addr))
+			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
+				    &hdev->static_addr);
+		return 0;
+	}
+
+	/* Neither privacy nor static address is being used so use a
+	 * public address.
+	 */
+	*own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+	return 0;
+}
+
+/* Call hci_req_add_le_scan_disable() first to disable controller-based
+ * address resolution, so that the resolving list can be reconfigured.
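+ *
+ * A typical caller therefore queues the two steps back to back:
+ *
+ *	hci_req_add_le_scan_disable(req, false);
+ *	hci_req_add_le_passive_scan(req);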
+ */
+void hci_req_add_le_passive_scan(struct hci_request *req)
+{
+	struct hci_dev *hdev = req->hdev;
+	u8 own_addr_type;
+	u8 filter_policy;
+	u16 window, interval;
+	/* Default is to enable duplicates filter */
+	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+	/* Background scanning should run with address resolution */
+	bool addr_resolv = true;
+
+	if (hdev->scanning_paused) {
+		bt_dev_dbg(hdev, "Scanning is paused for suspend");
+		return;
+	}
+
+	/* Set require_privacy to false since no SCAN_REQ are sent
+	 * during passive scanning. Not using a non-resolvable address
+	 * here is important so that peer devices using direct
+	 * advertising with our address will be correctly reported
+	 * by the controller.
+	 */
+	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
+				      &own_addr_type))
+		return;
+
+	if (hdev->enable_advmon_interleave_scan &&
+	    __hci_update_interleaved_scan(hdev))
+		return;
+
+	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
+	/* Adding or removing entries from the accept list must
+	 * happen before enabling scanning. The controller does
+	 * not allow accept list modification while scanning.
+	 */
+	filter_policy = update_accept_list(req);
+
+	/* When the controller is using resolvable random addresses, and
+	 * therefore has LE privacy enabled, controllers with Extended
+	 * Scanner Filter Policies support can also handle directed
+	 * advertising.
+	 *
+	 * So instead of using filter policies 0x00 (no accept list)
+	 * and 0x01 (accept list enabled) use the new filter policies
+	 * 0x02 (no accept list) and 0x03 (accept list enabled).
+	 */
+	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
+	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
+		filter_policy |= 0x02;
+
+	if (hdev->suspended) {
+		window = hdev->le_scan_window_suspend;
+		interval = hdev->le_scan_int_suspend;
+	} else if (hci_is_le_conn_scanning(hdev)) {
+		window = hdev->le_scan_window_connect;
+		interval = hdev->le_scan_int_connect;
+	} else if (hci_is_adv_monitoring(hdev)) {
+		window = hdev->le_scan_window_adv_monitor;
+		interval = hdev->le_scan_int_adv_monitor;
+
+		/* Disable duplicates filter when scanning for advertisement
+		 * monitor for the following reasons.
+		 *
+		 * For HW pattern filtering (e.g. MSFT), Realtek and Qualcomm
+		 * controllers ignore RSSI_Sampling_Period when the duplicates
+		 * filter is enabled.
+		 *
+		 * For SW pattern filtering, when we're not doing interleaved
+		 * scanning, it is necessary to disable duplicates filter,
+		 * otherwise hosts can only receive one advertisement and it's
+		 * impossible to know if a peer is still in range.
+ */ + filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + } else { + window = hdev->le_scan_window; + interval = hdev->le_scan_interval; + } + + bt_dev_dbg(hdev, "LE passive scan with accept list = %d", + filter_policy); + hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window, + own_addr_type, filter_policy, filter_dup, + addr_resolv); +} + +static int hci_req_add_le_interleaved_scan(struct hci_request *req, + unsigned long opt) +{ + struct hci_dev *hdev = req->hdev; + int ret = 0; + + hci_dev_lock(hdev); + + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) + hci_req_add_le_scan_disable(req, false); + hci_req_add_le_passive_scan(req); + + switch (hdev->interleave_scan_state) { + case INTERLEAVE_SCAN_ALLOWLIST: + bt_dev_dbg(hdev, "next state: allowlist"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; + break; + case INTERLEAVE_SCAN_NO_FILTER: + bt_dev_dbg(hdev, "next state: no filter"); + hdev->interleave_scan_state = INTERLEAVE_SCAN_ALLOWLIST; + break; + case INTERLEAVE_SCAN_NONE: + BT_ERR("unexpected error"); + ret = -1; + } + + hci_dev_unlock(hdev); + + return ret; +} + +static void interleave_scan_work(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + interleave_scan.work); + u8 status; + unsigned long timeout; + + if (hdev->interleave_scan_state == INTERLEAVE_SCAN_ALLOWLIST) { + timeout = msecs_to_jiffies(hdev->advmon_allowlist_duration); + } else if (hdev->interleave_scan_state == INTERLEAVE_SCAN_NO_FILTER) { + timeout = msecs_to_jiffies(hdev->advmon_no_filter_duration); + } else { + bt_dev_err(hdev, "unexpected error"); + return; + } + + hci_req_sync(hdev, hci_req_add_le_interleaved_scan, 0, + HCI_CMD_TIMEOUT, &status); + + /* Don't continue interleaving if it was canceled */ + if (is_interleave_scanning(hdev)) + queue_delayed_work(hdev->req_workqueue, + &hdev->interleave_scan, timeout); +} + +static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) +{ + struct hci_dev *hdev = req->hdev; + + /* If we're advertising or initiating an LE connection we can't + * go ahead and change the random address at this time. This is + * because the eventual initiator address used for the + * subsequently created connection will be undefined (some + * controllers use the new address and others the one we had + * when the operation started). + * + * In this kind of scenario skip the update and let the random + * address be updated at the next cycle. 
+	 */
+	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+	    hci_lookup_le_connect(hdev)) {
+		bt_dev_dbg(hdev, "Deferring random address update");
+		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+		return;
+	}
+
+	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
+}
+
+void hci_request_setup(struct hci_dev *hdev)
+{
+	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);
+}
+
+void hci_request_cancel_all(struct hci_dev *hdev)
+{
+	__hci_cmd_sync_cancel(hdev, ENODEV);
+
+	cancel_interleave_scan(hdev);
+}
diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h
new file mode 100644
index 000000000..0be75cf0e
--- /dev/null
+++ b/net/bluetooth/hci_request.h
@@ -0,0 +1,75 @@
+/*
+ BlueZ - Bluetooth protocol stack for Linux
+ Copyright (C) 2014 Intel Corporation
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation;
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
+ CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+ ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
+ COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
+ SOFTWARE IS DISCLAIMED.
+*/
+
+#include <asm/unaligned.h>
+
+#define HCI_REQ_DONE	  0
+#define HCI_REQ_PEND	  1
+#define HCI_REQ_CANCELED  2
+
+#define hci_req_sync_lock(hdev)   mutex_lock(&hdev->req_lock)
+#define hci_req_sync_unlock(hdev) mutex_unlock(&hdev->req_lock)
+
+struct hci_request {
+	struct hci_dev		*hdev;
+	struct sk_buff_head	cmd_q;
+
+	/* If something goes wrong when building the HCI request, the error
+	 * value is stored in this field.
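+	 * Once it is set, further hci_req_add() calls become no-ops and
+	 * hci_req_run() returns the error instead of queuing anything.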
+ */ + int err; +}; + +void hci_req_init(struct hci_request *req, struct hci_dev *hdev); +void hci_req_purge(struct hci_request *req); +bool hci_req_status_pend(struct hci_dev *hdev); +int hci_req_run(struct hci_request *req, hci_req_complete_t complete); +int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete); +void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, + struct sk_buff *skb); +void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, + const void *param); +void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, + const void *param, u8 event); +void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status, + hci_req_complete_t *req_complete, + hci_req_complete_skb_t *req_complete_skb); + +int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req, + unsigned long opt), + unsigned long opt, u32 timeout, u8 *hci_status); +int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req, + unsigned long opt), + unsigned long opt, u32 timeout, u8 *hci_status); + +struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param); + +void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn); +void hci_req_add_le_passive_scan(struct hci_request *req); + +void hci_request_setup(struct hci_dev *hdev); +void hci_request_cancel_all(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c new file mode 100644 index 000000000..484fc2a8e --- /dev/null +++ b/net/bluetooth/hci_sock.c @@ -0,0 +1,2208 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth HCI sockets. 
*/ +#include <linux/compat.h> +#include <linux/export.h> +#include <linux/utsname.h> +#include <linux/sched.h> +#include <asm/unaligned.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/hci_mon.h> +#include <net/bluetooth/mgmt.h> + +#include "mgmt_util.h" + +static LIST_HEAD(mgmt_chan_list); +static DEFINE_MUTEX(mgmt_chan_list_lock); + +static DEFINE_IDA(sock_cookie_ida); + +static atomic_t monitor_promisc = ATOMIC_INIT(0); + +/* ----- HCI socket interface ----- */ + +/* Socket info */ +#define hci_pi(sk) ((struct hci_pinfo *) sk) + +struct hci_pinfo { + struct bt_sock bt; + struct hci_dev *hdev; + struct hci_filter filter; + __u8 cmsg_mask; + unsigned short channel; + unsigned long flags; + __u32 cookie; + char comm[TASK_COMM_LEN]; + __u16 mtu; +}; + +static struct hci_dev *hci_hdev_from_sock(struct sock *sk) +{ + struct hci_dev *hdev = hci_pi(sk)->hdev; + + if (!hdev) + return ERR_PTR(-EBADFD); + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return ERR_PTR(-EPIPE); + return hdev; +} + +void hci_sock_set_flag(struct sock *sk, int nr) +{ + set_bit(nr, &hci_pi(sk)->flags); +} + +void hci_sock_clear_flag(struct sock *sk, int nr) +{ + clear_bit(nr, &hci_pi(sk)->flags); +} + +int hci_sock_test_flag(struct sock *sk, int nr) +{ + return test_bit(nr, &hci_pi(sk)->flags); +} + +unsigned short hci_sock_get_channel(struct sock *sk) +{ + return hci_pi(sk)->channel; +} + +u32 hci_sock_get_cookie(struct sock *sk) +{ + return hci_pi(sk)->cookie; +} + +static bool hci_sock_gen_cookie(struct sock *sk) +{ + int id = hci_pi(sk)->cookie; + + if (!id) { + id = ida_simple_get(&sock_cookie_ida, 1, 0, GFP_KERNEL); + if (id < 0) + id = 0xffffffff; + + hci_pi(sk)->cookie = id; + get_task_comm(hci_pi(sk)->comm, current); + return true; + } + + return false; +} + +static void hci_sock_free_cookie(struct sock *sk) +{ + int id = hci_pi(sk)->cookie; + + if (id) { + hci_pi(sk)->cookie = 0xffffffff; + ida_simple_remove(&sock_cookie_ida, id); + } +} + +static inline int hci_test_bit(int nr, const void *addr) +{ + return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31)); +} + +/* Security filter */ +#define HCI_SFLT_MAX_OGF 5 + +struct hci_sec_filter { + __u32 type_mask; + __u32 event_mask[2]; + __u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4]; +}; + +static const struct hci_sec_filter hci_sec_filter = { + /* Packet types */ + 0x10, + /* Events */ + { 0x1000d9fe, 0x0000b00c }, + /* Commands */ + { + { 0x0 }, + /* OGF_LINK_CTL */ + { 0xbe000006, 0x00000001, 0x00000000, 0x00 }, + /* OGF_LINK_POLICY */ + { 0x00005200, 0x00000000, 0x00000000, 0x00 }, + /* OGF_HOST_CTL */ + { 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 }, + /* OGF_INFO_PARAM */ + { 0x000002be, 0x00000000, 0x00000000, 0x00 }, + /* OGF_STATUS_PARAM */ + { 0x000000ea, 0x00000000, 0x00000000, 0x00 } + } +}; + +static struct bt_sock_list hci_sk_list = { + .lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock) +}; + +static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb) +{ + struct hci_filter *flt; + int flt_type, flt_event; + + /* Apply filter */ + flt = &hci_pi(sk)->filter; + + flt_type = hci_skb_pkt_type(skb) & HCI_FLT_TYPE_BITS; + + if (!test_bit(flt_type, &flt->type_mask)) + return true; + + /* Extra filter for event packets only */ + if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT) + return false; + + flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS); + + if (!hci_test_bit(flt_event, &flt->event_mask)) + return true; + + /* Check filter only when opcode is set */ + if (!flt->opcode) + return false; + 
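+	/* The opcode is embedded at a fixed offset in the event packet:
+	 * byte 3 for Command Complete (evt, plen, ncmd, opcode) and
+	 * byte 4 for Command Status (evt, plen, status, ncmd, opcode).
+	 */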
+ if (flt_event == HCI_EV_CMD_COMPLETE && + flt->opcode != get_unaligned((__le16 *)(skb->data + 3))) + return true; + + if (flt_event == HCI_EV_CMD_STATUS && + flt->opcode != get_unaligned((__le16 *)(skb->data + 4))) + return true; + + return false; +} + +/* Send frame to RAW socket */ +void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct sock *sk; + struct sk_buff *skb_copy = NULL; + + BT_DBG("hdev %p len %d", hdev, skb->len); + + read_lock(&hci_sk_list.lock); + + sk_for_each(sk, &hci_sk_list.head) { + struct sk_buff *nskb; + + if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev) + continue; + + /* Don't send frame to the socket it came from */ + if (skb->sk == sk) + continue; + + if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) { + if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT && + hci_skb_pkt_type(skb) != HCI_EVENT_PKT && + hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && + hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && + hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) + continue; + if (is_filtered_packet(sk, skb)) + continue; + } else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { + if (!bt_cb(skb)->incoming) + continue; + if (hci_skb_pkt_type(skb) != HCI_EVENT_PKT && + hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && + hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && + hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) + continue; + } else { + /* Don't send frame to other channel types */ + continue; + } + + if (!skb_copy) { + /* Create a private copy with headroom */ + skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true); + if (!skb_copy) + continue; + + /* Put type byte before the data */ + memcpy(skb_push(skb_copy, 1), &hci_skb_pkt_type(skb), 1); + } + + nskb = skb_clone(skb_copy, GFP_ATOMIC); + if (!nskb) + continue; + + if (sock_queue_rcv_skb(sk, nskb)) + kfree_skb(nskb); + } + + read_unlock(&hci_sk_list.lock); + + kfree_skb(skb_copy); +} + +/* Send frame to sockets with specific channel */ +static void __hci_send_to_channel(unsigned short channel, struct sk_buff *skb, + int flag, struct sock *skip_sk) +{ + struct sock *sk; + + BT_DBG("channel %u len %d", channel, skb->len); + + sk_for_each(sk, &hci_sk_list.head) { + struct sk_buff *nskb; + + /* Ignore socket without the flag set */ + if (!hci_sock_test_flag(sk, flag)) + continue; + + /* Skip the original socket */ + if (sk == skip_sk) + continue; + + if (sk->sk_state != BT_BOUND) + continue; + + if (hci_pi(sk)->channel != channel) + continue; + + nskb = skb_clone(skb, GFP_ATOMIC); + if (!nskb) + continue; + + if (sock_queue_rcv_skb(sk, nskb)) + kfree_skb(nskb); + } + +} + +void hci_send_to_channel(unsigned short channel, struct sk_buff *skb, + int flag, struct sock *skip_sk) +{ + read_lock(&hci_sk_list.lock); + __hci_send_to_channel(channel, skb, flag, skip_sk); + read_unlock(&hci_sk_list.lock); +} + +/* Send frame to monitor socket */ +void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct sk_buff *skb_copy = NULL; + struct hci_mon_hdr *hdr; + __le16 opcode; + + if (!atomic_read(&monitor_promisc)) + return; + + BT_DBG("hdev %p len %d", hdev, skb->len); + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + opcode = cpu_to_le16(HCI_MON_COMMAND_PKT); + break; + case HCI_EVENT_PKT: + opcode = cpu_to_le16(HCI_MON_EVENT_PKT); + break; + case HCI_ACLDATA_PKT: + if (bt_cb(skb)->incoming) + opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT); + else + opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT); + break; + case HCI_SCODATA_PKT: + if (bt_cb(skb)->incoming) + opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT); + else + opcode = 
cpu_to_le16(HCI_MON_SCO_TX_PKT); + break; + case HCI_ISODATA_PKT: + if (bt_cb(skb)->incoming) + opcode = cpu_to_le16(HCI_MON_ISO_RX_PKT); + else + opcode = cpu_to_le16(HCI_MON_ISO_TX_PKT); + break; + case HCI_DIAG_PKT: + opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG); + break; + default: + return; + } + + /* Create a private copy with headroom */ + skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true); + if (!skb_copy) + return; + + /* Put header before the data */ + hdr = skb_push(skb_copy, HCI_MON_HDR_SIZE); + hdr->opcode = opcode; + hdr->index = cpu_to_le16(hdev->id); + hdr->len = cpu_to_le16(skb->len); + + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb_copy); +} + +void hci_send_monitor_ctrl_event(struct hci_dev *hdev, u16 event, + void *data, u16 data_len, ktime_t tstamp, + int flag, struct sock *skip_sk) +{ + struct sock *sk; + __le16 index; + + if (hdev) + index = cpu_to_le16(hdev->id); + else + index = cpu_to_le16(MGMT_INDEX_NONE); + + read_lock(&hci_sk_list.lock); + + sk_for_each(sk, &hci_sk_list.head) { + struct hci_mon_hdr *hdr; + struct sk_buff *skb; + + if (hci_pi(sk)->channel != HCI_CHANNEL_CONTROL) + continue; + + /* Ignore socket without the flag set */ + if (!hci_sock_test_flag(sk, flag)) + continue; + + /* Skip the original socket */ + if (sk == skip_sk) + continue; + + skb = bt_skb_alloc(6 + data_len, GFP_ATOMIC); + if (!skb) + continue; + + put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); + put_unaligned_le16(event, skb_put(skb, 2)); + + if (data) + skb_put_data(skb, data, data_len); + + skb->tstamp = tstamp; + + hdr = skb_push(skb, HCI_MON_HDR_SIZE); + hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT); + hdr->index = index; + hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); + + __hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb); + } + + read_unlock(&hci_sk_list.lock); +} + +static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event) +{ + struct hci_mon_hdr *hdr; + struct hci_mon_new_index *ni; + struct hci_mon_index_info *ii; + struct sk_buff *skb; + __le16 opcode; + + switch (event) { + case HCI_DEV_REG: + skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC); + if (!skb) + return NULL; + + ni = skb_put(skb, HCI_MON_NEW_INDEX_SIZE); + ni->type = hdev->dev_type; + ni->bus = hdev->bus; + bacpy(&ni->bdaddr, &hdev->bdaddr); + memcpy_and_pad(ni->name, sizeof(ni->name), hdev->name, + strnlen(hdev->name, sizeof(ni->name)), '\0'); + + opcode = cpu_to_le16(HCI_MON_NEW_INDEX); + break; + + case HCI_DEV_UNREG: + skb = bt_skb_alloc(0, GFP_ATOMIC); + if (!skb) + return NULL; + + opcode = cpu_to_le16(HCI_MON_DEL_INDEX); + break; + + case HCI_DEV_SETUP: + if (hdev->manufacturer == 0xffff) + return NULL; + fallthrough; + + case HCI_DEV_UP: + skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC); + if (!skb) + return NULL; + + ii = skb_put(skb, HCI_MON_INDEX_INFO_SIZE); + bacpy(&ii->bdaddr, &hdev->bdaddr); + ii->manufacturer = cpu_to_le16(hdev->manufacturer); + + opcode = cpu_to_le16(HCI_MON_INDEX_INFO); + break; + + case HCI_DEV_OPEN: + skb = bt_skb_alloc(0, GFP_ATOMIC); + if (!skb) + return NULL; + + opcode = cpu_to_le16(HCI_MON_OPEN_INDEX); + break; + + case HCI_DEV_CLOSE: + skb = bt_skb_alloc(0, GFP_ATOMIC); + if (!skb) + return NULL; + + opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX); + break; + + default: + return NULL; + } + + __net_timestamp(skb); + + hdr = skb_push(skb, HCI_MON_HDR_SIZE); + hdr->opcode = opcode; + hdr->index = cpu_to_le16(hdev->id); + 
hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); + + return skb; +} + +static struct sk_buff *create_monitor_ctrl_open(struct sock *sk) +{ + struct hci_mon_hdr *hdr; + struct sk_buff *skb; + u16 format; + u8 ver[3]; + u32 flags; + + /* No message needed when cookie is not present */ + if (!hci_pi(sk)->cookie) + return NULL; + + switch (hci_pi(sk)->channel) { + case HCI_CHANNEL_RAW: + format = 0x0000; + ver[0] = BT_SUBSYS_VERSION; + put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1); + break; + case HCI_CHANNEL_USER: + format = 0x0001; + ver[0] = BT_SUBSYS_VERSION; + put_unaligned_le16(BT_SUBSYS_REVISION, ver + 1); + break; + case HCI_CHANNEL_CONTROL: + format = 0x0002; + mgmt_fill_version_info(ver); + break; + default: + /* No message for unsupported format */ + return NULL; + } + + skb = bt_skb_alloc(14 + TASK_COMM_LEN , GFP_ATOMIC); + if (!skb) + return NULL; + + flags = hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) ? 0x1 : 0x0; + + put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); + put_unaligned_le16(format, skb_put(skb, 2)); + skb_put_data(skb, ver, sizeof(ver)); + put_unaligned_le32(flags, skb_put(skb, 4)); + skb_put_u8(skb, TASK_COMM_LEN); + skb_put_data(skb, hci_pi(sk)->comm, TASK_COMM_LEN); + + __net_timestamp(skb); + + hdr = skb_push(skb, HCI_MON_HDR_SIZE); + hdr->opcode = cpu_to_le16(HCI_MON_CTRL_OPEN); + if (hci_pi(sk)->hdev) + hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id); + else + hdr->index = cpu_to_le16(HCI_DEV_NONE); + hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); + + return skb; +} + +static struct sk_buff *create_monitor_ctrl_close(struct sock *sk) +{ + struct hci_mon_hdr *hdr; + struct sk_buff *skb; + + /* No message needed when cookie is not present */ + if (!hci_pi(sk)->cookie) + return NULL; + + switch (hci_pi(sk)->channel) { + case HCI_CHANNEL_RAW: + case HCI_CHANNEL_USER: + case HCI_CHANNEL_CONTROL: + break; + default: + /* No message for unsupported format */ + return NULL; + } + + skb = bt_skb_alloc(4, GFP_ATOMIC); + if (!skb) + return NULL; + + put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); + + __net_timestamp(skb); + + hdr = skb_push(skb, HCI_MON_HDR_SIZE); + hdr->opcode = cpu_to_le16(HCI_MON_CTRL_CLOSE); + if (hci_pi(sk)->hdev) + hdr->index = cpu_to_le16(hci_pi(sk)->hdev->id); + else + hdr->index = cpu_to_le16(HCI_DEV_NONE); + hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); + + return skb; +} + +static struct sk_buff *create_monitor_ctrl_command(struct sock *sk, u16 index, + u16 opcode, u16 len, + const void *buf) +{ + struct hci_mon_hdr *hdr; + struct sk_buff *skb; + + skb = bt_skb_alloc(6 + len, GFP_ATOMIC); + if (!skb) + return NULL; + + put_unaligned_le32(hci_pi(sk)->cookie, skb_put(skb, 4)); + put_unaligned_le16(opcode, skb_put(skb, 2)); + + if (buf) + skb_put_data(skb, buf, len); + + __net_timestamp(skb); + + hdr = skb_push(skb, HCI_MON_HDR_SIZE); + hdr->opcode = cpu_to_le16(HCI_MON_CTRL_COMMAND); + hdr->index = cpu_to_le16(index); + hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); + + return skb; +} + +static void __printf(2, 3) +send_monitor_note(struct sock *sk, const char *fmt, ...) 
+{ + size_t len; + struct hci_mon_hdr *hdr; + struct sk_buff *skb; + va_list args; + + va_start(args, fmt); + len = vsnprintf(NULL, 0, fmt, args); + va_end(args); + + skb = bt_skb_alloc(len + 1, GFP_ATOMIC); + if (!skb) + return; + + va_start(args, fmt); + vsprintf(skb_put(skb, len), fmt, args); + *(u8 *)skb_put(skb, 1) = 0; + va_end(args); + + __net_timestamp(skb); + + hdr = (void *)skb_push(skb, HCI_MON_HDR_SIZE); + hdr->opcode = cpu_to_le16(HCI_MON_SYSTEM_NOTE); + hdr->index = cpu_to_le16(HCI_DEV_NONE); + hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); + + if (sock_queue_rcv_skb(sk, skb)) + kfree_skb(skb); +} + +static void send_monitor_replay(struct sock *sk) +{ + struct hci_dev *hdev; + + read_lock(&hci_dev_list_lock); + + list_for_each_entry(hdev, &hci_dev_list, list) { + struct sk_buff *skb; + + skb = create_monitor_event(hdev, HCI_DEV_REG); + if (!skb) + continue; + + if (sock_queue_rcv_skb(sk, skb)) + kfree_skb(skb); + + if (!test_bit(HCI_RUNNING, &hdev->flags)) + continue; + + skb = create_monitor_event(hdev, HCI_DEV_OPEN); + if (!skb) + continue; + + if (sock_queue_rcv_skb(sk, skb)) + kfree_skb(skb); + + if (test_bit(HCI_UP, &hdev->flags)) + skb = create_monitor_event(hdev, HCI_DEV_UP); + else if (hci_dev_test_flag(hdev, HCI_SETUP)) + skb = create_monitor_event(hdev, HCI_DEV_SETUP); + else + skb = NULL; + + if (skb) { + if (sock_queue_rcv_skb(sk, skb)) + kfree_skb(skb); + } + } + + read_unlock(&hci_dev_list_lock); +} + +static void send_monitor_control_replay(struct sock *mon_sk) +{ + struct sock *sk; + + read_lock(&hci_sk_list.lock); + + sk_for_each(sk, &hci_sk_list.head) { + struct sk_buff *skb; + + skb = create_monitor_ctrl_open(sk); + if (!skb) + continue; + + if (sock_queue_rcv_skb(mon_sk, skb)) + kfree_skb(skb); + } + + read_unlock(&hci_sk_list.lock); +} + +/* Generate internal stack event */ +static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data) +{ + struct hci_event_hdr *hdr; + struct hci_ev_stack_internal *ev; + struct sk_buff *skb; + + skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC); + if (!skb) + return; + + hdr = skb_put(skb, HCI_EVENT_HDR_SIZE); + hdr->evt = HCI_EV_STACK_INTERNAL; + hdr->plen = sizeof(*ev) + dlen; + + ev = skb_put(skb, sizeof(*ev) + dlen); + ev->type = type; + memcpy(ev->data, data, dlen); + + bt_cb(skb)->incoming = 1; + __net_timestamp(skb); + + hci_skb_pkt_type(skb) = HCI_EVENT_PKT; + hci_send_to_sock(hdev, skb); + kfree_skb(skb); +} + +void hci_sock_dev_event(struct hci_dev *hdev, int event) +{ + BT_DBG("hdev %s event %d", hdev->name, event); + + if (atomic_read(&monitor_promisc)) { + struct sk_buff *skb; + + /* Send event to monitor */ + skb = create_monitor_event(hdev, event); + if (skb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb); + } + } + + if (event <= HCI_DEV_DOWN) { + struct hci_ev_si_device ev; + + /* Send event to sockets */ + ev.event = event; + ev.dev_id = hdev->id; + hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); + } + + if (event == HCI_DEV_UNREG) { + struct sock *sk; + + /* Wake up sockets using this dead device */ + read_lock(&hci_sk_list.lock); + sk_for_each(sk, &hci_sk_list.head) { + if (hci_pi(sk)->hdev == hdev) { + sk->sk_err = EPIPE; + sk->sk_state_change(sk); + } + } + read_unlock(&hci_sk_list.lock); + } +} + +static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel) +{ + struct hci_mgmt_chan *c; + + list_for_each_entry(c, &mgmt_chan_list, list) { + if (c->channel == channel) + return c; + } + + 
+	return NULL;
+}
+
+static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
+{
+	struct hci_mgmt_chan *c;
+
+	mutex_lock(&mgmt_chan_list_lock);
+	c = __hci_mgmt_chan_find(channel);
+	mutex_unlock(&mgmt_chan_list_lock);
+
+	return c;
+}
+
+int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
+{
+	if (c->channel < HCI_CHANNEL_CONTROL)
+		return -EINVAL;
+
+	mutex_lock(&mgmt_chan_list_lock);
+	if (__hci_mgmt_chan_find(c->channel)) {
+		mutex_unlock(&mgmt_chan_list_lock);
+		return -EALREADY;
+	}
+
+	list_add_tail(&c->list, &mgmt_chan_list);
+
+	mutex_unlock(&mgmt_chan_list_lock);
+
+	return 0;
+}
+EXPORT_SYMBOL(hci_mgmt_chan_register);
+
+void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
+{
+	mutex_lock(&mgmt_chan_list_lock);
+	list_del(&c->list);
+	mutex_unlock(&mgmt_chan_list_lock);
+}
+EXPORT_SYMBOL(hci_mgmt_chan_unregister);
+
+static int hci_sock_release(struct socket *sock)
+{
+	struct sock *sk = sock->sk;
+	struct hci_dev *hdev;
+	struct sk_buff *skb;
+
+	BT_DBG("sock %p sk %p", sock, sk);
+
+	if (!sk)
+		return 0;
+
+	lock_sock(sk);
+
+	switch (hci_pi(sk)->channel) {
+	case HCI_CHANNEL_MONITOR:
+		atomic_dec(&monitor_promisc);
+		break;
+	case HCI_CHANNEL_RAW:
+	case HCI_CHANNEL_USER:
+	case HCI_CHANNEL_CONTROL:
+		/* Send event to monitor */
+		skb = create_monitor_ctrl_close(sk);
+		if (skb) {
+			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
+					    HCI_SOCK_TRUSTED, NULL);
+			kfree_skb(skb);
+		}
+
+		hci_sock_free_cookie(sk);
+		break;
+	}
+
+	bt_sock_unlink(&hci_sk_list, sk);
+
+	hdev = hci_pi(sk)->hdev;
+	if (hdev) {
+		if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
+		    !hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+			/* When releasing a user channel exclusive access,
+			 * call hci_dev_do_close directly instead of calling
+			 * hci_dev_close to ensure the exclusive access will
+			 * be released and the controller brought back down.
+			 *
+			 * The checking of HCI_AUTO_OFF is not needed in this
+			 * case since it will have been cleared already when
+			 * opening the user channel.
+			 *
+			 * Make sure to also check that we haven't already
+			 * unregistered since all the cleanup will have already
+			 * been completed and hdev will get released when we
+			 * put it below.
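+			 *
+			 * (If HCI_UNREGISTER is already set we skip the close
+			 * here; unregistration has done the cleanup and hdev
+			 * is released by the hci_dev_put() further down.)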
+ */ + hci_dev_do_close(hdev); + hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); + mgmt_index_added(hdev); + } + + atomic_dec(&hdev->promisc); + hci_dev_put(hdev); + } + + sock_orphan(sk); + release_sock(sk); + sock_put(sk); + return 0; +} + +static int hci_sock_reject_list_add(struct hci_dev *hdev, void __user *arg) +{ + bdaddr_t bdaddr; + int err; + + if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) + return -EFAULT; + + hci_dev_lock(hdev); + + err = hci_bdaddr_list_add(&hdev->reject_list, &bdaddr, BDADDR_BREDR); + + hci_dev_unlock(hdev); + + return err; +} + +static int hci_sock_reject_list_del(struct hci_dev *hdev, void __user *arg) +{ + bdaddr_t bdaddr; + int err; + + if (copy_from_user(&bdaddr, arg, sizeof(bdaddr))) + return -EFAULT; + + hci_dev_lock(hdev); + + err = hci_bdaddr_list_del(&hdev->reject_list, &bdaddr, BDADDR_BREDR); + + hci_dev_unlock(hdev); + + return err; +} + +/* Ioctls that require bound socket */ +static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd, + unsigned long arg) +{ + struct hci_dev *hdev = hci_hdev_from_sock(sk); + + if (IS_ERR(hdev)) + return PTR_ERR(hdev); + + if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) + return -EBUSY; + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + return -EOPNOTSUPP; + + if (hdev->dev_type != HCI_PRIMARY) + return -EOPNOTSUPP; + + switch (cmd) { + case HCISETRAW: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + return -EOPNOTSUPP; + + case HCIGETCONNINFO: + return hci_get_conn_info(hdev, (void __user *)arg); + + case HCIGETAUTHINFO: + return hci_get_auth_info(hdev, (void __user *)arg); + + case HCIBLOCKADDR: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + return hci_sock_reject_list_add(hdev, (void __user *)arg); + + case HCIUNBLOCKADDR: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + return hci_sock_reject_list_del(hdev, (void __user *)arg); + } + + return -ENOIOCTLCMD; +} + +static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + struct sock *sk = sock->sk; + int err; + + BT_DBG("cmd %x arg %lx", cmd, arg); + + /* Make sure the cmd is valid before doing anything */ + switch (cmd) { + case HCIGETDEVLIST: + case HCIGETDEVINFO: + case HCIGETCONNLIST: + case HCIDEVUP: + case HCIDEVDOWN: + case HCIDEVRESET: + case HCIDEVRESTAT: + case HCISETSCAN: + case HCISETAUTH: + case HCISETENCRYPT: + case HCISETPTYPE: + case HCISETLINKPOL: + case HCISETLINKMODE: + case HCISETACLMTU: + case HCISETSCOMTU: + case HCIINQUIRY: + case HCISETRAW: + case HCIGETCONNINFO: + case HCIGETAUTHINFO: + case HCIBLOCKADDR: + case HCIUNBLOCKADDR: + break; + default: + return -ENOIOCTLCMD; + } + + lock_sock(sk); + + if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { + err = -EBADFD; + goto done; + } + + /* When calling an ioctl on an unbound raw socket, then ensure + * that the monitor gets informed. Ensure that the resulting event + * is only send once by checking if the cookie exists or not. The + * socket cookie will be only ever generated once for the lifetime + * of a given socket. + */ + if (hci_sock_gen_cookie(sk)) { + struct sk_buff *skb; + + /* Perform careful checks before setting the HCI_SOCK_TRUSTED + * flag. Make sure that not only the current task but also + * the socket opener has the required capability, since + * privileged programs can be tricked into making ioctl calls + * on HCI sockets, and the socket should not be marked as + * trusted simply because the ioctl caller is privileged. 
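+		 *
+		 * (Editor's illustration: an unprivileged process could hand
+		 * its HCI socket fd to a privileged helper and have that
+		 * helper issue the ioctl; sk_capable() below therefore also
+		 * checks the credentials the socket was opened with, not
+		 * just those of the current task.)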
+ */ + if (sk_capable(sk, CAP_NET_ADMIN)) + hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); + + /* Send event to monitor */ + skb = create_monitor_ctrl_open(sk); + if (skb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb); + } + } + + release_sock(sk); + + switch (cmd) { + case HCIGETDEVLIST: + return hci_get_dev_list(argp); + + case HCIGETDEVINFO: + return hci_get_dev_info(argp); + + case HCIGETCONNLIST: + return hci_get_conn_list(argp); + + case HCIDEVUP: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + return hci_dev_open(arg); + + case HCIDEVDOWN: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + return hci_dev_close(arg); + + case HCIDEVRESET: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + return hci_dev_reset(arg); + + case HCIDEVRESTAT: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + return hci_dev_reset_stat(arg); + + case HCISETSCAN: + case HCISETAUTH: + case HCISETENCRYPT: + case HCISETPTYPE: + case HCISETLINKPOL: + case HCISETLINKMODE: + case HCISETACLMTU: + case HCISETSCOMTU: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + return hci_dev_cmd(cmd, argp); + + case HCIINQUIRY: + return hci_inquiry(argp); + } + + lock_sock(sk); + + err = hci_sock_bound_ioctl(sk, cmd, arg); + +done: + release_sock(sk); + return err; +} + +#ifdef CONFIG_COMPAT +static int hci_sock_compat_ioctl(struct socket *sock, unsigned int cmd, + unsigned long arg) +{ + switch (cmd) { + case HCIDEVUP: + case HCIDEVDOWN: + case HCIDEVRESET: + case HCIDEVRESTAT: + return hci_sock_ioctl(sock, cmd, arg); + } + + return hci_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static int hci_sock_bind(struct socket *sock, struct sockaddr *addr, + int addr_len) +{ + struct sockaddr_hci haddr; + struct sock *sk = sock->sk; + struct hci_dev *hdev = NULL; + struct sk_buff *skb; + int len, err = 0; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!addr) + return -EINVAL; + + memset(&haddr, 0, sizeof(haddr)); + len = min_t(unsigned int, sizeof(haddr), addr_len); + memcpy(&haddr, addr, len); + + if (haddr.hci_family != AF_BLUETOOTH) + return -EINVAL; + + lock_sock(sk); + + /* Allow detaching from dead device and attaching to alive device, if + * the caller wants to re-bind (instead of close) this socket in + * response to hci_sock_dev_event(HCI_DEV_UNREG) notification. + */ + hdev = hci_pi(sk)->hdev; + if (hdev && hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + hci_pi(sk)->hdev = NULL; + sk->sk_state = BT_OPEN; + hci_dev_put(hdev); + } + hdev = NULL; + + if (sk->sk_state == BT_BOUND) { + err = -EALREADY; + goto done; + } + + switch (haddr.hci_channel) { + case HCI_CHANNEL_RAW: + if (hci_pi(sk)->hdev) { + err = -EALREADY; + goto done; + } + + if (haddr.hci_dev != HCI_DEV_NONE) { + hdev = hci_dev_get(haddr.hci_dev); + if (!hdev) { + err = -ENODEV; + goto done; + } + + atomic_inc(&hdev->promisc); + } + + hci_pi(sk)->channel = haddr.hci_channel; + + if (!hci_sock_gen_cookie(sk)) { + /* In the case when a cookie has already been assigned, + * then there has been already an ioctl issued against + * an unbound socket and with that triggered an open + * notification. Send a close notification first to + * allow the state transition to bounded. 
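+			 * (In a monitor trace such as btmon this shows up as
+			 * a control Close immediately followed by a new
+			 * control Open for the same socket cookie.)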
+ */ + skb = create_monitor_ctrl_close(sk); + if (skb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb); + } + } + + if (capable(CAP_NET_ADMIN)) + hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); + + hci_pi(sk)->hdev = hdev; + + /* Send event to monitor */ + skb = create_monitor_ctrl_open(sk); + if (skb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb); + } + break; + + case HCI_CHANNEL_USER: + if (hci_pi(sk)->hdev) { + err = -EALREADY; + goto done; + } + + if (haddr.hci_dev == HCI_DEV_NONE) { + err = -EINVAL; + goto done; + } + + if (!capable(CAP_NET_ADMIN)) { + err = -EPERM; + goto done; + } + + hdev = hci_dev_get(haddr.hci_dev); + if (!hdev) { + err = -ENODEV; + goto done; + } + + if (test_bit(HCI_INIT, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG) || + (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) && + test_bit(HCI_UP, &hdev->flags))) { + err = -EBUSY; + hci_dev_put(hdev); + goto done; + } + + if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) { + err = -EUSERS; + hci_dev_put(hdev); + goto done; + } + + mgmt_index_removed(hdev); + + err = hci_dev_open(hdev->id); + if (err) { + if (err == -EALREADY) { + /* In case the transport is already up and + * running, clear the error here. + * + * This can happen when opening a user + * channel and HCI_AUTO_OFF grace period + * is still active. + */ + err = 0; + } else { + hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); + mgmt_index_added(hdev); + hci_dev_put(hdev); + goto done; + } + } + + hci_pi(sk)->channel = haddr.hci_channel; + + if (!hci_sock_gen_cookie(sk)) { + /* In the case when a cookie has already been assigned, + * this socket will transition from a raw socket into + * a user channel socket. For a clean transition, send + * the close notification first. + */ + skb = create_monitor_ctrl_close(sk); + if (skb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb); + } + } + + /* The user channel is restricted to CAP_NET_ADMIN + * capabilities and with that implicitly trusted. + */ + hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); + + hci_pi(sk)->hdev = hdev; + + /* Send event to monitor */ + skb = create_monitor_ctrl_open(sk); + if (skb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb); + } + + atomic_inc(&hdev->promisc); + break; + + case HCI_CHANNEL_MONITOR: + if (haddr.hci_dev != HCI_DEV_NONE) { + err = -EINVAL; + goto done; + } + + if (!capable(CAP_NET_RAW)) { + err = -EPERM; + goto done; + } + + hci_pi(sk)->channel = haddr.hci_channel; + + /* The monitor interface is restricted to CAP_NET_RAW + * capabilities and with that implicitly trusted. 
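+		 *
+		 * Illustrative sketch (editor's addition, not upstream code):
+		 * a monitor like btmon binds roughly as follows, given
+		 * CAP_NET_RAW:
+		 *
+		 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
+		 *	struct sockaddr_hci a = {
+		 *		.hci_family  = AF_BLUETOOTH,
+		 *		.hci_dev     = HCI_DEV_NONE,
+		 *		.hci_channel = HCI_CHANNEL_MONITOR,
+		 *	};
+		 *	bind(fd, (struct sockaddr *)&a, sizeof(a));
+		 *
+		 * after which it receives the device and socket replay
+		 * generated just below.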
+ */ + hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); + + send_monitor_note(sk, "Linux version %s (%s)", + init_utsname()->release, + init_utsname()->machine); + send_monitor_note(sk, "Bluetooth subsystem version %u.%u", + BT_SUBSYS_VERSION, BT_SUBSYS_REVISION); + send_monitor_replay(sk); + send_monitor_control_replay(sk); + + atomic_inc(&monitor_promisc); + break; + + case HCI_CHANNEL_LOGGING: + if (haddr.hci_dev != HCI_DEV_NONE) { + err = -EINVAL; + goto done; + } + + if (!capable(CAP_NET_ADMIN)) { + err = -EPERM; + goto done; + } + + hci_pi(sk)->channel = haddr.hci_channel; + break; + + default: + if (!hci_mgmt_chan_find(haddr.hci_channel)) { + err = -EINVAL; + goto done; + } + + if (haddr.hci_dev != HCI_DEV_NONE) { + err = -EINVAL; + goto done; + } + + /* Users with CAP_NET_ADMIN capabilities are allowed + * access to all management commands and events. For + * untrusted users the interface is restricted and + * also only untrusted events are sent. + */ + if (capable(CAP_NET_ADMIN)) + hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); + + hci_pi(sk)->channel = haddr.hci_channel; + + /* At the moment the index and unconfigured index events + * are enabled unconditionally. Setting them on each + * socket when binding keeps this functionality. They + * however might be cleared later and then sending of these + * events will be disabled, but that is then intentional. + * + * This also enables generic events that are safe to be + * received by untrusted users. Example for such events + * are changes to settings, class of device, name etc. + */ + if (hci_pi(sk)->channel == HCI_CHANNEL_CONTROL) { + if (!hci_sock_gen_cookie(sk)) { + /* In the case when a cookie has already been + * assigned, this socket will transition from + * a raw socket into a control socket. To + * allow for a clean transition, send the + * close notification first. 
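+				 * (Editor's note: a management client such as
+				 * btmgmt reaches this path by binding with
+				 * .hci_channel = HCI_CHANNEL_CONTROL and
+				 * .hci_dev = HCI_DEV_NONE, analogous to the
+				 * monitor bind above.)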
+ */ + skb = create_monitor_ctrl_close(sk); + if (skb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb); + } + } + + /* Send event to monitor */ + skb = create_monitor_ctrl_open(sk); + if (skb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(skb); + } + + hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS); + hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); + hci_sock_set_flag(sk, HCI_MGMT_OPTION_EVENTS); + hci_sock_set_flag(sk, HCI_MGMT_SETTING_EVENTS); + hci_sock_set_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS); + hci_sock_set_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS); + } + break; + } + + /* Default MTU to HCI_MAX_FRAME_SIZE if not set */ + if (!hci_pi(sk)->mtu) + hci_pi(sk)->mtu = HCI_MAX_FRAME_SIZE; + + sk->sk_state = BT_BOUND; + +done: + release_sock(sk); + return err; +} + +static int hci_sock_getname(struct socket *sock, struct sockaddr *addr, + int peer) +{ + struct sockaddr_hci *haddr = (struct sockaddr_hci *)addr; + struct sock *sk = sock->sk; + struct hci_dev *hdev; + int err = 0; + + BT_DBG("sock %p sk %p", sock, sk); + + if (peer) + return -EOPNOTSUPP; + + lock_sock(sk); + + hdev = hci_hdev_from_sock(sk); + if (IS_ERR(hdev)) { + err = PTR_ERR(hdev); + goto done; + } + + haddr->hci_family = AF_BLUETOOTH; + haddr->hci_dev = hdev->id; + haddr->hci_channel= hci_pi(sk)->channel; + err = sizeof(*haddr); + +done: + release_sock(sk); + return err; +} + +static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg, + struct sk_buff *skb) +{ + __u8 mask = hci_pi(sk)->cmsg_mask; + + if (mask & HCI_CMSG_DIR) { + int incoming = bt_cb(skb)->incoming; + put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming), + &incoming); + } + + if (mask & HCI_CMSG_TSTAMP) { +#ifdef CONFIG_COMPAT + struct old_timeval32 ctv; +#endif + struct __kernel_old_timeval tv; + void *data; + int len; + + skb_get_timestamp(skb, &tv); + + data = &tv; + len = sizeof(tv); +#ifdef CONFIG_COMPAT + if (!COMPAT_USE_64BIT_TIME && + (msg->msg_flags & MSG_CMSG_COMPAT)) { + ctv.tv_sec = tv.tv_sec; + ctv.tv_usec = tv.tv_usec; + data = &ctv; + len = sizeof(ctv); + } +#endif + + put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data); + } +} + +static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) +{ + struct sock *sk = sock->sk; + struct sk_buff *skb; + int copied, err; + unsigned int skblen; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (flags & MSG_OOB) + return -EOPNOTSUPP; + + if (hci_pi(sk)->channel == HCI_CHANNEL_LOGGING) + return -EOPNOTSUPP; + + if (sk->sk_state == BT_CLOSED) + return 0; + + skb = skb_recv_datagram(sk, flags, &err); + if (!skb) + return err; + + skblen = skb->len; + copied = skb->len; + if (len < copied) { + msg->msg_flags |= MSG_TRUNC; + copied = len; + } + + skb_reset_transport_header(skb); + err = skb_copy_datagram_msg(skb, 0, msg, copied); + + switch (hci_pi(sk)->channel) { + case HCI_CHANNEL_RAW: + hci_sock_cmsg(sk, msg, skb); + break; + case HCI_CHANNEL_USER: + case HCI_CHANNEL_MONITOR: + sock_recv_timestamp(msg, sk, skb); + break; + default: + if (hci_mgmt_chan_find(hci_pi(sk)->channel)) + sock_recv_timestamp(msg, sk, skb); + break; + } + + skb_free_datagram(sk, skb); + + if (flags & MSG_TRUNC) + copied = skblen; + + return err ? 
: copied; +} + +static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk, + struct sk_buff *skb) +{ + u8 *cp; + struct mgmt_hdr *hdr; + u16 opcode, index, len; + struct hci_dev *hdev = NULL; + const struct hci_mgmt_handler *handler; + bool var_len, no_hdev; + int err; + + BT_DBG("got %d bytes", skb->len); + + if (skb->len < sizeof(*hdr)) + return -EINVAL; + + hdr = (void *)skb->data; + opcode = __le16_to_cpu(hdr->opcode); + index = __le16_to_cpu(hdr->index); + len = __le16_to_cpu(hdr->len); + + if (len != skb->len - sizeof(*hdr)) { + err = -EINVAL; + goto done; + } + + if (chan->channel == HCI_CHANNEL_CONTROL) { + struct sk_buff *cmd; + + /* Send event to monitor */ + cmd = create_monitor_ctrl_command(sk, index, opcode, len, + skb->data + sizeof(*hdr)); + if (cmd) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, cmd, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(cmd); + } + } + + if (opcode >= chan->handler_count || + chan->handlers[opcode].func == NULL) { + BT_DBG("Unknown op %u", opcode); + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_UNKNOWN_COMMAND); + goto done; + } + + handler = &chan->handlers[opcode]; + + if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) && + !(handler->flags & HCI_MGMT_UNTRUSTED)) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_PERMISSION_DENIED); + goto done; + } + + if (index != MGMT_INDEX_NONE) { + hdev = hci_dev_get(index); + if (!hdev) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_INDEX); + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG) || + hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_INDEX); + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !(handler->flags & HCI_MGMT_UNCONFIGURED)) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_INDEX); + goto done; + } + } + + if (!(handler->flags & HCI_MGMT_HDEV_OPTIONAL)) { + no_hdev = (handler->flags & HCI_MGMT_NO_HDEV); + if (no_hdev != !hdev) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_INDEX); + goto done; + } + } + + var_len = (handler->flags & HCI_MGMT_VAR_LEN); + if ((var_len && len < handler->data_len) || + (!var_len && len != handler->data_len)) { + err = mgmt_cmd_status(sk, index, opcode, + MGMT_STATUS_INVALID_PARAMS); + goto done; + } + + if (hdev && chan->hdev_init) + chan->hdev_init(sk, hdev); + + cp = skb->data + sizeof(*hdr); + + err = handler->func(sk, hdev, cp, len); + if (err < 0) + goto done; + + err = skb->len; + +done: + if (hdev) + hci_dev_put(hdev); + + return err; +} + +static int hci_logging_frame(struct sock *sk, struct sk_buff *skb, + unsigned int flags) +{ + struct hci_mon_hdr *hdr; + struct hci_dev *hdev; + u16 index; + int err; + + /* The logging frame consists at minimum of the standard header, + * the priority byte, the ident length byte and at least one string + * terminator NUL byte. Anything shorter are invalid packets. + */ + if (skb->len < sizeof(*hdr) + 3) + return -EINVAL; + + hdr = (void *)skb->data; + + if (__le16_to_cpu(hdr->len) != skb->len - sizeof(*hdr)) + return -EINVAL; + + if (__le16_to_cpu(hdr->opcode) == 0x0000) { + __u8 priority = skb->data[sizeof(*hdr)]; + __u8 ident_len = skb->data[sizeof(*hdr) + 1]; + + /* Only the priorities 0-7 are valid and with that any other + * value results in an invalid packet. + * + * The priority byte is followed by an ident length byte and + * the NUL terminated ident string. 
Check that the ident + * length is not overflowing the packet and also that the + * ident string itself is NUL terminated. In case the ident + * length is zero, the length value actually doubles as NUL + * terminator identifier. + * + * The message follows the ident string (if present) and + * must be NUL terminated. Otherwise it is not a valid packet. + */ + if (priority > 7 || skb->data[skb->len - 1] != 0x00 || + ident_len > skb->len - sizeof(*hdr) - 3 || + skb->data[sizeof(*hdr) + ident_len + 1] != 0x00) + return -EINVAL; + } else { + return -EINVAL; + } + + index = __le16_to_cpu(hdr->index); + + if (index != MGMT_INDEX_NONE) { + hdev = hci_dev_get(index); + if (!hdev) + return -ENODEV; + } else { + hdev = NULL; + } + + hdr->opcode = cpu_to_le16(HCI_MON_USER_LOGGING); + + hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, HCI_SOCK_TRUSTED, NULL); + err = skb->len; + + if (hdev) + hci_dev_put(hdev); + + return err; +} + +static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + struct sock *sk = sock->sk; + struct hci_mgmt_chan *chan; + struct hci_dev *hdev; + struct sk_buff *skb; + int err; + const unsigned int flags = msg->msg_flags; + + BT_DBG("sock %p sk %p", sock, sk); + + if (flags & MSG_OOB) + return -EOPNOTSUPP; + + if (flags & ~(MSG_DONTWAIT | MSG_NOSIGNAL | MSG_ERRQUEUE | MSG_CMSG_COMPAT)) + return -EINVAL; + + if (len < 4 || len > hci_pi(sk)->mtu) + return -EINVAL; + + skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + lock_sock(sk); + + switch (hci_pi(sk)->channel) { + case HCI_CHANNEL_RAW: + case HCI_CHANNEL_USER: + break; + case HCI_CHANNEL_MONITOR: + err = -EOPNOTSUPP; + goto drop; + case HCI_CHANNEL_LOGGING: + err = hci_logging_frame(sk, skb, flags); + goto drop; + default: + mutex_lock(&mgmt_chan_list_lock); + chan = __hci_mgmt_chan_find(hci_pi(sk)->channel); + if (chan) + err = hci_mgmt_cmd(chan, sk, skb); + else + err = -EINVAL; + + mutex_unlock(&mgmt_chan_list_lock); + goto drop; + } + + hdev = hci_hdev_from_sock(sk); + if (IS_ERR(hdev)) { + err = PTR_ERR(hdev); + goto drop; + } + + if (!test_bit(HCI_UP, &hdev->flags)) { + err = -ENETDOWN; + goto drop; + } + + hci_skb_pkt_type(skb) = skb->data[0]; + skb_pull(skb, 1); + + if (hci_pi(sk)->channel == HCI_CHANNEL_USER) { + /* No permission check is needed for user channel + * since that gets enforced when binding the socket. + * + * However check that the packet type is valid. + */ + if (hci_skb_pkt_type(skb) != HCI_COMMAND_PKT && + hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && + hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && + hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { + err = -EINVAL; + goto drop; + } + + skb_queue_tail(&hdev->raw_q, skb); + queue_work(hdev->workqueue, &hdev->tx_work); + } else if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) { + u16 opcode = get_unaligned_le16(skb->data); + u16 ogf = hci_opcode_ogf(opcode); + u16 ocf = hci_opcode_ocf(opcode); + + if (((ogf > HCI_SFLT_MAX_OGF) || + !hci_test_bit(ocf & HCI_FLT_OCF_BITS, + &hci_sec_filter.ocf_mask[ogf])) && + !capable(CAP_NET_RAW)) { + err = -EPERM; + goto drop; + } + + /* Since the opcode has already been extracted here, store + * a copy of the value for later use by the drivers. + */ + hci_skb_opcode(skb) = opcode; + + if (ogf == 0x3f) { + skb_queue_tail(&hdev->raw_q, skb); + queue_work(hdev->workqueue, &hdev->tx_work); + } else { + /* Stand-alone HCI commands must be flagged as + * single-command requests. 
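+			 * (HCI_REQ_START marks the first command of an HCI
+			 * request; for a lone command sent from a raw socket
+			 * the request consists of just this command, so the
+			 * completion logic treats it as a request of its own.)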
+ */ + bt_cb(skb)->hci.req_flags |= HCI_REQ_START; + + skb_queue_tail(&hdev->cmd_q, skb); + queue_work(hdev->workqueue, &hdev->cmd_work); + } + } else { + if (!capable(CAP_NET_RAW)) { + err = -EPERM; + goto drop; + } + + if (hci_skb_pkt_type(skb) != HCI_ACLDATA_PKT && + hci_skb_pkt_type(skb) != HCI_SCODATA_PKT && + hci_skb_pkt_type(skb) != HCI_ISODATA_PKT) { + err = -EINVAL; + goto drop; + } + + skb_queue_tail(&hdev->raw_q, skb); + queue_work(hdev->workqueue, &hdev->tx_work); + } + + err = len; + +done: + release_sock(sk); + return err; + +drop: + kfree_skb(skb); + goto done; +} + +static int hci_sock_setsockopt_old(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int len) +{ + struct hci_ufilter uf = { .opcode = 0 }; + struct sock *sk = sock->sk; + int err = 0, opt = 0; + + BT_DBG("sk %p, opt %d", sk, optname); + + lock_sock(sk); + + if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { + err = -EBADFD; + goto done; + } + + switch (optname) { + case HCI_DATA_DIR: + if (copy_from_sockptr(&opt, optval, sizeof(opt))) { + err = -EFAULT; + break; + } + + if (opt) + hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR; + else + hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR; + break; + + case HCI_TIME_STAMP: + if (copy_from_sockptr(&opt, optval, sizeof(opt))) { + err = -EFAULT; + break; + } + + if (opt) + hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP; + else + hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP; + break; + + case HCI_FILTER: + { + struct hci_filter *f = &hci_pi(sk)->filter; + + uf.type_mask = f->type_mask; + uf.opcode = f->opcode; + uf.event_mask[0] = *((u32 *) f->event_mask + 0); + uf.event_mask[1] = *((u32 *) f->event_mask + 1); + } + + len = min_t(unsigned int, len, sizeof(uf)); + if (copy_from_sockptr(&uf, optval, len)) { + err = -EFAULT; + break; + } + + if (!capable(CAP_NET_RAW)) { + uf.type_mask &= hci_sec_filter.type_mask; + uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0); + uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1); + } + + { + struct hci_filter *f = &hci_pi(sk)->filter; + + f->type_mask = uf.type_mask; + f->opcode = uf.opcode; + *((u32 *) f->event_mask + 0) = uf.event_mask[0]; + *((u32 *) f->event_mask + 1) = uf.event_mask[1]; + } + break; + + default: + err = -ENOPROTOOPT; + break; + } + +done: + release_sock(sk); + return err; +} + +static int hci_sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int len) +{ + struct sock *sk = sock->sk; + int err = 0; + u16 opt; + + BT_DBG("sk %p, opt %d", sk, optname); + + if (level == SOL_HCI) + return hci_sock_setsockopt_old(sock, level, optname, optval, + len); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + lock_sock(sk); + + switch (optname) { + case BT_SNDMTU: + case BT_RCVMTU: + switch (hci_pi(sk)->channel) { + /* Don't allow changing MTU for channels that are meant for HCI + * traffic only. 
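+	 *
+	 * (Editor's sketch, hypothetical value: a management or logging
+	 * client may enlarge its receive buffer with
+	 *
+	 *	uint16_t mtu = 4096;
+	 *	setsockopt(fd, SOL_BLUETOOTH, BT_RCVMTU, &mtu, sizeof(mtu));
+	 *
+	 * while RAW and USER channel sockets stay at HCI_MAX_FRAME_SIZE.)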
+ */ + case HCI_CHANNEL_RAW: + case HCI_CHANNEL_USER: + err = -ENOPROTOOPT; + goto done; + } + + if (copy_from_sockptr(&opt, optval, sizeof(opt))) { + err = -EFAULT; + break; + } + + hci_pi(sk)->mtu = opt; + break; + + default: + err = -ENOPROTOOPT; + break; + } + +done: + release_sock(sk); + return err; +} + +static int hci_sock_getsockopt_old(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct hci_ufilter uf; + struct sock *sk = sock->sk; + int len, opt, err = 0; + + BT_DBG("sk %p, opt %d", sk, optname); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { + err = -EBADFD; + goto done; + } + + switch (optname) { + case HCI_DATA_DIR: + if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR) + opt = 1; + else + opt = 0; + + if (put_user(opt, optval)) + err = -EFAULT; + break; + + case HCI_TIME_STAMP: + if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP) + opt = 1; + else + opt = 0; + + if (put_user(opt, optval)) + err = -EFAULT; + break; + + case HCI_FILTER: + { + struct hci_filter *f = &hci_pi(sk)->filter; + + memset(&uf, 0, sizeof(uf)); + uf.type_mask = f->type_mask; + uf.opcode = f->opcode; + uf.event_mask[0] = *((u32 *) f->event_mask + 0); + uf.event_mask[1] = *((u32 *) f->event_mask + 1); + } + + len = min_t(unsigned int, len, sizeof(uf)); + if (copy_to_user(optval, &uf, len)) + err = -EFAULT; + break; + + default: + err = -ENOPROTOOPT; + break; + } + +done: + release_sock(sk); + return err; +} + +static int hci_sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p, opt %d", sk, optname); + + if (level == SOL_HCI) + return hci_sock_getsockopt_old(sock, level, optname, optval, + optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + lock_sock(sk); + + switch (optname) { + case BT_SNDMTU: + case BT_RCVMTU: + if (put_user(hci_pi(sk)->mtu, (u16 __user *)optval)) + err = -EFAULT; + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static void hci_sock_destruct(struct sock *sk) +{ + mgmt_cleanup(sk); + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + +static const struct proto_ops hci_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = hci_sock_release, + .bind = hci_sock_bind, + .getname = hci_sock_getname, + .sendmsg = hci_sock_sendmsg, + .recvmsg = hci_sock_recvmsg, + .ioctl = hci_sock_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = hci_sock_compat_ioctl, +#endif + .poll = datagram_poll, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .setsockopt = hci_sock_setsockopt, + .getsockopt = hci_sock_getsockopt, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .mmap = sock_no_mmap +}; + +static struct proto hci_sk_proto = { + .name = "HCI", + .owner = THIS_MODULE, + .obj_size = sizeof(struct hci_pinfo) +}; + +static int hci_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + if (sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sock->ops = &hci_sock_ops; + + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = protocol; + + sock->state = SS_UNCONNECTED; + sk->sk_state = BT_OPEN; + sk->sk_destruct = 
hci_sock_destruct; + + bt_sock_link(&hci_sk_list, sk); + return 0; +} + +static const struct net_proto_family hci_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = hci_sock_create, +}; + +int __init hci_sock_init(void) +{ + int err; + + BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr)); + + err = proto_register(&hci_sk_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops); + if (err < 0) { + BT_ERR("HCI socket registration failed"); + goto error; + } + + err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create HCI proc file"); + bt_sock_unregister(BTPROTO_HCI); + goto error; + } + + BT_INFO("HCI socket layer initialized"); + + return 0; + +error: + proto_unregister(&hci_sk_proto); + return err; +} + +void hci_sock_cleanup(void) +{ + bt_procfs_cleanup(&init_net, "hci"); + bt_sock_unregister(BTPROTO_HCI); + proto_unregister(&hci_sk_proto); +} diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c new file mode 100644 index 000000000..d74fe13f3 --- /dev/null +++ b/net/bluetooth/hci_sync.c @@ -0,0 +1,6316 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2021 Intel Corporation + */ + +#include <linux/property.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/mgmt.h> + +#include "hci_request.h" +#include "hci_codec.h" +#include "hci_debugfs.h" +#include "smp.h" +#include "eir.h" +#include "msft.h" +#include "aosp.h" +#include "leds.h" + +static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, + struct sk_buff *skb) +{ + bt_dev_dbg(hdev, "result 0x%2.2x", result); + + if (hdev->req_status != HCI_REQ_PEND) + return; + + hdev->req_result = result; + hdev->req_status = HCI_REQ_DONE; + + if (skb) { + struct sock *sk = hci_skb_sk(skb); + + /* Drop sk reference if set */ + if (sk) + sock_put(sk); + + hdev->req_skb = skb_get(skb); + } + + wake_up_interruptible(&hdev->req_wait_q); +} + +static struct sk_buff *hci_cmd_sync_alloc(struct hci_dev *hdev, u16 opcode, + u32 plen, const void *param, + struct sock *sk) +{ + int len = HCI_COMMAND_HDR_SIZE + plen; + struct hci_command_hdr *hdr; + struct sk_buff *skb; + + skb = bt_skb_alloc(len, GFP_ATOMIC); + if (!skb) + return NULL; + + hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); + hdr->opcode = cpu_to_le16(opcode); + hdr->plen = plen; + + if (plen) + skb_put_data(skb, param, plen); + + bt_dev_dbg(hdev, "skb len %d", skb->len); + + hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; + hci_skb_opcode(skb) = opcode; + + /* Grab a reference if command needs to be associated with a sock (e.g. + * likely mgmt socket that initiated the command). + */ + if (sk) { + hci_skb_sk(skb) = sk; + sock_hold(sk); + } + + return skb; +} + +static void hci_cmd_sync_add(struct hci_request *req, u16 opcode, u32 plen, + const void *param, u8 event, struct sock *sk) +{ + struct hci_dev *hdev = req->hdev; + struct sk_buff *skb; + + bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); + + /* If an error occurred during request building, there is no point in + * queueing the HCI command. We can simply return. 
+ */ + if (req->err) + return; + + skb = hci_cmd_sync_alloc(hdev, opcode, plen, param, sk); + if (!skb) { + bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)", + opcode); + req->err = -ENOMEM; + return; + } + + if (skb_queue_empty(&req->cmd_q)) + bt_cb(skb)->hci.req_flags |= HCI_REQ_START; + + hci_skb_event(skb) = event; + + skb_queue_tail(&req->cmd_q, skb); +} + +static int hci_cmd_sync_run(struct hci_request *req) +{ + struct hci_dev *hdev = req->hdev; + struct sk_buff *skb; + unsigned long flags; + + bt_dev_dbg(hdev, "length %u", skb_queue_len(&req->cmd_q)); + + /* If an error occurred during request building, remove all HCI + * commands queued on the HCI request queue. + */ + if (req->err) { + skb_queue_purge(&req->cmd_q); + return req->err; + } + + /* Do not allow empty requests */ + if (skb_queue_empty(&req->cmd_q)) + return -ENODATA; + + skb = skb_peek_tail(&req->cmd_q); + bt_cb(skb)->hci.req_complete_skb = hci_cmd_sync_complete; + bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB; + + spin_lock_irqsave(&hdev->cmd_q.lock, flags); + skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); + spin_unlock_irqrestore(&hdev->cmd_q.lock, flags); + + queue_work(hdev->workqueue, &hdev->cmd_work); + + return 0; +} + +/* This function requires the caller holds hdev->req_lock. */ +struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u8 event, u32 timeout, + struct sock *sk) +{ + struct hci_request req; + struct sk_buff *skb; + int err = 0; + + bt_dev_dbg(hdev, "Opcode 0x%4.4x", opcode); + + hci_req_init(&req, hdev); + + hci_cmd_sync_add(&req, opcode, plen, param, event, sk); + + hdev->req_status = HCI_REQ_PEND; + + err = hci_cmd_sync_run(&req); + if (err < 0) + return ERR_PTR(err); + + err = wait_event_interruptible_timeout(hdev->req_wait_q, + hdev->req_status != HCI_REQ_PEND, + timeout); + + if (err == -ERESTARTSYS) + return ERR_PTR(-EINTR); + + switch (hdev->req_status) { + case HCI_REQ_DONE: + err = -bt_to_errno(hdev->req_result); + break; + + case HCI_REQ_CANCELED: + err = -hdev->req_result; + break; + + default: + err = -ETIMEDOUT; + break; + } + + hdev->req_status = 0; + hdev->req_result = 0; + skb = hdev->req_skb; + hdev->req_skb = NULL; + + bt_dev_dbg(hdev, "end: err %d", err); + + if (err < 0) { + kfree_skb(skb); + return ERR_PTR(err); + } + + return skb; +} +EXPORT_SYMBOL(__hci_cmd_sync_sk); + +/* This function requires the caller holds hdev->req_lock. */ +struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + return __hci_cmd_sync_sk(hdev, opcode, plen, param, 0, timeout, NULL); +} +EXPORT_SYMBOL(__hci_cmd_sync); + +/* Send HCI command and wait for command complete event */ +struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u32 timeout) +{ + struct sk_buff *skb; + + if (!test_bit(HCI_UP, &hdev->flags)) + return ERR_PTR(-ENETDOWN); + + bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); + + hci_req_sync_lock(hdev); + skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); + hci_req_sync_unlock(hdev); + + return skb; +} +EXPORT_SYMBOL(hci_cmd_sync); + +/* This function requires the caller holds hdev->req_lock. */ +struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen, + const void *param, u8 event, u32 timeout) +{ + return __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, + NULL); +} +EXPORT_SYMBOL(__hci_cmd_sync_ev); + +/* This function requires the caller holds hdev->req_lock. 
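+ * (i.e. callers are serialized through hci_req_sync_lock() and
+ * hci_req_sync_unlock(), which take and release that mutex.)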
+ */
+int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
+			     const void *param, u8 event, u32 timeout,
+			     struct sock *sk)
+{
+	struct sk_buff *skb;
+	u8 status;
+
+	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);
+	if (IS_ERR(skb)) {
+		if (!event)
+			bt_dev_err(hdev, "Opcode 0x%4.4x failed: %ld", opcode,
+				   PTR_ERR(skb));
+		return PTR_ERR(skb);
+	}
+
+	/* If the command returns just a status event, skb is set to NULL
+	 * since there are no parameters to return; failures were already
+	 * caught above via IS_ERR(skb), with the actual error available
+	 * through PTR_ERR(skb).
+	 */
+	if (!skb)
+		return 0;
+
+	status = skb->data[0];
+
+	kfree_skb(skb);
+
+	return status;
+}
+EXPORT_SYMBOL(__hci_cmd_sync_status_sk);
+
+int __hci_cmd_sync_status(struct hci_dev *hdev, u16 opcode, u32 plen,
+			  const void *param, u32 timeout)
+{
+	return __hci_cmd_sync_status_sk(hdev, opcode, plen, param, 0, timeout,
+					NULL);
+}
+EXPORT_SYMBOL(__hci_cmd_sync_status);
+
+static void hci_cmd_sync_work(struct work_struct *work)
+{
+	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_work);
+
+	bt_dev_dbg(hdev, "");
+
+	/* Dequeue all entries and run them */
+	while (1) {
+		struct hci_cmd_sync_work_entry *entry;
+
+		mutex_lock(&hdev->cmd_sync_work_lock);
+		entry = list_first_entry_or_null(&hdev->cmd_sync_work_list,
+						 struct hci_cmd_sync_work_entry,
+						 list);
+		if (entry)
+			list_del(&entry->list);
+		mutex_unlock(&hdev->cmd_sync_work_lock);
+
+		if (!entry)
+			break;
+
+		bt_dev_dbg(hdev, "entry %p", entry);
+
+		if (entry->func) {
+			int err;
+
+			hci_req_sync_lock(hdev);
+			err = entry->func(hdev, entry->data);
+			if (entry->destroy)
+				entry->destroy(hdev, entry->data, err);
+			hci_req_sync_unlock(hdev);
+		}
+
+		kfree(entry);
+	}
+}
+
+static void hci_cmd_sync_cancel_work(struct work_struct *work)
+{
+	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_sync_cancel_work);
+
+	cancel_delayed_work_sync(&hdev->cmd_timer);
+	cancel_delayed_work_sync(&hdev->ncmd_timer);
+	atomic_set(&hdev->cmd_cnt, 1);
+
+	wake_up_interruptible(&hdev->req_wait_q);
+}
+
+static int hci_scan_disable_sync(struct hci_dev *hdev);
+static int scan_disable_sync(struct hci_dev *hdev, void *data)
+{
+	return hci_scan_disable_sync(hdev);
+}
+
+static int hci_inquiry_sync(struct hci_dev *hdev, u8 length);
+static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data)
+{
+	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN);
+}
+
+static void le_scan_disable(struct work_struct *work)
+{
+	struct hci_dev *hdev = container_of(work, struct hci_dev,
+					    le_scan_disable.work);
+	int status;
+
+	bt_dev_dbg(hdev, "");
+	hci_dev_lock(hdev);
+
+	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
+		goto _return;
+
+	cancel_delayed_work(&hdev->le_scan_restart);
+
+	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL);
+	if (status) {
+		bt_dev_err(hdev, "failed to disable LE scan: %d", status);
+		goto _return;
+	}
+
+	hdev->discovery.scan_start = 0;
+
+	/* If we were running an LE-only scan, change the discovery state.
+	 * If we were running both LE and BR/EDR inquiry simultaneously,
+	 * and BR/EDR inquiry is already finished, stop discovery;
+	 * otherwise BR/EDR inquiry will stop discovery when it finishes.
+	 * If we are going to resolve a remote device name, do not change
+	 * the discovery state.
+ */ + + if (hdev->discovery.type == DISCOV_TYPE_LE) + goto discov_stopped; + + if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) + goto _return; + + if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { + if (!test_bit(HCI_INQUIRY, &hdev->flags) && + hdev->discovery.state != DISCOVERY_RESOLVING) + goto discov_stopped; + + goto _return; + } + + status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL); + if (status) { + bt_dev_err(hdev, "inquiry failed: status %d", status); + goto discov_stopped; + } + + goto _return; + +discov_stopped: + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + +_return: + hci_dev_unlock(hdev); +} + +static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, + u8 filter_dup); +static int hci_le_scan_restart_sync(struct hci_dev *hdev) +{ + /* If controller is not scanning we are done. */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) + return 0; + + if (hdev->scanning_paused) { + bt_dev_dbg(hdev, "Scanning is paused for suspend"); + return 0; + } + + hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00); + return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, + LE_SCAN_FILTER_DUP_ENABLE); +} + +static void le_scan_restart(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + le_scan_restart.work); + unsigned long timeout, duration, scan_start, now; + int status; + + bt_dev_dbg(hdev, ""); + + status = hci_le_scan_restart_sync(hdev); + if (status) { + bt_dev_err(hdev, "failed to restart LE scan: status %d", + status); + return; + } + + hci_dev_lock(hdev); + + if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || + !hdev->discovery.scan_start) + goto unlock; + + /* When the scan was started, hdev->le_scan_disable has been queued + * after duration from scan_start. During scan restart this job + * has been canceled, and we need to queue it again after proper + * timeout, to make sure that scan does not run indefinitely. 
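+	 * (The open-coded arithmetic below computes the elapsed time even
+	 * if jiffies has wrapped around since scan_start.)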
+ */ + duration = hdev->discovery.scan_duration; + scan_start = hdev->discovery.scan_start; + now = jiffies; + if (now - scan_start <= duration) { + int elapsed; + + if (now >= scan_start) + elapsed = now - scan_start; + else + elapsed = ULONG_MAX - scan_start + now; + + timeout = duration - elapsed; + } else { + timeout = 0; + } + + queue_delayed_work(hdev->req_workqueue, + &hdev->le_scan_disable, timeout); + +unlock: + hci_dev_unlock(hdev); +} + +static int reenable_adv_sync(struct hci_dev *hdev, void *data) +{ + bt_dev_dbg(hdev, ""); + + if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && + list_empty(&hdev->adv_instances)) + return 0; + + if (hdev->cur_adv_instance) { + return hci_schedule_adv_instance_sync(hdev, + hdev->cur_adv_instance, + true); + } else { + if (ext_adv_capable(hdev)) { + hci_start_ext_adv_sync(hdev, 0x00); + } else { + hci_update_adv_data_sync(hdev, 0x00); + hci_update_scan_rsp_data_sync(hdev, 0x00); + hci_enable_advertising_sync(hdev); + } + } + + return 0; +} + +static void reenable_adv(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + reenable_adv_work); + int status; + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL); + if (status) + bt_dev_err(hdev, "failed to reenable ADV: %d", status); + + hci_dev_unlock(hdev); +} + +static void cancel_adv_timeout(struct hci_dev *hdev) +{ + if (hdev->adv_instance_timeout) { + hdev->adv_instance_timeout = 0; + cancel_delayed_work(&hdev->adv_instance_expire); + } +} + +/* For a single instance: + * - force == true: The instance will be removed even when its remaining + * lifetime is not zero. + * - force == false: the instance will be deactivated but kept stored unless + * the remaining lifetime is zero. + * + * For instance == 0x00: + * - force == true: All instances will be removed regardless of their timeout + * setting. + * - force == false: Only instances that have a timeout will be removed. + */ +int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk, + u8 instance, bool force) +{ + struct adv_info *adv_instance, *n, *next_instance = NULL; + int err; + u8 rem_inst; + + /* Cancel any timeout concerning the removed instance(s). */ + if (!instance || hdev->cur_adv_instance == instance) + cancel_adv_timeout(hdev); + + /* Get the next instance to advertise BEFORE we remove + * the current one. This can be the same instance again + * if there is only one instance. + */ + if (instance && hdev->cur_adv_instance == instance) + next_instance = hci_get_next_instance(hdev, instance); + + if (instance == 0x00) { + list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, + list) { + if (!(force || adv_instance->timeout)) + continue; + + rem_inst = adv_instance->instance; + err = hci_remove_adv_instance(hdev, rem_inst); + if (!err) + mgmt_advertising_removed(sk, hdev, rem_inst); + } + } else { + adv_instance = hci_find_adv_instance(hdev, instance); + + if (force || (adv_instance && adv_instance->timeout && + !adv_instance->remaining_time)) { + /* Don't advertise a removed instance. 
*/ + if (next_instance && + next_instance->instance == instance) + next_instance = NULL; + + err = hci_remove_adv_instance(hdev, instance); + if (!err) + mgmt_advertising_removed(sk, hdev, instance); + } + } + + if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) + return 0; + + if (next_instance && !ext_adv_capable(hdev)) + return hci_schedule_adv_instance_sync(hdev, + next_instance->instance, + false); + + return 0; +} + +static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data) +{ + u8 instance = *(u8 *)data; + + kfree(data); + + hci_clear_adv_instance_sync(hdev, NULL, instance, false); + + if (list_empty(&hdev->adv_instances)) + return hci_disable_advertising_sync(hdev); + + return 0; +} + +static void adv_timeout_expire(struct work_struct *work) +{ + u8 *inst_ptr; + struct hci_dev *hdev = container_of(work, struct hci_dev, + adv_instance_expire.work); + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + hdev->adv_instance_timeout = 0; + + if (hdev->cur_adv_instance == 0x00) + goto unlock; + + inst_ptr = kmalloc(1, GFP_KERNEL); + if (!inst_ptr) + goto unlock; + + *inst_ptr = hdev->cur_adv_instance; + hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL); + +unlock: + hci_dev_unlock(hdev); +} + +void hci_cmd_sync_init(struct hci_dev *hdev) +{ + INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); + INIT_LIST_HEAD(&hdev->cmd_sync_work_list); + mutex_init(&hdev->cmd_sync_work_lock); + mutex_init(&hdev->unregister_lock); + + INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); + INIT_WORK(&hdev->reenable_adv_work, reenable_adv); + INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); + INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart); + INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); +} + +void hci_cmd_sync_clear(struct hci_dev *hdev) +{ + struct hci_cmd_sync_work_entry *entry, *tmp; + + cancel_work_sync(&hdev->cmd_sync_work); + cancel_work_sync(&hdev->reenable_adv_work); + + mutex_lock(&hdev->cmd_sync_work_lock); + list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) { + if (entry->destroy) + entry->destroy(hdev, entry->data, -ECANCELED); + + list_del(&entry->list); + kfree(entry); + } + mutex_unlock(&hdev->cmd_sync_work_lock); +} + +void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err) +{ + bt_dev_dbg(hdev, "err 0x%2.2x", err); + + if (hdev->req_status == HCI_REQ_PEND) { + hdev->req_result = err; + hdev->req_status = HCI_REQ_CANCELED; + + cancel_delayed_work_sync(&hdev->cmd_timer); + cancel_delayed_work_sync(&hdev->ncmd_timer); + atomic_set(&hdev->cmd_cnt, 1); + + wake_up_interruptible(&hdev->req_wait_q); + } +} + +void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) +{ + bt_dev_dbg(hdev, "err 0x%2.2x", err); + + if (hdev->req_status == HCI_REQ_PEND) { + hdev->req_result = err; + hdev->req_status = HCI_REQ_CANCELED; + + queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work); + } +} +EXPORT_SYMBOL(hci_cmd_sync_cancel); + +int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + struct hci_cmd_sync_work_entry *entry; + int err = 0; + + mutex_lock(&hdev->unregister_lock); + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + err = -ENODEV; + goto unlock; + } + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { + err = -ENOMEM; + goto unlock; + } + entry->func = func; + entry->data = data; + entry->destroy = destroy; + + mutex_lock(&hdev->cmd_sync_work_lock); + list_add_tail(&entry->list, 
&hdev->cmd_sync_work_list); + mutex_unlock(&hdev->cmd_sync_work_lock); + + queue_work(hdev->req_workqueue, &hdev->cmd_sync_work); + +unlock: + mutex_unlock(&hdev->unregister_lock); + return err; +} +EXPORT_SYMBOL(hci_cmd_sync_queue); + +int hci_update_eir_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_eir cp; + + bt_dev_dbg(hdev, ""); + + if (!hdev_is_powered(hdev)) + return 0; + + if (!lmp_ext_inq_capable(hdev)) + return 0; + + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return 0; + + if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + eir_create(hdev, cp.data); + + if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) + return 0; + + memcpy(hdev->eir, cp.data, sizeof(cp.data)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static u8 get_service_classes(struct hci_dev *hdev) +{ + struct bt_uuid *uuid; + u8 val = 0; + + list_for_each_entry(uuid, &hdev->uuids, list) + val |= uuid->svc_hint; + + return val; +} + +int hci_update_class_sync(struct hci_dev *hdev) +{ + u8 cod[3]; + + bt_dev_dbg(hdev, ""); + + if (!hdev_is_powered(hdev)) + return 0; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) + return 0; + + cod[0] = hdev->minor_class; + cod[1] = hdev->major_class; + cod[2] = get_service_classes(hdev); + + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) + cod[1] |= 0x20; + + if (memcmp(cod, hdev->dev_class, 3) == 0) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CLASS_OF_DEV, + sizeof(cod), cod, HCI_CMD_TIMEOUT); +} + +static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) +{ + /* If there is no connection we are OK to advertise. */ + if (hci_conn_num(hdev, LE_LINK) == 0) + return true; + + /* Check le_states if there is any connection in peripheral role. */ + if (hdev->conn_hash.le_num_peripheral > 0) { + /* Peripheral connection state and non connectable mode + * bit 20. + */ + if (!connectable && !(hdev->le_states[2] & 0x10)) + return false; + + /* Peripheral connection state and connectable mode bit 38 + * and scannable bit 21. + */ + if (connectable && (!(hdev->le_states[4] & 0x40) || + !(hdev->le_states[2] & 0x20))) + return false; + } + + /* Check le_states if there is any connection in central role. */ + if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { + /* Central connection state and non connectable mode bit 18. */ + if (!connectable && !(hdev->le_states[2] & 0x02)) + return false; + + /* Central connection state and connectable mode bit 35 and + * scannable 19. + */ + if (connectable && (!(hdev->le_states[4] & 0x08) || + !(hdev->le_states[2] & 0x08))) + return false; + } + + return true; +} + +static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) +{ + /* If privacy is not enabled don't use RPA */ + if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) + return false; + + /* If basic privacy mode is enabled use RPA */ + if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) + return true; + + /* If limited privacy mode is enabled don't use RPA if we're + * both discoverable and bondable. + */ + if ((flags & MGMT_ADV_FLAG_DISCOV) && + hci_dev_test_flag(hdev, HCI_BONDABLE)) + return false; + + /* We're neither bondable nor discoverable in the limited + * privacy mode, therefore use RPA. 
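+	 *
+	 * Recap of the checks above:
+	 *
+	 *   privacy disabled                         -> no RPA
+	 *   basic privacy                            -> RPA
+	 *   limited privacy, discoverable + bondable -> no RPA
+	 *   limited privacy otherwise                -> RPA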
+	 */
+	return true;
+}
+
+static int hci_set_random_addr_sync(struct hci_dev *hdev, bdaddr_t *rpa)
+{
+	/* If we're advertising or initiating an LE connection we can't
+	 * go ahead and change the random address at this time. This is
+	 * because the eventual initiator address used for the
+	 * subsequently created connection will be undefined (some
+	 * controllers use the new address and others the one we had
+	 * when the operation started).
+	 *
+	 * In this kind of scenario skip the update and let the random
+	 * address be updated at the next cycle.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
+	    hci_lookup_le_connect(hdev)) {
+		bt_dev_dbg(hdev, "Deferring random address update");
+		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
+		return 0;
+	}
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RANDOM_ADDR,
+				     6, rpa, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,
+				   bool rpa, u8 *own_addr_type)
+{
+	int err;
+
+	/* If privacy is enabled use a resolvable private address. If the
+	 * current RPA has expired or something other than the current RPA
+	 * is in use, then generate a new one.
+	 */
+	if (rpa) {
+		/* If the controller supports LL Privacy, the own address
+		 * type used is 0x03.
+		 */
+		if (use_ll_privacy(hdev))
+			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
+		else
+			*own_addr_type = ADDR_LE_DEV_RANDOM;
+
+		/* Check if RPA is valid */
+		if (rpa_valid(hdev))
+			return 0;
+
+		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+		if (err < 0) {
+			bt_dev_err(hdev, "failed to generate new RPA");
+			return err;
+		}
+
+		err = hci_set_random_addr_sync(hdev, &hdev->rpa);
+		if (err)
+			return err;
+
+		return 0;
+	}
+
+	/* In case of required privacy without resolvable private address,
+	 * use a non-resolvable private address. This is useful for active
+	 * scanning and non-connectable advertising.
+	 */
+	if (require_privacy) {
+		bdaddr_t nrpa;
+
+		while (true) {
+			/* The non-resolvable private address is generated
+			 * from random six bytes with the two most significant
+			 * bits cleared.
+			 */
+			get_random_bytes(&nrpa, 6);
+			nrpa.b[5] &= 0x3f;
+
+			/* The non-resolvable private address shall not be
+			 * equal to the public address.
+			 */
+			if (bacmp(&hdev->bdaddr, &nrpa))
+				break;
+		}
+
+		*own_addr_type = ADDR_LE_DEV_RANDOM;
+
+		return hci_set_random_addr_sync(hdev, &nrpa);
+	}
+
+	/* If forcing static address is in use or there is no public
+	 * address use the static address as random address (but skip
+	 * the HCI command if the current random address is already the
+	 * static one).
+	 *
+	 * In case BR/EDR has been disabled on a dual-mode controller
+	 * and a static address has been configured, then use that
+	 * address instead of the public BR/EDR address.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
+	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
+	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
+	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
+		*own_addr_type = ADDR_LE_DEV_RANDOM;
+		if (bacmp(&hdev->static_addr, &hdev->random_addr))
+			return hci_set_random_addr_sync(hdev,
+							&hdev->static_addr);
+		return 0;
+	}
+
+	/* Neither privacy nor a static address is being used, so use a
+	 * public address.
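+	 *
+	 * In summary, the address selection above is:
+	 *
+	 *   rpa == true              -> random address (RPA, regenerated
+	 *                               when expired)
+	 *   require_privacy          -> random address (fresh NRPA)
+	 *   static forced/no public  -> random address (static address)
+	 *   otherwise                -> public address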
+	 */
+	*own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+	return 0;
+}
+
+static int hci_disable_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+{
+	struct hci_cp_le_set_ext_adv_enable *cp;
+	struct hci_cp_ext_adv_set *set;
+	u8 data[sizeof(*cp) + sizeof(*set) * 1];
+	u8 size;
+
+	/* If request specifies an instance that doesn't exist, fail */
+	if (instance > 0) {
+		struct adv_info *adv;
+
+		adv = hci_find_adv_instance(hdev, instance);
+		if (!adv)
+			return -EINVAL;
+
+		/* If not enabled there is nothing to do */
+		if (!adv->enabled)
+			return 0;
+	}
+
+	memset(data, 0, sizeof(data));
+
+	cp = (void *)data;
+	set = (void *)cp->data;
+
+	/* Instance 0x00 indicates all advertising instances will be disabled */
+	cp->num_of_sets = !!instance;
+	cp->enable = 0x00;
+
+	set->handle = instance;
+
+	size = sizeof(*cp) + sizeof(*set) * cp->num_of_sets;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE,
+				     size, data, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_adv_set_random_addr_sync(struct hci_dev *hdev, u8 instance,
+					    bdaddr_t *random_addr)
+{
+	struct hci_cp_le_set_adv_set_rand_addr cp;
+	int err;
+
+	if (!instance) {
+		/* Instance 0x00 doesn't have an adv_info, instead it uses
+		 * hdev->random_addr to track its address, so whenever it
+		 * needs to be updated this also sets the random address,
+		 * since hdev->random_addr is shared with the scan state
+		 * machine.
+		 */
+		err = hci_set_random_addr_sync(hdev, random_addr);
+		if (err)
+			return err;
+	}
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.handle = instance;
+	bacpy(&cp.bdaddr, random_addr);
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_SET_RAND_ADDR,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_setup_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance)
+{
+	struct hci_cp_le_set_ext_adv_params cp;
+	bool connectable;
+	u32 flags;
+	bdaddr_t random_addr;
+	u8 own_addr_type;
+	int err;
+	struct adv_info *adv;
+	bool secondary_adv;
+
+	if (instance > 0) {
+		adv = hci_find_adv_instance(hdev, instance);
+		if (!adv)
+			return -EINVAL;
+	} else {
+		adv = NULL;
+	}
+
+	/* Updating parameters of an active instance will return a
+	 * Command Disallowed error, so we must first disable the
+	 * instance if it is active.
+	 */
+	if (adv && !adv->pending) {
+		err = hci_disable_ext_adv_instance_sync(hdev, instance);
+		if (err)
+			return err;
+	}
+
+	flags = hci_adv_instance_flags(hdev, instance);
+
+	/* If the "connectable" instance flag was not set, then choose between
+	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
+	 */
+	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
+		      mgmt_get_connectable(hdev);
+
+	if (!is_advertising_allowed(hdev, connectable))
+		return -EPERM;
+
+	/* Set require_privacy to true only when non-connectable
+	 * advertising is used. In that case it is fine to use a
+	 * non-resolvable private address.
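+	 * That is why !connectable is passed as require_privacy to
+	 * hci_get_random_address() below.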
+	 */
+	err = hci_get_random_address(hdev, !connectable,
+				     adv_use_rpa(hdev, flags), adv,
+				     &own_addr_type, &random_addr);
+	if (err < 0)
+		return err;
+
+	memset(&cp, 0, sizeof(cp));
+
+	if (adv) {
+		hci_cpu_to_le24(adv->min_interval, cp.min_interval);
+		hci_cpu_to_le24(adv->max_interval, cp.max_interval);
+		cp.tx_power = adv->tx_power;
+	} else {
+		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval);
+		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval);
+		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE;
+	}
+
+	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK);
+
+	if (connectable) {
+		if (secondary_adv)
+			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND);
+		else
+			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
+	} else if (hci_adv_instance_is_scannable(hdev, instance) ||
+		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) {
+		if (secondary_adv)
+			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND);
+		else
+			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
+	} else {
+		if (secondary_adv)
+			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND);
+		else
+			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);
+	}
+
+	/* If Own_Address_Type equals 0x02 or 0x03, the Peer_Address parameter
+	 * contains the peer’s Identity Address and the Peer_Address_Type
+	 * parameter contains the peer’s Identity Type (i.e., 0x00 or 0x01).
+	 * These parameters are used to locate the corresponding local IRK in
+	 * the resolving list; this IRK is used to generate the own address
+	 * used in the advertisement.
+	 */
+	if (own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED)
+		hci_copy_identity_address(hdev, &cp.peer_addr,
+					  &cp.peer_addr_type);
+
+	cp.own_addr_type = own_addr_type;
+	cp.channel_map = hdev->le_adv_channel_map;
+	cp.handle = instance;
+
+	if (flags & MGMT_ADV_FLAG_SEC_2M) {
+		cp.primary_phy = HCI_ADV_PHY_1M;
+		cp.secondary_phy = HCI_ADV_PHY_2M;
+	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) {
+		cp.primary_phy = HCI_ADV_PHY_CODED;
+		cp.secondary_phy = HCI_ADV_PHY_CODED;
+	} else {
+		/* In all other cases use 1M */
+		cp.primary_phy = HCI_ADV_PHY_1M;
+		cp.secondary_phy = HCI_ADV_PHY_1M;
+	}
+
+	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
+				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+	if (err)
+		return err;
+
+	if ((own_addr_type == ADDR_LE_DEV_RANDOM ||
+	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) &&
+	    bacmp(&random_addr, BDADDR_ANY)) {
+		/* Check if the random address needs to be updated */
+		if (adv) {
+			if (!bacmp(&random_addr, &adv->random_addr))
+				return 0;
+		} else {
+			if (!bacmp(&random_addr, &hdev->random_addr))
+				return 0;
+		}
+
+		return hci_set_adv_set_random_addr_sync(hdev, instance,
+							&random_addr);
+	}
+
+	return 0;
+}
+
+static int hci_set_ext_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance)
+{
+	struct {
+		struct hci_cp_le_set_ext_scan_rsp_data cp;
+		u8 data[HCI_MAX_EXT_AD_LENGTH];
+	} pdu;
+	u8 len;
+	struct adv_info *adv = NULL;
+	int err;
+
+	memset(&pdu, 0, sizeof(pdu));
+
+	if (instance) {
+		adv = hci_find_adv_instance(hdev, instance);
+		if (!adv || !adv->scan_rsp_changed)
+			return 0;
+	}
+
+	len = eir_create_scan_rsp(hdev, instance, pdu.data);
+
+	pdu.cp.handle = instance;
+	pdu.cp.length = len;
+	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+	pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;
+
+	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA,
+				    sizeof(pdu.cp) + len, &pdu.cp,
+				    HCI_CMD_TIMEOUT);
+	if (err)
+		return err;
+
+	if (adv) {
+		adv->scan_rsp_changed = false;
+	} else {
+		memcpy(hdev->scan_rsp_data, pdu.data, len);
hdev->scan_rsp_data_len = len; + } + + return 0; +} + +static int __hci_set_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_scan_rsp_data cp; + u8 len; + + memset(&cp, 0, sizeof(cp)); + + len = eir_create_scan_rsp(hdev, instance, cp.data); + + if (hdev->scan_rsp_data_len == len && + !memcmp(cp.data, hdev->scan_rsp_data, len)) + return 0; + + memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); + hdev->scan_rsp_data_len = len; + + cp.length = len; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_RSP_DATA, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance) +{ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + if (ext_adv_capable(hdev)) + return hci_set_ext_scan_rsp_data_sync(hdev, instance); + + return __hci_set_scan_rsp_data_sync(hdev, instance); +} + +int hci_enable_ext_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_ext_adv_enable *cp; + struct hci_cp_ext_adv_set *set; + u8 data[sizeof(*cp) + sizeof(*set) * 1]; + struct adv_info *adv; + + if (instance > 0) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -EINVAL; + /* If already enabled there is nothing to do */ + if (adv->enabled) + return 0; + } else { + adv = NULL; + } + + cp = (void *)data; + set = (void *)cp->data; + + memset(cp, 0, sizeof(*cp)); + + cp->enable = 0x01; + cp->num_of_sets = 0x01; + + memset(set, 0, sizeof(*set)); + + set->handle = instance; + + /* Set duration per instance since controller is responsible for + * scheduling it. + */ + if (adv && adv->timeout) { + u16 duration = adv->timeout * MSEC_PER_SEC; + + /* Time = N * 10 ms */ + set->duration = cpu_to_le16(duration / 10); + } + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, + sizeof(*cp) + + sizeof(*set) * cp->num_of_sets, + data, HCI_CMD_TIMEOUT); +} + +int hci_start_ext_adv_sync(struct hci_dev *hdev, u8 instance) +{ + int err; + + err = hci_setup_ext_adv_instance_sync(hdev, instance); + if (err) + return err; + + err = hci_set_ext_scan_rsp_data_sync(hdev, instance); + if (err) + return err; + + return hci_enable_ext_advertising_sync(hdev, instance); +} + +static int hci_disable_per_advertising_sync(struct hci_dev *hdev, u8 instance) +{ + struct hci_cp_le_set_per_adv_enable cp; + + /* If periodic advertising already disabled there is nothing to do. 
+	 */
+	if (!hci_dev_test_flag(hdev, HCI_LE_PER_ADV))
+		return 0;
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.enable = 0x00;
+	cp.handle = instance;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_per_adv_params_sync(struct hci_dev *hdev, u8 instance,
+				       u16 min_interval, u16 max_interval)
+{
+	struct hci_cp_le_set_per_adv_params cp;
+
+	memset(&cp, 0, sizeof(cp));
+
+	if (!min_interval)
+		min_interval = DISCOV_LE_PER_ADV_INT_MIN;
+
+	if (!max_interval)
+		max_interval = DISCOV_LE_PER_ADV_INT_MAX;
+
+	cp.handle = instance;
+	cp.min_interval = cpu_to_le16(min_interval);
+	cp.max_interval = cpu_to_le16(max_interval);
+	cp.periodic_properties = 0x0000;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_PARAMS,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_set_per_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+	struct {
+		struct hci_cp_le_set_per_adv_data cp;
+		u8 data[HCI_MAX_PER_AD_LENGTH];
+	} pdu;
+	u8 len;
+
+	memset(&pdu, 0, sizeof(pdu));
+
+	if (instance) {
+		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
+
+		if (!adv || !adv->periodic)
+			return 0;
+	}
+
+	len = eir_create_per_adv_data(hdev, instance, pdu.data);
+
+	pdu.cp.length = len;
+	pdu.cp.handle = instance;
+	pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_DATA,
+				     sizeof(pdu.cp) + len, &pdu,
+				     HCI_CMD_TIMEOUT);
+}
+
+static int hci_enable_per_advertising_sync(struct hci_dev *hdev, u8 instance)
+{
+	struct hci_cp_le_set_per_adv_enable cp;
+
+	/* If periodic advertising is already enabled there is nothing to do. */
+	if (hci_dev_test_flag(hdev, HCI_LE_PER_ADV))
+		return 0;
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.enable = 0x01;
+	cp.handle = instance;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PER_ADV_ENABLE,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Checks if the periodic advertising data contains a Basic Audio Announcement
+ * and, if it does, generates a Broadcast ID and adds a Broadcast Announcement.
+ */
+static int hci_adv_bcast_annoucement(struct hci_dev *hdev, struct adv_info *adv)
+{
+	u8 bid[3];
+	u8 ad[4 + 3];
+
+	/* Skip if adv is NULL: instance 0x00 is used for general purpose
+	 * advertising, so it cannot be used for the likes of a Broadcast
+	 * Announcement as it can be overwritten at any point.
+	 */
+	if (!adv)
+		return 0;
+
+	/* If the PA data doesn't contain a Basic Audio Announcement there
+	 * is nothing to do.
+	 */
+	if (!eir_get_service_data(adv->per_adv_data, adv->per_adv_data_len,
+				  0x1851, NULL))
+		return 0;
+
+	/* Check if the advertising data already has a Broadcast Announcement,
+	 * since the process may want to control the Broadcast ID directly and
+	 * in that case the kernel shall not interfere.
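+	 *
+	 * (0x1851 and 0x1852 used here are the Basic Audio Announcement
+	 * and Broadcast Audio Announcement service UUIDs.)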
+ */ + if (eir_get_service_data(adv->adv_data, adv->adv_data_len, 0x1852, + NULL)) + return 0; + + /* Generate Broadcast ID */ + get_random_bytes(bid, sizeof(bid)); + eir_append_service_data(ad, 0, 0x1852, bid, sizeof(bid)); + hci_set_adv_instance_data(hdev, adv->instance, sizeof(ad), ad, 0, NULL); + + return hci_update_adv_data_sync(hdev, adv->instance); +} + +int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len, + u8 *data, u32 flags, u16 min_interval, + u16 max_interval, u16 sync_interval) +{ + struct adv_info *adv = NULL; + int err; + bool added = false; + + hci_disable_per_advertising_sync(hdev, instance); + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + /* Create an instance if that could not be found */ + if (!adv) { + adv = hci_add_per_instance(hdev, instance, flags, + data_len, data, + sync_interval, + sync_interval); + if (IS_ERR(adv)) + return PTR_ERR(adv); + added = true; + } + } + + /* Only start advertising if instance 0 or if a dedicated instance has + * been added. + */ + if (!adv || added) { + err = hci_start_ext_adv_sync(hdev, instance); + if (err < 0) + goto fail; + + err = hci_adv_bcast_annoucement(hdev, adv); + if (err < 0) + goto fail; + } + + err = hci_set_per_adv_params_sync(hdev, instance, min_interval, + max_interval); + if (err < 0) + goto fail; + + err = hci_set_per_adv_data_sync(hdev, instance); + if (err < 0) + goto fail; + + err = hci_enable_per_advertising_sync(hdev, instance); + if (err < 0) + goto fail; + + return 0; + +fail: + if (added) + hci_remove_adv_instance(hdev, instance); + + return err; +} + +static int hci_start_adv_sync(struct hci_dev *hdev, u8 instance) +{ + int err; + + if (ext_adv_capable(hdev)) + return hci_start_ext_adv_sync(hdev, instance); + + err = hci_update_adv_data_sync(hdev, instance); + if (err) + return err; + + err = hci_update_scan_rsp_data_sync(hdev, instance); + if (err) + return err; + + return hci_enable_advertising_sync(hdev); +} + +int hci_enable_advertising_sync(struct hci_dev *hdev) +{ + struct adv_info *adv_instance; + struct hci_cp_le_set_adv_param cp; + u8 own_addr_type, enable = 0x01; + bool connectable; + u16 adv_min_interval, adv_max_interval; + u32 flags; + u8 status; + + if (ext_adv_capable(hdev)) + return hci_enable_ext_advertising_sync(hdev, + hdev->cur_adv_instance); + + flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); + adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); + + /* If the "connectable" instance flag was not set, then choose between + * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. + */ + connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || + mgmt_get_connectable(hdev); + + if (!is_advertising_allowed(hdev, connectable)) + return -EINVAL; + + status = hci_disable_advertising_sync(hdev); + if (status) + return status; + + /* Clear the HCI_LE_ADV bit temporarily so that the + * hci_update_random_address knows that it's safe to go ahead + * and write a new random address. The flag will be set back on + * as soon as the SET_ADV_ENABLE HCI command completes. + */ + hci_dev_clear_flag(hdev, HCI_LE_ADV); + + /* Set require_privacy to true only when non-connectable + * advertising is used. In that case it is fine to use a + * non-resolvable private address. 
+ */ + status = hci_update_random_address_sync(hdev, !connectable, + adv_use_rpa(hdev, flags), + &own_addr_type); + if (status) + return status; + + memset(&cp, 0, sizeof(cp)); + + if (adv_instance) { + adv_min_interval = adv_instance->min_interval; + adv_max_interval = adv_instance->max_interval; + } else { + adv_min_interval = hdev->le_adv_min_interval; + adv_max_interval = hdev->le_adv_max_interval; + } + + if (connectable) { + cp.type = LE_ADV_IND; + } else { + if (hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance)) + cp.type = LE_ADV_SCAN_IND; + else + cp.type = LE_ADV_NONCONN_IND; + + if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || + hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { + adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; + adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; + } + } + + cp.min_interval = cpu_to_le16(adv_min_interval); + cp.max_interval = cpu_to_le16(adv_max_interval); + cp.own_address_type = own_addr_type; + cp.channel_map = hdev->le_adv_channel_map; + + status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (status) + return status; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, + sizeof(enable), &enable, HCI_CMD_TIMEOUT); +} + +static int enable_advertising_sync(struct hci_dev *hdev, void *data) +{ + return hci_enable_advertising_sync(hdev); +} + +int hci_enable_advertising(struct hci_dev *hdev) +{ + if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && + list_empty(&hdev->adv_instances)) + return 0; + + return hci_cmd_sync_queue(hdev, enable_advertising_sync, NULL, NULL); +} + +int hci_remove_ext_adv_instance_sync(struct hci_dev *hdev, u8 instance, + struct sock *sk) +{ + int err; + + if (!ext_adv_capable(hdev)) + return 0; + + err = hci_disable_ext_adv_instance_sync(hdev, instance); + if (err) + return err; + + /* If request specifies an instance that doesn't exist, fail */ + if (instance > 0 && !hci_find_adv_instance(hdev, instance)) + return -EINVAL; + + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_REMOVE_ADV_SET, + sizeof(instance), &instance, 0, + HCI_CMD_TIMEOUT, sk); +} + +static int remove_ext_adv_sync(struct hci_dev *hdev, void *data) +{ + struct adv_info *adv = data; + u8 instance = 0; + + if (adv) + instance = adv->instance; + + return hci_remove_ext_adv_instance_sync(hdev, instance, NULL); +} + +int hci_remove_ext_adv_instance(struct hci_dev *hdev, u8 instance) +{ + struct adv_info *adv = NULL; + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv) + return -EINVAL; + } + + return hci_cmd_sync_queue(hdev, remove_ext_adv_sync, adv, NULL); +} + +int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason) +{ + struct hci_cp_le_term_big cp; + + memset(&cp, 0, sizeof(cp)); + cp.handle = handle; + cp.reason = reason; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_TERM_BIG, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance) +{ + struct { + struct hci_cp_le_set_ext_adv_data cp; + u8 data[HCI_MAX_EXT_AD_LENGTH]; + } pdu; + u8 len; + struct adv_info *adv = NULL; + int err; + + memset(&pdu, 0, sizeof(pdu)); + + if (instance) { + adv = hci_find_adv_instance(hdev, instance); + if (!adv || !adv->adv_data_changed) + return 0; + } + + len = eir_create_adv_data(hdev, instance, pdu.data); + + pdu.cp.length = len; + pdu.cp.handle = instance; + pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; + pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; + + err = __hci_cmd_sync_status(hdev, 
HCI_OP_LE_SET_EXT_ADV_DATA,
+				    sizeof(pdu.cp) + len, &pdu.cp,
+				    HCI_CMD_TIMEOUT);
+	if (err)
+		return err;
+
+	/* Update data if the command succeeds */
+	if (adv) {
+		adv->adv_data_changed = false;
+	} else {
+		memcpy(hdev->adv_data, pdu.data, len);
+		hdev->adv_data_len = len;
+	}
+
+	return 0;
+}
+
+static int hci_set_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+	struct hci_cp_le_set_adv_data cp;
+	u8 len;
+
+	memset(&cp, 0, sizeof(cp));
+
+	len = eir_create_adv_data(hdev, instance, cp.data);
+
+	/* There's nothing to do if the data hasn't changed */
+	if (hdev->adv_data_len == len &&
+	    memcmp(cp.data, hdev->adv_data, len) == 0)
+		return 0;
+
+	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
+	hdev->adv_data_len = len;
+
+	cp.length = len;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_DATA,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance)
+{
+	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+		return 0;
+
+	if (ext_adv_capable(hdev))
+		return hci_set_ext_adv_data_sync(hdev, instance);
+
+	return hci_set_adv_data_sync(hdev, instance);
+}
+
+int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,
+				   bool force)
+{
+	struct adv_info *adv = NULL;
+	u16 timeout;
+
+	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) && !ext_adv_capable(hdev))
+		return -EPERM;
+
+	if (hdev->adv_instance_timeout)
+		return -EBUSY;
+
+	adv = hci_find_adv_instance(hdev, instance);
+	if (!adv)
+		return -ENOENT;
+
+	/* A zero timeout means unlimited advertising. As long as there is
+	 * only one instance, duration should be ignored. We still set a timeout
+	 * in case further instances are being added later on.
+	 *
+	 * If the remaining lifetime of the instance is more than the duration
+	 * then the timeout corresponds to the duration, otherwise it will be
+	 * reduced to the remaining instance lifetime.
+	 */
+	if (adv->timeout == 0 || adv->duration <= adv->remaining_time)
+		timeout = adv->duration;
+	else
+		timeout = adv->remaining_time;
+
+	/* The remaining time is being reduced unless the instance is being
+	 * advertised without time limit.
+	 */
+	if (adv->timeout)
+		adv->remaining_time = adv->remaining_time - timeout;
+
+	/* Only use work for scheduling instances with legacy advertising */
+	if (!ext_adv_capable(hdev)) {
+		hdev->adv_instance_timeout = timeout;
+		queue_delayed_work(hdev->req_workqueue,
+				   &hdev->adv_instance_expire,
+				   msecs_to_jiffies(timeout * 1000));
+	}
+
+	/* If we're just re-scheduling the same instance again then do not
+	 * execute any HCI commands. This happens when a single instance is
+	 * being advertised.
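+	 *
+	 * A typical case is a single timed instance being refreshed: the
+	 * delayed work above is re-armed but no HCI traffic is needed.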
+	 */
+	if (!force && hdev->cur_adv_instance == instance &&
+	    hci_dev_test_flag(hdev, HCI_LE_ADV))
+		return 0;
+
+	hdev->cur_adv_instance = instance;
+
+	return hci_start_adv_sync(hdev, instance);
+}
+
+static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)
+{
+	int err;
+
+	if (!ext_adv_capable(hdev))
+		return 0;
+
+	/* Disable instance 0x00 to disable all instances */
+	err = hci_disable_ext_adv_instance_sync(hdev, 0x00);
+	if (err)
+		return err;
+
+	return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CLEAR_ADV_SETS,
+					0, NULL, 0, HCI_CMD_TIMEOUT, sk);
+}
+
+static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)
+{
+	struct adv_info *adv, *n;
+	int err = 0;
+
+	if (ext_adv_capable(hdev))
+		/* Remove all existing sets */
+		err = hci_clear_adv_sets_sync(hdev, sk);
+	if (ext_adv_capable(hdev))
+		return err;
+
+	/* This is safe as long as there is no command sent while the lock is
+	 * held.
+	 */
+	hci_dev_lock(hdev);
+
+	/* Cleanup non-ext instances */
+	list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) {
+		u8 instance = adv->instance;
+		int err;
+
+		if (!(force || adv->timeout))
+			continue;
+
+		err = hci_remove_adv_instance(hdev, instance);
+		if (!err)
+			mgmt_advertising_removed(sk, hdev, instance);
+	}
+
+	hci_dev_unlock(hdev);
+
+	return 0;
+}
+
+static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,
+			       struct sock *sk)
+{
+	int err = 0;
+
+	/* If we use extended advertising, instance has to be removed first. */
+	if (ext_adv_capable(hdev))
+		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk);
+	if (ext_adv_capable(hdev))
+		return err;
+
+	/* This is safe as long as there is no command sent while the lock is
+	 * held.
+	 */
+	hci_dev_lock(hdev);
+
+	err = hci_remove_adv_instance(hdev, instance);
+	if (!err)
+		mgmt_advertising_removed(sk, hdev, instance);
+
+	hci_dev_unlock(hdev);
+
+	return err;
+}
+
+/* For a single instance:
+ * - force == true: The instance will be removed even when its remaining
+ *   lifetime is not zero.
+ * - force == false: The instance will be deactivated but kept stored unless
+ *   the remaining lifetime is zero.
+ *
+ * For instance == 0x00:
+ * - force == true: All instances will be removed regardless of their timeout
+ *   setting.
+ * - force == false: Only instances that have a timeout will be removed.
+ */
+int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,
+				u8 instance, bool force)
+{
+	struct adv_info *next = NULL;
+	int err;
+
+	/* Cancel any timeout concerning the removed instance(s). */
+	if (!instance || hdev->cur_adv_instance == instance)
+		cancel_adv_timeout(hdev);
+
+	/* Get the next instance to advertise BEFORE we remove
+	 * the current one. This can be the same instance again
+	 * if there is only one instance.
+	 */
+	if (hdev->cur_adv_instance == instance)
+		next = hci_get_next_instance(hdev, instance);
+
+	if (!instance) {
+		err = hci_clear_adv_sync(hdev, sk, force);
+		if (err)
+			return err;
+	} else {
+		struct adv_info *adv = hci_find_adv_instance(hdev, instance);
+
+		if (force || (adv && adv->timeout && !adv->remaining_time)) {
+			/* Don't advertise a removed instance.
*/ + if (next && next->instance == instance) + next = NULL; + + err = hci_remove_adv_sync(hdev, instance, sk); + if (err) + return err; + } + } + + if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) + return 0; + + if (next && !ext_adv_capable(hdev)) + hci_schedule_adv_instance_sync(hdev, next->instance, false); + + return 0; +} + +int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle) +{ + struct hci_cp_read_rssi cp; + + cp.handle = handle; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_RSSI, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_read_clock_sync(struct hci_dev *hdev, struct hci_cp_read_clock *cp) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLOCK, + sizeof(*cp), cp, HCI_CMD_TIMEOUT); +} + +int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type) +{ + struct hci_cp_read_tx_power cp; + + cp.handle = handle; + cp.type = type; + return __hci_cmd_sync_status(hdev, HCI_OP_READ_TX_POWER, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_disable_advertising_sync(struct hci_dev *hdev) +{ + u8 enable = 0x00; + int err = 0; + + /* If controller is not advertising we are done. */ + if (!hci_dev_test_flag(hdev, HCI_LE_ADV)) + return 0; + + if (ext_adv_capable(hdev)) + err = hci_disable_ext_adv_instance_sync(hdev, 0x00); + if (ext_adv_capable(hdev)) + return err; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, + sizeof(enable), &enable, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val, + u8 filter_dup) +{ + struct hci_cp_le_set_ext_scan_enable cp; + + memset(&cp, 0, sizeof(cp)); + cp.enable = val; + + if (hci_dev_test_flag(hdev, HCI_MESH)) + cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + else + cp.filter_dup = filter_dup; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, + u8 filter_dup) +{ + struct hci_cp_le_set_scan_enable cp; + + if (use_ext_scan(hdev)) + return hci_le_set_ext_scan_enable_sync(hdev, val, filter_dup); + + memset(&cp, 0, sizeof(cp)); + cp.enable = val; + + if (val && hci_dev_test_flag(hdev, HCI_MESH)) + cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; + else + cp.filter_dup = filter_dup; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_le_set_addr_resolution_enable_sync(struct hci_dev *hdev, u8 val) +{ + if (!use_ll_privacy(hdev)) + return 0; + + /* If controller is not/already resolving we are done. */ + if (val == hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, + sizeof(val), &val, HCI_CMD_TIMEOUT); +} + +static int hci_scan_disable_sync(struct hci_dev *hdev) +{ + int err; + + /* If controller is not scanning we are done. 
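+	 * The scanning_paused case is handled separately below, since
+	 * during suspend the scan state is owned by the suspend code.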
+	 */
+	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
+		return 0;
+
+	if (hdev->scanning_paused) {
+		bt_dev_dbg(hdev, "Scanning is paused for suspend");
+		return 0;
+	}
+
+	err = hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00);
+	if (err) {
+		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
+		return err;
+	}
+
+	return err;
+}
+
+static bool scan_use_rpa(struct hci_dev *hdev)
+{
+	return hci_dev_test_flag(hdev, HCI_PRIVACY);
+}
+
+static void hci_start_interleave_scan(struct hci_dev *hdev)
+{
+	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER;
+	queue_delayed_work(hdev->req_workqueue,
+			   &hdev->interleave_scan, 0);
+}
+
+static bool is_interleave_scanning(struct hci_dev *hdev)
+{
+	return hdev->interleave_scan_state != INTERLEAVE_SCAN_NONE;
+}
+
+static void cancel_interleave_scan(struct hci_dev *hdev)
+{
+	bt_dev_dbg(hdev, "cancelling interleave scan");
+
+	cancel_delayed_work_sync(&hdev->interleave_scan);
+
+	hdev->interleave_scan_state = INTERLEAVE_SCAN_NONE;
+}
+
+/* Return true if interleave scanning was started as a result of this call,
+ * otherwise return false.
+ */
+static bool hci_update_interleaved_scan_sync(struct hci_dev *hdev)
+{
+	/* Do interleaved scan only if all of the following are true:
+	 * - There is at least one ADV monitor
+	 * - At least one pending LE connection or one device to be scanned for
+	 * - Monitor offloading is not supported
+	 * If so, we should alternate between allowlist scan and one without
+	 * any filters to save power.
+	 */
+	bool use_interleaving = hci_is_adv_monitoring(hdev) &&
+				!(list_empty(&hdev->pend_le_conns) &&
+				  list_empty(&hdev->pend_le_reports)) &&
+				hci_get_adv_monitor_offload_ext(hdev) ==
+				    HCI_ADV_MONITOR_EXT_NONE;
+	bool is_interleaving = is_interleave_scanning(hdev);
+
+	if (use_interleaving && !is_interleaving) {
+		hci_start_interleave_scan(hdev);
+		bt_dev_dbg(hdev, "starting interleave scan");
+		return true;
+	}
+
+	if (!use_interleaving && is_interleaving)
+		cancel_interleave_scan(hdev);
+
+	return false;
+}
+
+/* Removes a device from the resolving list if needed. */
+static int hci_le_del_resolve_list_sync(struct hci_dev *hdev,
+					bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+	struct hci_cp_le_del_from_resolv_list cp;
+	struct bdaddr_list_with_irk *entry;
+
+	if (!use_ll_privacy(hdev))
+		return 0;
+
+	/* Check if the IRK has been programmed */
+	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list, bdaddr,
+						bdaddr_type);
+	if (!entry)
+		return 0;
+
+	cp.bdaddr_type = bdaddr_type;
+	bacpy(&cp.bdaddr, bdaddr);
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_RESOLV_LIST,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_del_accept_list_sync(struct hci_dev *hdev,
+				       bdaddr_t *bdaddr, u8 bdaddr_type)
+{
+	struct hci_cp_le_del_from_accept_list cp;
+	int err;
+
+	/* Check if device is on accept list before removing it */
+	if (!hci_bdaddr_list_lookup(&hdev->le_accept_list, bdaddr, bdaddr_type))
+		return 0;
+
+	cp.bdaddr_type = bdaddr_type;
+	bacpy(&cp.bdaddr, bdaddr);
+
+	/* Ignore errors when removing from the resolving list, as it is
+	 * likely that the device was never added.
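+	 * hci_le_del_resolve_list_sync() already returns 0 when no IRK
+	 * entry exists for the device, so only real HCI failures would
+	 * surface here.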
+	 */
+	hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+
+	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_DEL_FROM_ACCEPT_LIST,
+				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+	if (err) {
+		bt_dev_err(hdev, "Unable to remove from allow list: %d", err);
+		return err;
+	}
+
+	bt_dev_dbg(hdev, "Remove %pMR (0x%x) from allow list", &cp.bdaddr,
+		   cp.bdaddr_type);
+
+	return 0;
+}
+
+struct conn_params {
+	bdaddr_t addr;
+	u8 addr_type;
+	hci_conn_flags_t flags;
+	u8 privacy_mode;
+};
+
+/* Adds a device to the resolving list if needed.
+ * Setting params to NULL programs the local hdev->irk.
+ */
+static int hci_le_add_resolve_list_sync(struct hci_dev *hdev,
+					struct conn_params *params)
+{
+	struct hci_cp_le_add_to_resolv_list cp;
+	struct smp_irk *irk;
+	struct bdaddr_list_with_irk *entry;
+	struct hci_conn_params *p;
+
+	if (!use_ll_privacy(hdev))
+		return 0;
+
+	/* Attempt to program local identity address, type and irk if params is
+	 * NULL.
+	 */
+	if (!params) {
+		if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
+			return 0;
+
+		hci_copy_identity_address(hdev, &cp.bdaddr, &cp.bdaddr_type);
+		memcpy(cp.peer_irk, hdev->irk, 16);
+		goto done;
+	}
+
+	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
+	if (!irk)
+		return 0;
+
+	/* Check if the IRK has _not_ been programmed yet. */
+	entry = hci_bdaddr_list_lookup_with_irk(&hdev->le_resolv_list,
+						&params->addr,
+						params->addr_type);
+	if (entry)
+		return 0;
+
+	cp.bdaddr_type = params->addr_type;
+	bacpy(&cp.bdaddr, &params->addr);
+	memcpy(cp.peer_irk, irk->val, 16);
+
+	/* Default privacy mode is always Network */
+	params->privacy_mode = HCI_NETWORK_PRIVACY;
+
+	rcu_read_lock();
+	p = hci_pend_le_action_lookup(&hdev->pend_le_conns,
+				      &params->addr, params->addr_type);
+	if (!p)
+		p = hci_pend_le_action_lookup(&hdev->pend_le_reports,
+					      &params->addr, params->addr_type);
+	if (p)
+		WRITE_ONCE(p->privacy_mode, HCI_NETWORK_PRIVACY);
+	rcu_read_unlock();
+
+done:
+	if (hci_dev_test_flag(hdev, HCI_PRIVACY))
+		memcpy(cp.local_irk, hdev->irk, 16);
+	else
+		memset(cp.local_irk, 0, 16);
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_RESOLV_LIST,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Set Device Privacy Mode. */
+static int hci_le_set_privacy_mode_sync(struct hci_dev *hdev,
+					struct conn_params *params)
+{
+	struct hci_cp_le_set_privacy_mode cp;
+	struct smp_irk *irk;
+
+	/* If device privacy mode has already been set there is nothing to do */
+	if (params->privacy_mode == HCI_DEVICE_PRIVACY)
+		return 0;
+
+	/* Check if HCI_CONN_FLAG_DEVICE_PRIVACY has been set as it also
+	 * indicates that LL Privacy has been enabled and
+	 * HCI_OP_LE_SET_PRIVACY_MODE is supported.
+	 */
+	if (!(params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY))
+		return 0;
+
+	irk = hci_find_irk_by_addr(hdev, &params->addr, params->addr_type);
+	if (!irk)
+		return 0;
+
+	memset(&cp, 0, sizeof(cp));
+	cp.bdaddr_type = irk->addr_type;
+	bacpy(&cp.bdaddr, &irk->bdaddr);
+	cp.mode = HCI_DEVICE_PRIVACY;
+
+	/* Note: params->privacy_mode is not updated since it is a copy */
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_PRIVACY_MODE,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+/* Adds a device to the allow list if needed; if the device uses an RPA (has
+ * an IRK) this also attempts to program the device in the resolving list and
+ * properly sets the privacy mode.
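+ *
+ * The order used below is: resolving list -> privacy mode -> accept list,
+ * with the resolving list entry rolled back if the final accept list
+ * command fails.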
+ */
+static int hci_le_add_accept_list_sync(struct hci_dev *hdev,
+				       struct conn_params *params,
+				       u8 *num_entries)
+{
+	struct hci_cp_le_add_to_accept_list cp;
+	int err;
+
+	/* During suspend, only wakeable devices can be in acceptlist */
+	if (hdev->suspended &&
+	    !(params->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
+		return 0;
+
+	/* Select filter policy to accept all advertising */
+	if (*num_entries >= hdev->le_accept_list_size)
+		return -ENOSPC;
+
+	/* Accept list cannot be used with RPAs */
+	if (!use_ll_privacy(hdev) &&
+	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
+		return -EINVAL;
+
+	/* Attempt to program the device in the resolving list first, to
+	 * avoid having to roll back in case it fails; since the resolving
+	 * list is dynamic it can probably be smaller than the accept list.
+	 */
+	err = hci_le_add_resolve_list_sync(hdev, params);
+	if (err) {
+		bt_dev_err(hdev, "Unable to add to resolve list: %d", err);
+		return err;
+	}
+
+	/* Set Privacy Mode */
+	err = hci_le_set_privacy_mode_sync(hdev, params);
+	if (err) {
+		bt_dev_err(hdev, "Unable to set privacy mode: %d", err);
+		return err;
+	}
+
+	/* Check if already in accept list */
+	if (hci_bdaddr_list_lookup(&hdev->le_accept_list, &params->addr,
+				   params->addr_type))
+		return 0;
+
+	*num_entries += 1;
+	cp.bdaddr_type = params->addr_type;
+	bacpy(&cp.bdaddr, &params->addr);
+
+	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST,
+				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+	if (err) {
+		bt_dev_err(hdev, "Unable to add to allow list: %d", err);
+		/* Rollback the device from the resolving list */
+		hci_le_del_resolve_list_sync(hdev, &cp.bdaddr, cp.bdaddr_type);
+		return err;
+	}
+
+	bt_dev_dbg(hdev, "Add %pMR (0x%x) to allow list", &cp.bdaddr,
+		   cp.bdaddr_type);
+
+	return 0;
+}
+
+/* This function disables/pauses all advertising instances */
+static int hci_pause_advertising_sync(struct hci_dev *hdev)
+{
+	int err;
+	int old_state;
+
+	/* If advertising has already been paused there is nothing to do. */
+	if (hdev->advertising_paused)
+		return 0;
+
+	bt_dev_dbg(hdev, "Pausing directed advertising");
+
+	/* Stop directed advertising */
+	old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
+	if (old_state) {
+		/* When discoverable timeout triggers, then just make sure
+		 * the limited discoverable flag is cleared. Even in the case
+		 * of a timeout triggered from general discoverable, it is
+		 * safe to unconditionally clear the flag.
+		 */
+		hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
+		hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
+		hdev->discov_timeout = 0;
+	}
+
+	bt_dev_dbg(hdev, "Pausing advertising instances");
+
+	/* Call to disable any advertisements active on the controller.
+	 * This will succeed even if no advertisements are configured.
+	 */
+	err = hci_disable_advertising_sync(hdev);
+	if (err)
+		return err;
+
+	/* If we are using software rotation, pause the loop */
+	if (!ext_adv_capable(hdev))
+		cancel_adv_timeout(hdev);
+
+	hdev->advertising_paused = true;
+	hdev->advertising_old_state = old_state;
+
+	return 0;
+}
+
+/* This function enables all user advertising instances */
+static int hci_resume_advertising_sync(struct hci_dev *hdev)
+{
+	struct adv_info *adv, *tmp;
+	int err;
+
+	/* If advertising has not been paused there is nothing to do.
+	 */
+	if (!hdev->advertising_paused)
+		return 0;
+
+	/* Resume directed advertising */
+	hdev->advertising_paused = false;
+	if (hdev->advertising_old_state) {
+		hci_dev_set_flag(hdev, HCI_ADVERTISING);
+		hdev->advertising_old_state = 0;
+	}
+
+	bt_dev_dbg(hdev, "Resuming advertising instances");
+
+	if (ext_adv_capable(hdev)) {
+		/* Call for each tracked instance to be re-enabled */
+		list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) {
+			err = hci_enable_ext_advertising_sync(hdev,
+							      adv->instance);
+			if (!err)
+				continue;
+
+			/* If the instance cannot be resumed remove it */
+			hci_remove_ext_adv_instance_sync(hdev, adv->instance,
+							 NULL);
+		}
+	} else {
+		/* Schedule for most recent instance to be restarted and begin
+		 * the software rotation loop
+		 */
+		err = hci_schedule_adv_instance_sync(hdev,
+						     hdev->cur_adv_instance,
+						     true);
+	}
+
+	hdev->advertising_paused = false;
+
+	return err;
+}
+
+static int hci_pause_addr_resolution(struct hci_dev *hdev)
+{
+	int err;
+
+	if (!use_ll_privacy(hdev))
+		return 0;
+
+	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION))
+		return 0;
+
+	/* Cannot disable addr resolution if scanning is enabled or
+	 * when initiating an LE connection.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_LE_SCAN) ||
+	    hci_lookup_le_connect(hdev)) {
+		bt_dev_err(hdev, "Command not allowed when scan/LE connect");
+		return -EPERM;
+	}
+
+	/* Cannot disable addr resolution if advertising is enabled. */
+	err = hci_pause_advertising_sync(hdev);
+	if (err) {
+		bt_dev_err(hdev, "Pause advertising failed: %d", err);
+		return err;
+	}
+
+	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+	if (err)
+		bt_dev_err(hdev, "Unable to disable Address Resolution: %d",
+			   err);
+
+	/* Return if address resolution is disabled and RPA is not used. */
+	if (!err && scan_use_rpa(hdev))
+		return err;
+
+	hci_resume_advertising_sync(hdev);
+	return err;
+}
+
+struct sk_buff *hci_read_local_oob_data_sync(struct hci_dev *hdev,
+					     bool extended, struct sock *sk)
+{
+	u16 opcode = extended ? HCI_OP_READ_LOCAL_OOB_EXT_DATA :
+				HCI_OP_READ_LOCAL_OOB_DATA;
+
+	return __hci_cmd_sync_sk(hdev, opcode, 0, NULL, 0, HCI_CMD_TIMEOUT, sk);
+}
+
+static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
+{
+	struct hci_conn_params *params;
+	struct conn_params *p;
+	size_t i;
+
+	rcu_read_lock();
+
+	i = 0;
+	list_for_each_entry_rcu(params, list, action)
+		++i;
+	*n = i;
+
+	rcu_read_unlock();
+
+	p = kvcalloc(*n, sizeof(struct conn_params), GFP_KERNEL);
+	if (!p)
+		return NULL;
+
+	rcu_read_lock();
+
+	i = 0;
+	list_for_each_entry_rcu(params, list, action) {
+		/* Racing adds are handled in next scan update */
+		if (i >= *n)
+			break;
+
+		/* No hdev->lock, but: addr, addr_type are immutable.
+		 * privacy_mode is only written by us or in
+		 * hci_cc_le_set_privacy_mode that we wait for.
+		 * We should be idempotent so MGMT updating flags
+		 * while we are processing is OK.
+		 */
+		bacpy(&p[i].addr, &params->addr);
+		p[i].addr_type = params->addr_type;
+		p[i].flags = READ_ONCE(params->flags);
+		p[i].privacy_mode = READ_ONCE(params->privacy_mode);
+		++i;
+	}
+
+	rcu_read_unlock();
+
+	*n = i;
+	return p;
+}
+
+/* Device must not be scanning when updating the accept list.
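+ * (hci_passive_scan_sync() disables scanning before calling this.)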
+ *
+ * Update is done using the following sequence:
+ *
+ * use_ll_privacy((Disable Advertising) -> Disable Resolving List) ->
+ * Remove Devices From Accept List ->
+ * (has IRK && use_ll_privacy(Remove Devices From Resolving List)) ->
+ * Add Devices to Accept List ->
+ * (has IRK && use_ll_privacy(Add Devices to Resolving List)) ->
+ * use_ll_privacy(Enable Resolving List -> (Enable Advertising)) ->
+ * Enable Scanning
+ *
+ * In case of failure advertising shall be restored to its original state and
+ * the returned filter policy shall disable use of the accept list, since
+ * either the accept list or the resolving list could not be programmed.
+ */
+static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
+{
+	struct conn_params *params;
+	struct bdaddr_list *b, *t;
+	u8 num_entries = 0;
+	bool pend_conn, pend_report;
+	u8 filter_policy;
+	size_t i, n;
+	int err;
+
+	/* Pause advertising if resolving list can be used as controllers
+	 * cannot accept resolving list modifications while advertising.
+	 */
+	if (use_ll_privacy(hdev)) {
+		err = hci_pause_advertising_sync(hdev);
+		if (err) {
+			bt_dev_err(hdev, "pause advertising failed: %d", err);
+			return 0x00;
+		}
+	}
+
+	/* Disable address resolution while reprogramming accept list since
+	 * devices that do have an IRK will be programmed in the resolving list
+	 * when LL Privacy is enabled.
+	 */
+	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x00);
+	if (err) {
+		bt_dev_err(hdev, "Unable to disable LL privacy: %d", err);
+		goto done;
+	}
+
+	/* Go through the current accept list programmed into the
+	 * controller one by one and check if that address is connected or is
+	 * still in the list of pending connections or list of devices to
+	 * report. If not present in either list, then remove it from
+	 * the controller.
+	 */
+	list_for_each_entry_safe(b, t, &hdev->le_accept_list, list) {
+		if (hci_conn_hash_lookup_le(hdev, &b->bdaddr, b->bdaddr_type))
+			continue;
+
+		/* Pointers not dereferenced, no locks needed */
+		pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
+						      &b->bdaddr,
+						      b->bdaddr_type);
+		pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
+							&b->bdaddr,
+							b->bdaddr_type);
+
+		/* If the device is not likely to connect or report,
+		 * remove it from the acceptlist.
+		 */
+		if (!pend_conn && !pend_report) {
+			hci_le_del_accept_list_sync(hdev, &b->bdaddr,
+						    b->bdaddr_type);
+			continue;
+		}
+
+		num_entries++;
+	}
+
+	/* Since all no longer valid accept list entries have been
+	 * removed, walk through the list of pending connections
+	 * and ensure that any new device gets programmed into
+	 * the controller.
+	 *
+	 * If the list of the devices is larger than the list of
+	 * available accept list entries in the controller, then
+	 * just abort and return the filter policy value to not use
+	 * the accept list.
+	 *
+	 * The list and params may be mutated while we wait for events,
+	 * so make a copy and iterate it.
+	 */
+
+	params = conn_params_copy(&hdev->pend_le_conns, &n);
+	if (!params) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < n; ++i) {
+		err = hci_le_add_accept_list_sync(hdev, &params[i],
+						  &num_entries);
+		if (err) {
+			kvfree(params);
+			goto done;
+		}
+	}
+
+	kvfree(params);
+
+	/* After adding all new pending connections, walk through
+	 * the list of pending reports and also add these to the
+	 * accept list if there is still space. Abort if space runs out.
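+	 *
+	 * As with the pending connections above, iterate over a copy of
+	 * the list since it may be mutated while command completions are
+	 * awaited.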
+	 */
+
+	params = conn_params_copy(&hdev->pend_le_reports, &n);
+	if (!params) {
+		err = -ENOMEM;
+		goto done;
+	}
+
+	for (i = 0; i < n; ++i) {
+		err = hci_le_add_accept_list_sync(hdev, &params[i],
+						  &num_entries);
+		if (err) {
+			kvfree(params);
+			goto done;
+		}
+	}
+
+	kvfree(params);
+
+	/* Use the allowlist unless the following conditions are all true:
+	 * - We are not currently suspending
+	 * - There are 1 or more ADV monitors registered and it's not offloaded
+	 * - Interleaved scanning is not currently using the allowlist
+	 */
+	if (!idr_is_empty(&hdev->adv_monitors_idr) && !hdev->suspended &&
+	    hci_get_adv_monitor_offload_ext(hdev) == HCI_ADV_MONITOR_EXT_NONE &&
+	    hdev->interleave_scan_state != INTERLEAVE_SCAN_ALLOWLIST)
+		err = -EINVAL;
+
+done:
+	filter_policy = err ? 0x00 : 0x01;
+
+	/* Enable address resolution when LL Privacy is enabled. */
+	err = hci_le_set_addr_resolution_enable_sync(hdev, 0x01);
+	if (err)
+		bt_dev_err(hdev, "Unable to enable LL privacy: %d", err);
+
+	/* Resume advertising if it was paused */
+	if (use_ll_privacy(hdev))
+		hci_resume_advertising_sync(hdev);
+
+	/* Select filter policy to use accept list */
+	return filter_policy;
+}
+
+/* Returns true if an LE connection is in the scanning state */
+static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)
+{
+	struct hci_conn_hash *h = &hdev->conn_hash;
+	struct hci_conn *c;
+
+	rcu_read_lock();
+
+	list_for_each_entry_rcu(c, &h->list, list) {
+		if (c->type == LE_LINK && c->state == BT_CONNECT &&
+		    test_bit(HCI_CONN_SCANNING, &c->flags)) {
+			rcu_read_unlock();
+			return true;
+		}
+	}
+
+	rcu_read_unlock();
+
+	return false;
+}
+
+static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
+					  u16 interval, u16 window,
+					  u8 own_addr_type, u8 filter_policy)
+{
+	struct hci_cp_le_set_ext_scan_params *cp;
+	struct hci_cp_le_scan_phy_params *phy;
+	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
+	u8 num_phy = 0;
+
+	cp = (void *)data;
+	phy = (void *)cp->data;
+
+	memset(data, 0, sizeof(data));
+
+	cp->own_addr_type = own_addr_type;
+	cp->filter_policy = filter_policy;
+
+	if (scan_1m(hdev) || scan_2m(hdev)) {
+		cp->scanning_phys |= LE_SCAN_PHY_1M;
+
+		phy->type = type;
+		phy->interval = cpu_to_le16(interval);
+		phy->window = cpu_to_le16(window);
+
+		num_phy++;
+		phy++;
+	}
+
+	if (scan_coded(hdev)) {
+		cp->scanning_phys |= LE_SCAN_PHY_CODED;
+
+		phy->type = type;
+		phy->interval = cpu_to_le16(interval);
+		phy->window = cpu_to_le16(window);
+
+		num_phy++;
+		phy++;
+	}
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
+				     sizeof(*cp) + sizeof(*phy) * num_phy,
+				     data, HCI_CMD_TIMEOUT);
+}
+
+static int hci_le_set_scan_param_sync(struct hci_dev *hdev, u8 type,
+				      u16 interval, u16 window,
+				      u8 own_addr_type, u8 filter_policy)
+{
+	struct hci_cp_le_set_scan_param cp;
+
+	if (use_ext_scan(hdev))
+		return hci_le_set_ext_scan_param_sync(hdev, type, interval,
+						      window, own_addr_type,
+						      filter_policy);
+
+	memset(&cp, 0, sizeof(cp));
+	cp.type = type;
+	cp.interval = cpu_to_le16(interval);
+	cp.window = cpu_to_le16(window);
+	cp.own_address_type = own_addr_type;
+	cp.filter_policy = filter_policy;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_PARAM,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_start_scan_sync(struct hci_dev *hdev, u8 type, u16 interval,
+			       u16 window, u8 own_addr_type, u8 filter_policy,
+			       u8 filter_dup)
+{
+	int err;
+
+	if (hdev->scanning_paused) {
+		bt_dev_dbg(hdev, "Scanning is paused for suspend");
+		return 0;
+	}
+
+	err = hci_le_set_scan_param_sync(hdev, type, interval, window,
+					 own_addr_type, filter_policy);
+	if (err)
+		return err;
+
+	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, filter_dup);
+}
+
+static int hci_passive_scan_sync(struct hci_dev *hdev)
+{
+	u8 own_addr_type;
+	u8 filter_policy;
+	u16 window, interval;
+	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;
+	int err;
+
+	if (hdev->scanning_paused) {
+		bt_dev_dbg(hdev, "Scanning is paused for suspend");
+		return 0;
+	}
+
+	err = hci_scan_disable_sync(hdev);
+	if (err) {
+		bt_dev_err(hdev, "disable scanning failed: %d", err);
+		return err;
+	}
+
+	/* Set require_privacy to false since no SCAN_REQ are sent
+	 * during passive scanning. Not using a non-resolvable address
+	 * here is important so that peer devices using direct
+	 * advertising with our address will be correctly reported
+	 * by the controller.
+	 */
+	if (hci_update_random_address_sync(hdev, false, scan_use_rpa(hdev),
+					   &own_addr_type))
+		return 0;
+
+	if (hdev->enable_advmon_interleave_scan &&
+	    hci_update_interleaved_scan_sync(hdev))
+		return 0;
+
+	bt_dev_dbg(hdev, "interleave state %d", hdev->interleave_scan_state);
+
+	/* Adding or removing entries from the accept list must
+	 * happen before enabling scanning. The controller does
+	 * not allow accept list modification while scanning.
+	 */
+	filter_policy = hci_update_accept_list_sync(hdev);
+
+	/* When the controller is using random resolvable addresses and
+	 * with that having LE privacy enabled, then controllers with
+	 * Extended Scanner Filter Policies support can now enable support
+	 * for handling directed advertising.
+	 *
+	 * So instead of using filter policies 0x00 (no acceptlist)
+	 * and 0x01 (acceptlist enabled) use the new filter policies
+	 * 0x02 (no acceptlist) and 0x03 (acceptlist enabled).
+	 */
+	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
+	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
+		filter_policy |= 0x02;
+
+	if (hdev->suspended) {
+		window = hdev->le_scan_window_suspend;
+		interval = hdev->le_scan_int_suspend;
+	} else if (hci_is_le_conn_scanning(hdev)) {
+		window = hdev->le_scan_window_connect;
+		interval = hdev->le_scan_int_connect;
+	} else if (hci_is_adv_monitoring(hdev)) {
+		window = hdev->le_scan_window_adv_monitor;
+		interval = hdev->le_scan_int_adv_monitor;
+	} else {
+		window = hdev->le_scan_window;
+		interval = hdev->le_scan_interval;
+	}
+
+	/* Disable all filtering for Mesh */
+	if (hci_dev_test_flag(hdev, HCI_MESH)) {
+		filter_policy = 0;
+		filter_dups = LE_SCAN_FILTER_DUP_DISABLE;
+	}
+
+	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);
+
+	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window,
+				   own_addr_type, filter_policy, filter_dups);
+}
+
+/* This function controls the passive scanning based on hdev->pend_le_conns
+ * list.
If there are pending LE connections we start the background
+ * scanning, otherwise we stop it, in the following sequence:
+ *
+ * If there are devices to scan:
+ *
+ * Disable Scanning -> Update Accept List ->
+ * use_ll_privacy((Disable Advertising) -> Disable Resolving List ->
+ * Update Resolving List -> Enable Resolving List -> (Enable Advertising)) ->
+ * Enable Scanning
+ *
+ * Otherwise:
+ *
+ * Disable Scanning
+ */
+int hci_update_passive_scan_sync(struct hci_dev *hdev)
+{
+	int err;
+
+	if (!test_bit(HCI_UP, &hdev->flags) ||
+	    test_bit(HCI_INIT, &hdev->flags) ||
+	    hci_dev_test_flag(hdev, HCI_SETUP) ||
+	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
+	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
+	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
+		return 0;
+
+	/* No point in doing scanning if LE support hasn't been enabled */
+	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
+		return 0;
+
+	/* If discovery is active don't interfere with it */
+	if (hdev->discovery.state != DISCOVERY_STOPPED)
+		return 0;
+
+	/* Reset RSSI and UUID filters when starting background scanning
+	 * since these filters are meant for service discovery only.
+	 *
+	 * The Start Discovery and Start Service Discovery operations
+	 * ensure that proper values are set for the RSSI threshold and
+	 * the UUID filter list. So it is safe to just reset them here.
+	 */
+	hci_discovery_filter_clear(hdev);
+
+	bt_dev_dbg(hdev, "ADV monitoring is %s",
+		   hci_is_adv_monitoring(hdev) ? "on" : "off");
+
+	if (!hci_dev_test_flag(hdev, HCI_MESH) &&
+	    list_empty(&hdev->pend_le_conns) &&
+	    list_empty(&hdev->pend_le_reports) &&
+	    !hci_is_adv_monitoring(hdev) &&
+	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+		/* If there are no pending LE connections, no devices
+		 * to be scanned for and no ADV monitors, we should stop
+		 * the background scanning.
+		 */
+
+		bt_dev_dbg(hdev, "stopping background scanning");
+
+		err = hci_scan_disable_sync(hdev);
+		if (err)
+			bt_dev_err(hdev, "stop background scanning failed: %d",
+				   err);
+	} else {
+		/* If there is at least one pending LE connection, we should
+		 * keep the background scan running.
+		 */
+
+		/* If controller is connecting, we should not start scanning
+		 * since some controllers are not able to scan and connect at
+		 * the same time.
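+		 *
+		 * hci_lookup_le_connect() below detects exactly that case.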
+ */ + if (hci_lookup_le_connect(hdev)) + return 0; + + bt_dev_dbg(hdev, "start background scanning"); + + err = hci_passive_scan_sync(hdev); + if (err) + bt_dev_err(hdev, "start background scanning failed: %d", + err); + } + + return err; +} + +static int update_scan_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_scan_sync(hdev); +} + +int hci_update_scan(struct hci_dev *hdev) +{ + return hci_cmd_sync_queue(hdev, update_scan_sync, NULL, NULL); +} + +static int update_passive_scan_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_passive_scan_sync(hdev); +} + +int hci_update_passive_scan(struct hci_dev *hdev) +{ + /* Only queue if it would have any effect */ + if (!test_bit(HCI_UP, &hdev->flags) || + test_bit(HCI_INIT, &hdev->flags) || + hci_dev_test_flag(hdev, HCI_SETUP) || + hci_dev_test_flag(hdev, HCI_CONFIG) || + hci_dev_test_flag(hdev, HCI_AUTO_OFF) || + hci_dev_test_flag(hdev, HCI_UNREGISTER)) + return 0; + + return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL); +} + +int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val) +{ + int err; + + if (!bredr_sc_enabled(hdev) || lmp_host_sc_capable(hdev)) + return 0; + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, + sizeof(val), &val, HCI_CMD_TIMEOUT); + + if (!err) { + if (val) { + hdev->features[1][0] |= LMP_HOST_SC; + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + } else { + hdev->features[1][0] &= ~LMP_HOST_SC; + hci_dev_clear_flag(hdev, HCI_SC_ENABLED); + } + } + + return err; +} + +int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode) +{ + int err; + + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || + lmp_host_ssp_capable(hdev)) + return 0; + + if (!mode && hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { + __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); + } + + err = __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); + if (err) + return err; + + return hci_write_sc_support_sync(hdev, 0x01); +} + +int hci_write_le_host_supported_sync(struct hci_dev *hdev, u8 le, u8 simul) +{ + struct hci_cp_write_le_host_supported cp; + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || + !lmp_bredr_capable(hdev)) + return 0; + + /* Check first if we already have the right host state + * (host features set) + */ + if (le == lmp_host_le_capable(hdev) && + simul == lmp_host_le_br_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + cp.le = le; + cp.simul = simul; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_powered_update_adv_sync(struct hci_dev *hdev) +{ + struct adv_info *adv, *tmp; + int err; + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return 0; + + /* If RPA Resolution has not been enabled yet it means the + * resolving list is empty and we should attempt to program the + * local IRK in order to support using own_addr_type + * ADDR_LE_DEV_RANDOM_RESOLVED (0x03). + */ + if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) { + hci_le_add_resolve_list_sync(hdev, NULL); + hci_le_set_addr_resolution_enable_sync(hdev, 0x01); + } + + /* Make sure the controller has a good default for + * advertising data. This also applies to the case + * where BR/EDR was toggled during the AUTO_OFF phase.
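+ * + * (Note that both branches below also refresh the scan response data; + * they differ only in whether the extended or the legacy advertising + * commands are used, as reported by ext_adv_capable(hdev).)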
+ */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || + list_empty(&hdev->adv_instances)) { + if (ext_adv_capable(hdev)) { + err = hci_setup_ext_adv_instance_sync(hdev, 0x00); + if (!err) + hci_update_scan_rsp_data_sync(hdev, 0x00); + } else { + err = hci_update_adv_data_sync(hdev, 0x00); + if (!err) + hci_update_scan_rsp_data_sync(hdev, 0x00); + } + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + hci_enable_advertising_sync(hdev); + } + + /* Call for each tracked instance to be scheduled */ + list_for_each_entry_safe(adv, tmp, &hdev->adv_instances, list) + hci_schedule_adv_instance_sync(hdev, adv->instance, true); + + return 0; +} + +static int hci_write_auth_enable_sync(struct hci_dev *hdev) +{ + u8 link_sec; + + link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); + if (link_sec == test_bit(HCI_AUTH, &hdev->flags)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_AUTH_ENABLE, + sizeof(link_sec), &link_sec, + HCI_CMD_TIMEOUT); +} + +int hci_write_fast_connectable_sync(struct hci_dev *hdev, bool enable) +{ + struct hci_cp_write_page_scan_activity cp; + u8 type; + int err = 0; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (hdev->hci_ver < BLUETOOTH_VER_1_2) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (enable) { + type = PAGE_SCAN_TYPE_INTERLACED; + + /* 160 msec page scan interval */ + cp.interval = cpu_to_le16(0x0100); + } else { + type = hdev->def_page_scan_type; + cp.interval = cpu_to_le16(hdev->def_page_scan_int); + } + + cp.window = cpu_to_le16(hdev->def_page_scan_window); + + if (__cpu_to_le16(hdev->page_scan_interval) != cp.interval || + __cpu_to_le16(hdev->page_scan_window) != cp.window) { + err = __hci_cmd_sync_status(hdev, + HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (err) + return err; + } + + if (hdev->page_scan_type != type) + err = __hci_cmd_sync_status(hdev, + HCI_OP_WRITE_PAGE_SCAN_TYPE, + sizeof(type), &type, + HCI_CMD_TIMEOUT); + + return err; +} + +static bool disconnected_accept_list_entries(struct hci_dev *hdev) +{ + struct bdaddr_list *b; + + list_for_each_entry(b, &hdev->accept_list, list) { + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); + if (!conn) + return true; + + if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) + return true; + } + + return false; +} + +static int hci_write_scan_enable_sync(struct hci_dev *hdev, u8 val) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SCAN_ENABLE, + sizeof(val), &val, + HCI_CMD_TIMEOUT); +} + +int hci_update_scan_sync(struct hci_dev *hdev) +{ + u8 scan; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (!hdev_is_powered(hdev)) + return 0; + + if (mgmt_powering_down(hdev)) + return 0; + + if (hdev->scanning_paused) + return 0; + + if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || + disconnected_accept_list_entries(hdev)) + scan = SCAN_PAGE; + else + scan = SCAN_DISABLED; + + if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + scan |= SCAN_INQUIRY; + + if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && + test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) + return 0; + + return hci_write_scan_enable_sync(hdev, scan); +} + +int hci_update_name_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_local_name cp; + + memset(&cp, 0, sizeof(cp)); + + memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LOCAL_NAME, + sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +/* This function performs the powered update HCI
command sequence after the HCI init + * sequence, which ends up resetting all states; the sequence is as follows: + * + * HCI_SSP_ENABLED(Enable SSP) + * HCI_LE_ENABLED(Enable LE) + * HCI_LE_ENABLED(use_ll_privacy(Add local IRK to Resolving List) -> + * Update adv data) + * Enable Authentication + * lmp_bredr_capable(Set Fast Connectable -> Set Scan Type -> Set Class -> + * Set Name -> Set EIR) + */ +int hci_powered_update_sync(struct hci_dev *hdev) +{ + int err; + + /* Register the available SMP channels (BR/EDR and LE) only when + * successfully powering on the controller. This late + * registration is required so that LE SMP can clearly decide if + * the public address or static address is used. + */ + smp_register(hdev); + + err = hci_write_ssp_mode_sync(hdev, 0x01); + if (err) + return err; + + err = hci_write_le_host_supported_sync(hdev, 0x01, 0x00); + if (err) + return err; + + err = hci_powered_update_adv_sync(hdev); + if (err) + return err; + + err = hci_write_auth_enable_sync(hdev); + if (err) + return err; + + if (lmp_bredr_capable(hdev)) { + if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) + hci_write_fast_connectable_sync(hdev, true); + else + hci_write_fast_connectable_sync(hdev, false); + hci_update_scan_sync(hdev); + hci_update_class_sync(hdev); + hci_update_name_sync(hdev); + hci_update_eir_sync(hdev); + } + + return 0; +} + +/** + * hci_dev_get_bd_addr_from_property - Get the Bluetooth Device Address + * (BD_ADDR) for an HCI device from + * a firmware node property. + * @hdev: The HCI device + * + * Search the firmware node for 'local-bd-address'. + * + * All-zero BD addresses are rejected, because those could be properties + * that exist in the firmware tables, but were not updated by the firmware. For + * example, the DTS could define 'local-bd-address' with an all-zero BD address.
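+ * + * As an illustrative (hypothetical node label) example, such a stale + * placeholder typically looks like this in a DTS, with the property + * being a 6-byte array in little-endian byte order: + * + * &bluetooth { + * local-bd-address = [ 00 00 00 00 00 00 ]; + * };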
+ */ +static void hci_dev_get_bd_addr_from_property(struct hci_dev *hdev) +{ + struct fwnode_handle *fwnode = dev_fwnode(hdev->dev.parent); + bdaddr_t ba; + int ret; + + ret = fwnode_property_read_u8_array(fwnode, "local-bd-address", + (u8 *)&ba, sizeof(ba)); + if (ret < 0 || !bacmp(&ba, BDADDR_ANY)) + return; + + bacpy(&hdev->public_addr, &ba); +} + +struct hci_init_stage { + int (*func)(struct hci_dev *hdev); +}; + +/* Run init stage NULL terminated function table */ +static int hci_init_stage_sync(struct hci_dev *hdev, + const struct hci_init_stage *stage) +{ + size_t i; + + for (i = 0; stage[i].func; i++) { + int err; + + err = stage[i].func(hdev); + if (err) + return err; + } + + return 0; +} + +/* Read Local Version */ +static int hci_read_local_version_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_VERSION, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read BD Address */ +static int hci_read_bd_addr_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_BD_ADDR, + 0, NULL, HCI_CMD_TIMEOUT); +} + +#define HCI_INIT(_func) \ +{ \ + .func = _func, \ +} + +static const struct hci_init_stage hci_init0[] = { + /* HCI_OP_READ_LOCAL_VERSION */ + HCI_INIT(hci_read_local_version_sync), + /* HCI_OP_READ_BD_ADDR */ + HCI_INIT(hci_read_bd_addr_sync), + {} +}; + +int hci_reset_sync(struct hci_dev *hdev) +{ + int err; + + set_bit(HCI_RESET, &hdev->flags); + + err = __hci_cmd_sync_status(hdev, HCI_OP_RESET, 0, NULL, + HCI_CMD_TIMEOUT); + if (err) + return err; + + return 0; +} + +static int hci_init0_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + /* Reset */ + if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + err = hci_reset_sync(hdev); + if (err) + return err; + } + + return hci_init_stage_sync(hdev, hci_init0); +} + +static int hci_unconf_init_sync(struct hci_dev *hdev) +{ + int err; + + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + return 0; + + err = hci_init0_sync(hdev); + if (err < 0) + return err; + + if (hci_dev_test_flag(hdev, HCI_SETUP)) + hci_debugfs_create_basic(hdev); + + return 0; +} + +/* Read Local Supported Features. */ +static int hci_read_local_features_sync(struct hci_dev *hdev) +{ + /* Not all AMP controllers support this command */ + if (hdev->dev_type == HCI_AMP && !(hdev->commands[14] & 0x20)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* BR Controller init stage 1 command sequence */ +static const struct hci_init_stage br_init1[] = { + /* HCI_OP_READ_LOCAL_FEATURES */ + HCI_INIT(hci_read_local_features_sync), + /* HCI_OP_READ_LOCAL_VERSION */ + HCI_INIT(hci_read_local_version_sync), + /* HCI_OP_READ_BD_ADDR */ + HCI_INIT(hci_read_bd_addr_sync), + {} +}; + +/* Read Local Commands */ +static int hci_read_local_cmds_sync(struct hci_dev *hdev) +{ + /* All Bluetooth 1.2 and later controllers should support the + * HCI command for reading the local supported commands. + * + * Unfortunately some controllers indicate Bluetooth 1.2 support, + * but do not have support for this command. If that is the case, + * the driver can quirk the behavior and skip reading the local + * supported commands. 
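+ * + * A driver opts into that behavior by setting the quirk bit before + * registering the device, e.g.: + * + * set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);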
+ */ + if (hdev->hci_ver > BLUETOOTH_VER_1_1 && + !test_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks)) + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_COMMANDS, + 0, NULL, HCI_CMD_TIMEOUT); + + return 0; +} + +/* Read Local AMP Info */ +static int hci_read_local_amp_info_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_AMP_INFO, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Data Blk size */ +static int hci_read_data_block_size_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Flow Control Mode */ +static int hci_read_flow_control_mode_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_FLOW_CONTROL_MODE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Location Data */ +static int hci_read_location_data_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCATION_DATA, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* AMP Controller init stage 1 command sequence */ +static const struct hci_init_stage amp_init1[] = { + /* HCI_OP_READ_LOCAL_VERSION */ + HCI_INIT(hci_read_local_version_sync), + /* HCI_OP_READ_LOCAL_COMMANDS */ + HCI_INIT(hci_read_local_cmds_sync), + /* HCI_OP_READ_LOCAL_AMP_INFO */ + HCI_INIT(hci_read_local_amp_info_sync), + /* HCI_OP_READ_DATA_BLOCK_SIZE */ + HCI_INIT(hci_read_data_block_size_sync), + /* HCI_OP_READ_FLOW_CONTROL_MODE */ + HCI_INIT(hci_read_flow_control_mode_sync), + /* HCI_OP_READ_LOCATION_DATA */ + HCI_INIT(hci_read_location_data_sync), + {} +}; + +static int hci_init1_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + /* Reset */ + if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { + err = hci_reset_sync(hdev); + if (err) + return err; + } + + switch (hdev->dev_type) { + case HCI_PRIMARY: + hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED; + return hci_init_stage_sync(hdev, br_init1); + case HCI_AMP: + hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED; + return hci_init_stage_sync(hdev, amp_init1); + default: + bt_dev_err(hdev, "Unknown device type %d", hdev->dev_type); + break; + } + + return 0; +} + +/* AMP Controller init stage 2 command sequence */ +static const struct hci_init_stage amp_init2[] = { + /* HCI_OP_READ_LOCAL_FEATURES */ + HCI_INIT(hci_read_local_features_sync), + {} +}; + +/* Read Buffer Size (ACL mtu, max pkt, etc.) 
*/ +static int hci_read_buffer_size_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_BUFFER_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Class of Device */ +static int hci_read_dev_class_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_CLASS_OF_DEV, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Local Name */ +static int hci_read_local_name_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_NAME, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Voice Setting */ +static int hci_read_voice_setting_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_VOICE_SETTING, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Number of Supported IAC */ +static int hci_read_num_supported_iac_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_NUM_SUPPORTED_IAC, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read Current IAC LAP */ +static int hci_read_current_iac_lap_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_READ_CURRENT_IAC_LAP, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_set_event_filter_sync(struct hci_dev *hdev, u8 flt_type, + u8 cond_type, bdaddr_t *bdaddr, + u8 auto_accept) +{ + struct hci_cp_set_event_filter cp; + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return 0; + + if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.flt_type = flt_type; + + if (flt_type != HCI_FLT_CLEAR_ALL) { + cp.cond_type = cond_type; + bacpy(&cp.addr_conn_flt.bdaddr, bdaddr); + cp.addr_conn_flt.auto_accept = auto_accept; + } + + return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_FLT, + flt_type == HCI_FLT_CLEAR_ALL ? + sizeof(cp.flt_type) : sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static int hci_clear_event_filter_sync(struct hci_dev *hdev) +{ + if (!hci_dev_test_flag(hdev, HCI_EVENT_FILTER_CONFIGURED)) + return 0; + + /* In theory the state machine should not reach here unless + * a hci_set_event_filter_sync() call succeeds, but we do + * the check both for parity and as a future reminder. + */ + if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks)) + return 0; + + return hci_set_event_filter_sync(hdev, HCI_FLT_CLEAR_ALL, 0x00, + BDADDR_ANY, 0x00); +} + +/* Connection accept timeout ~20 secs */ +static int hci_write_ca_timeout_sync(struct hci_dev *hdev) +{ + __le16 param = cpu_to_le16(0x7d00); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CA_TIMEOUT, + sizeof(param), ¶m, HCI_CMD_TIMEOUT); +} + +/* BR Controller init stage 2 command sequence */ +static const struct hci_init_stage br_init2[] = { + /* HCI_OP_READ_BUFFER_SIZE */ + HCI_INIT(hci_read_buffer_size_sync), + /* HCI_OP_READ_CLASS_OF_DEV */ + HCI_INIT(hci_read_dev_class_sync), + /* HCI_OP_READ_LOCAL_NAME */ + HCI_INIT(hci_read_local_name_sync), + /* HCI_OP_READ_VOICE_SETTING */ + HCI_INIT(hci_read_voice_setting_sync), + /* HCI_OP_READ_NUM_SUPPORTED_IAC */ + HCI_INIT(hci_read_num_supported_iac_sync), + /* HCI_OP_READ_CURRENT_IAC_LAP */ + HCI_INIT(hci_read_current_iac_lap_sync), + /* HCI_OP_SET_EVENT_FLT */ + HCI_INIT(hci_clear_event_filter_sync), + /* HCI_OP_WRITE_CA_TIMEOUT */ + HCI_INIT(hci_write_ca_timeout_sync), + {} +}; + +static int hci_write_ssp_mode_1_sync(struct hci_dev *hdev) +{ + u8 mode = 0x01; + + if (!lmp_ssp_capable(hdev) || !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return 0; + + /* When SSP is available, then the host features page + * should also be available as well. 
However some + * controllers list the max_page as 0 as long as SSP + * has not been enabled. To achieve proper debugging + * output, force the minimum max_page to 1 at least. + */ + hdev->max_page = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SSP_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); +} + +static int hci_write_eir_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_eir cp; + + if (!lmp_ssp_capable(hdev) || hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return 0; + + memset(hdev->eir, 0, sizeof(hdev->eir)); + memset(&cp, 0, sizeof(cp)); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_EIR, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static int hci_write_inquiry_mode_sync(struct hci_dev *hdev) +{ + u8 mode; + + if (!lmp_inq_rssi_capable(hdev) && + !test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + return 0; + + /* If Extended Inquiry Result events are supported, then + * they are clearly preferred over Inquiry Result with RSSI + * events. + */ + mode = lmp_ext_inq_capable(hdev) ? 0x02 : 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_INQUIRY_MODE, + sizeof(mode), &mode, HCI_CMD_TIMEOUT); +} + +static int hci_read_inq_rsp_tx_power_sync(struct hci_dev *hdev) +{ + if (!lmp_inq_tx_pwr_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_INQ_RSP_TX_POWER, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_read_local_ext_features_sync(struct hci_dev *hdev, u8 page) +{ + struct hci_cp_read_local_ext_features cp; + + if (!lmp_ext_feat_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.page = page; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_EXT_FEATURES, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_read_local_ext_features_1_sync(struct hci_dev *hdev) +{ + return hci_read_local_ext_features_sync(hdev, 0x01); +} + +/* HCI Controller init stage 2 command sequence */ +static const struct hci_init_stage hci_init2[] = { + /* HCI_OP_READ_LOCAL_COMMANDS */ + HCI_INIT(hci_read_local_cmds_sync), + /* HCI_OP_WRITE_SSP_MODE */ + HCI_INIT(hci_write_ssp_mode_1_sync), + /* HCI_OP_WRITE_EIR */ + HCI_INIT(hci_write_eir_sync), + /* HCI_OP_WRITE_INQUIRY_MODE */ + HCI_INIT(hci_write_inquiry_mode_sync), + /* HCI_OP_READ_INQ_RSP_TX_POWER */ + HCI_INIT(hci_read_inq_rsp_tx_power_sync), + /* HCI_OP_READ_LOCAL_EXT_FEATURES */ + HCI_INIT(hci_read_local_ext_features_1_sync), + /* HCI_OP_WRITE_AUTH_ENABLE */ + HCI_INIT(hci_write_auth_enable_sync), + {} +}; + +/* Read LE Buffer Size */ +static int hci_le_read_buffer_size_sync(struct hci_dev *hdev) +{ + /* Use Read LE Buffer Size V2 if supported */ + if (iso_capable(hdev) && hdev->commands[41] & 0x20) + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_BUFFER_SIZE_V2, + 0, NULL, HCI_CMD_TIMEOUT); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_BUFFER_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read LE Local Supported Features */ +static int hci_le_read_local_features_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_LOCAL_FEATURES, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read LE Supported States */ +static int hci_le_read_supported_states_sync(struct hci_dev *hdev) +{ + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_SUPPORTED_STATES, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* LE Controller init stage 2 command sequence */ +static const struct hci_init_stage le_init2[] = { + /* HCI_OP_LE_READ_LOCAL_FEATURES */ + HCI_INIT(hci_le_read_local_features_sync), + /* HCI_OP_LE_READ_BUFFER_SIZE */ + HCI_INIT(hci_le_read_buffer_size_sync), + /* 
HCI_OP_LE_READ_SUPPORTED_STATES */ + HCI_INIT(hci_le_read_supported_states_sync), + {} +}; + +static int hci_init2_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + if (hdev->dev_type == HCI_AMP) + return hci_init_stage_sync(hdev, amp_init2); + + err = hci_init_stage_sync(hdev, hci_init2); + if (err) + return err; + + if (lmp_bredr_capable(hdev)) { + err = hci_init_stage_sync(hdev, br_init2); + if (err) + return err; + } else { + hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); + } + + if (lmp_le_capable(hdev)) { + err = hci_init_stage_sync(hdev, le_init2); + if (err) + return err; + /* LE-only controllers have LE implicitly enabled */ + if (!lmp_bredr_capable(hdev)) + hci_dev_set_flag(hdev, HCI_LE_ENABLED); + } + + return 0; +} + +static int hci_set_event_mask_sync(struct hci_dev *hdev) +{ + /* The second byte is 0xff instead of 0x9f (two reserved bits + * disabled) since a Broadcom 1.2 dongle doesn't respond to the + * command otherwise. + */ + u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 }; + + /* CSR 1.1 dongles do not accept any bitfield so don't try to set + * any event mask for pre 1.2 devices. + */ + if (hdev->hci_ver < BLUETOOTH_VER_1_2) + return 0; + + if (lmp_bredr_capable(hdev)) { + events[4] |= 0x01; /* Flow Specification Complete */ + + /* Don't set Disconnect Complete when suspended as that + * would wake up the host when disconnecting due to + * suspend. + */ + if (hdev->suspended) + events[0] &= 0xef; + } else { + /* Use a different default for LE-only devices */ + memset(events, 0, sizeof(events)); + events[1] |= 0x20; /* Command Complete */ + events[1] |= 0x40; /* Command Status */ + events[1] |= 0x80; /* Hardware Error */ + + /* If the controller supports the Disconnect command, enable + * the corresponding event. In addition, enable packet flow + * control related events. + */ + if (hdev->commands[0] & 0x20) { + /* Don't set Disconnect Complete when suspended as that + * would wake up the host when disconnecting due to + * suspend. + */ + if (!hdev->suspended) + events[0] |= 0x10; /* Disconnection Complete */ + events[2] |= 0x04; /* Number of Completed Packets */ + events[3] |= 0x02; /* Data Buffer Overflow */ + } + + /* If the controller supports the Read Remote Version + * Information command, enable the corresponding event.
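+ * + * (hdev->commands[] is a byte-wise copy of the Supported Commands + * bitmask returned by Read Local Supported Commands, so a test such as + * the (hdev->commands[2] & 0x80) below selects one opcode's octet/bit + * position as assigned by the Core specification.)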
+ */ + if (hdev->commands[2] & 0x80) + events[1] |= 0x08; /* Read Remote Version Information + * Complete + */ + + if (hdev->le_features[0] & HCI_LE_ENCRYPTION) { + events[0] |= 0x80; /* Encryption Change */ + events[5] |= 0x80; /* Encryption Key Refresh Complete */ + } + } + + if (lmp_inq_rssi_capable(hdev) || + test_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks)) + events[4] |= 0x02; /* Inquiry Result with RSSI */ + + if (lmp_ext_feat_capable(hdev)) + events[4] |= 0x04; /* Read Remote Extended Features Complete */ + + if (lmp_esco_capable(hdev)) { + events[5] |= 0x08; /* Synchronous Connection Complete */ + events[5] |= 0x10; /* Synchronous Connection Changed */ + } + + if (lmp_sniffsubr_capable(hdev)) + events[5] |= 0x20; /* Sniff Subrating */ + + if (lmp_pause_enc_capable(hdev)) + events[5] |= 0x80; /* Encryption Key Refresh Complete */ + + if (lmp_ext_inq_capable(hdev)) + events[5] |= 0x40; /* Extended Inquiry Result */ + + if (lmp_no_flush_capable(hdev)) + events[7] |= 0x01; /* Enhanced Flush Complete */ + + if (lmp_lsto_capable(hdev)) + events[6] |= 0x80; /* Link Supervision Timeout Changed */ + + if (lmp_ssp_capable(hdev)) { + events[6] |= 0x01; /* IO Capability Request */ + events[6] |= 0x02; /* IO Capability Response */ + events[6] |= 0x04; /* User Confirmation Request */ + events[6] |= 0x08; /* User Passkey Request */ + events[6] |= 0x10; /* Remote OOB Data Request */ + events[6] |= 0x20; /* Simple Pairing Complete */ + events[7] |= 0x04; /* User Passkey Notification */ + events[7] |= 0x08; /* Keypress Notification */ + events[7] |= 0x10; /* Remote Host Supported + * Features Notification + */ + } + + if (lmp_le_capable(hdev)) + events[7] |= 0x20; /* LE Meta-Event */ + + return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK, + sizeof(events), events, HCI_CMD_TIMEOUT); +} + +static int hci_read_stored_link_key_sync(struct hci_dev *hdev) +{ + struct hci_cp_read_stored_link_key cp; + + if (!(hdev->commands[6] & 0x20) || + test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) + return 0; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, BDADDR_ANY); + cp.read_all = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_STORED_LINK_KEY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_setup_link_policy_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_def_link_policy cp; + u16 link_policy = 0; + + if (!(hdev->commands[5] & 0x10)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (lmp_rswitch_capable(hdev)) + link_policy |= HCI_LP_RSWITCH; + if (lmp_hold_capable(hdev)) + link_policy |= HCI_LP_HOLD; + if (lmp_sniff_capable(hdev)) + link_policy |= HCI_LP_SNIFF; + if (lmp_park_capable(hdev)) + link_policy |= HCI_LP_PARK; + + cp.policy = cpu_to_le16(link_policy); + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_read_page_scan_activity_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[8] & 0x01)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_ACTIVITY, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_read_def_err_data_reporting_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[18] & 0x04) || + !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || + test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_DEF_ERR_DATA_REPORTING, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_read_page_scan_type_sync(struct hci_dev *hdev) +{ + /* Some older Broadcom based Bluetooth 1.2 controllers do not + 
* support the Read Page Scan Type command. Check support for + * this command in the bit mask of supported commands. + */ + if (!(hdev->commands[13] & 0x01)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_PAGE_SCAN_TYPE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read features beyond page 1 if available */ +static int hci_read_local_ext_features_all_sync(struct hci_dev *hdev) +{ + u8 page; + int err; + + if (!lmp_ext_feat_capable(hdev)) + return 0; + + for (page = 2; page < HCI_MAX_PAGES && page <= hdev->max_page; + page++) { + err = hci_read_local_ext_features_sync(hdev, page); + if (err) + return err; + } + + return 0; +} + +/* HCI Controller init stage 3 command sequence */ +static const struct hci_init_stage hci_init3[] = { + /* HCI_OP_SET_EVENT_MASK */ + HCI_INIT(hci_set_event_mask_sync), + /* HCI_OP_READ_STORED_LINK_KEY */ + HCI_INIT(hci_read_stored_link_key_sync), + /* HCI_OP_WRITE_DEF_LINK_POLICY */ + HCI_INIT(hci_setup_link_policy_sync), + /* HCI_OP_READ_PAGE_SCAN_ACTIVITY */ + HCI_INIT(hci_read_page_scan_activity_sync), + /* HCI_OP_READ_DEF_ERR_DATA_REPORTING */ + HCI_INIT(hci_read_def_err_data_reporting_sync), + /* HCI_OP_READ_PAGE_SCAN_TYPE */ + HCI_INIT(hci_read_page_scan_type_sync), + /* HCI_OP_READ_LOCAL_EXT_FEATURES */ + HCI_INIT(hci_read_local_ext_features_all_sync), + {} +}; + +static int hci_le_set_event_mask_sync(struct hci_dev *hdev) +{ + u8 events[8]; + + if (!lmp_le_capable(hdev)) + return 0; + + memset(events, 0, sizeof(events)); + + if (hdev->le_features[0] & HCI_LE_ENCRYPTION) + events[0] |= 0x10; /* LE Long Term Key Request */ + + /* If controller supports the Connection Parameters Request + * Link Layer Procedure, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_CONN_PARAM_REQ_PROC) + /* LE Remote Connection Parameter Request */ + events[0] |= 0x20; + + /* If the controller supports the Data Length Extension + * feature, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) + events[0] |= 0x40; /* LE Data Length Change */ + + /* If the controller supports LL Privacy feature or LE Extended Adv, + * enable the corresponding event. + */ + if (use_enhanced_conn_complete(hdev)) + events[1] |= 0x02; /* LE Enhanced Connection Complete */ + + /* If the controller supports Extended Scanner Filter + * Policies, enable the corresponding event. + */ + if (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY) + events[1] |= 0x04; /* LE Direct Advertising Report */ + + /* If the controller supports Channel Selection Algorithm #2 + * feature, enable the corresponding event. + */ + if (hdev->le_features[1] & HCI_LE_CHAN_SEL_ALG2) + events[2] |= 0x08; /* LE Channel Selection Algorithm */ + + /* If the controller supports the LE Set Scan Enable command, + * enable the corresponding advertising report event. + */ + if (hdev->commands[26] & 0x08) + events[0] |= 0x02; /* LE Advertising Report */ + + /* If the controller supports the LE Create Connection + * command, enable the corresponding event. + */ + if (hdev->commands[26] & 0x10) + events[0] |= 0x01; /* LE Connection Complete */ + + /* If the controller supports the LE Connection Update + * command, enable the corresponding event. + */ + if (hdev->commands[27] & 0x04) + events[0] |= 0x04; /* LE Connection Update Complete */ + + /* If the controller supports the LE Read Remote Used Features + * command, enable the corresponding event. 
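+ * + * (Throughout this function events[] maps onto the LE event mask sent + * via HCI_OP_LE_SET_EVENT_MASK at the end, where bit N of the mask + * roughly corresponds to LE Meta subevent code N + 1 in the Core + * specification.)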
+ */ + if (hdev->commands[27] & 0x20) + /* LE Read Remote Used Features Complete */ + events[0] |= 0x08; + + /* If the controller supports the LE Read Local P-256 + * Public Key command, enable the corresponding event. + */ + if (hdev->commands[34] & 0x02) + /* LE Read Local P-256 Public Key Complete */ + events[0] |= 0x80; + + /* If the controller supports the LE Generate DHKey + * command, enable the corresponding event. + */ + if (hdev->commands[34] & 0x04) + events[1] |= 0x01; /* LE Generate DHKey Complete */ + + /* If the controller supports the LE Set Default PHY or + * LE Set PHY commands, enable the corresponding event. + */ + if (hdev->commands[35] & (0x20 | 0x40)) + events[1] |= 0x08; /* LE PHY Update Complete */ + + /* If the controller supports LE Set Extended Scan Parameters + * and LE Set Extended Scan Enable commands, enable the + * corresponding event. + */ + if (use_ext_scan(hdev)) + events[1] |= 0x10; /* LE Extended Advertising Report */ + + /* If the controller supports the LE Extended Advertising + * command, enable the corresponding event. + */ + if (ext_adv_capable(hdev)) + events[2] |= 0x02; /* LE Advertising Set Terminated */ + + if (cis_capable(hdev)) { + events[3] |= 0x01; /* LE CIS Established */ + if (cis_peripheral_capable(hdev)) + events[3] |= 0x02; /* LE CIS Request */ + } + + if (bis_capable(hdev)) { + events[3] |= 0x04; /* LE Create BIG Complete */ + events[3] |= 0x08; /* LE Terminate BIG Complete */ + events[3] |= 0x10; /* LE BIG Sync Established */ + events[3] |= 0x20; /* LE BIG Sync Loss */ + } + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EVENT_MASK, + sizeof(events), events, HCI_CMD_TIMEOUT); +} + +/* Read LE Advertising Channel TX Power */ +static int hci_le_read_adv_tx_power_sync(struct hci_dev *hdev) +{ + if ((hdev->commands[25] & 0x40) && !ext_adv_capable(hdev)) { + /* HCI TS spec forbids mixing of legacy and extended + * advertising commands wherein READ_ADV_TX_POWER is + * also included. So do not call it if extended adv + * is supported otherwise controller will return + * COMMAND_DISALLOWED for extended commands. 
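+ * + * (In practice this means that once the extended advertising commands + * are in use, a legacy command such as this one is expected to fail + * with status 0x0C "Command Disallowed", hence the + * !ext_adv_capable(hdev) gate above.)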
+ */ + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_ADV_TX_POWER, + 0, NULL, HCI_CMD_TIMEOUT); + } + + return 0; +} + +/* Read LE Min/Max Tx Power*/ +static int hci_le_read_tx_power_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[38] & 0x80) || + test_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_TRANSMIT_POWER, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Read LE Accept List Size */ +static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[26] & 0x40)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_ACCEPT_LIST_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Clear LE Accept List */ +static int hci_le_clear_accept_list_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[26] & 0x80)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read LE Resolving List Size */ +static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[34] & 0x40)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_RESOLV_LIST_SIZE, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Clear LE Resolving List */ +static int hci_le_clear_resolv_list_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[34] & 0x20)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_RESOLV_LIST, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Set RPA timeout */ +static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) +{ + __le16 timeout = cpu_to_le16(hdev->rpa_timeout); + + if (!(hdev->commands[35] & 0x04) || + test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, + sizeof(timeout), &timeout, + HCI_CMD_TIMEOUT); +} + +/* Read LE Maximum Data Length */ +static int hci_le_read_max_data_len_sync(struct hci_dev *hdev) +{ + if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_MAX_DATA_LEN, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read LE Suggested Default Data Length */ +static int hci_le_read_def_data_len_sync(struct hci_dev *hdev) +{ + if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_READ_DEF_DATA_LEN, 0, NULL, + HCI_CMD_TIMEOUT); +} + +/* Read LE Number of Supported Advertising Sets */ +static int hci_le_read_num_support_adv_sets_sync(struct hci_dev *hdev) +{ + if (!ext_adv_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, + HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Write LE Host Supported */ +static int hci_set_le_support_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_le_host_supported cp; + + /* LE-only devices do not support explicit enablement */ + if (!lmp_bredr_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + cp.le = 0x01; + cp.simul = 0x00; + } + + if (cp.le == lmp_host_le_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* LE Set Host Feature */ +static int hci_le_set_host_feature_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_set_host_feature cp; + + if (!iso_capable(hdev)) + return 0; + + memset(&cp, 0, sizeof(cp)); + + /* Isochronous Channels (Host Support) */ + cp.bit_number = 32; + cp.bit_value = 1; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_HOST_FEATURE, + 
sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* LE Controller init stage 3 command sequence */ +static const struct hci_init_stage le_init3[] = { + /* HCI_OP_LE_SET_EVENT_MASK */ + HCI_INIT(hci_le_set_event_mask_sync), + /* HCI_OP_LE_READ_ADV_TX_POWER */ + HCI_INIT(hci_le_read_adv_tx_power_sync), + /* HCI_OP_LE_READ_TRANSMIT_POWER */ + HCI_INIT(hci_le_read_tx_power_sync), + /* HCI_OP_LE_READ_ACCEPT_LIST_SIZE */ + HCI_INIT(hci_le_read_accept_list_size_sync), + /* HCI_OP_LE_CLEAR_ACCEPT_LIST */ + HCI_INIT(hci_le_clear_accept_list_sync), + /* HCI_OP_LE_READ_RESOLV_LIST_SIZE */ + HCI_INIT(hci_le_read_resolv_list_size_sync), + /* HCI_OP_LE_CLEAR_RESOLV_LIST */ + HCI_INIT(hci_le_clear_resolv_list_sync), + /* HCI_OP_LE_SET_RPA_TIMEOUT */ + HCI_INIT(hci_le_set_rpa_timeout_sync), + /* HCI_OP_LE_READ_MAX_DATA_LEN */ + HCI_INIT(hci_le_read_max_data_len_sync), + /* HCI_OP_LE_READ_DEF_DATA_LEN */ + HCI_INIT(hci_le_read_def_data_len_sync), + /* HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS */ + HCI_INIT(hci_le_read_num_support_adv_sets_sync), + /* HCI_OP_WRITE_LE_HOST_SUPPORTED */ + HCI_INIT(hci_set_le_support_sync), + /* HCI_OP_LE_SET_HOST_FEATURE */ + HCI_INIT(hci_le_set_host_feature_sync), + {} +}; + +static int hci_init3_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + err = hci_init_stage_sync(hdev, hci_init3); + if (err) + return err; + + if (lmp_le_capable(hdev)) + return hci_init_stage_sync(hdev, le_init3); + + return 0; +} + +static int hci_delete_stored_link_key_sync(struct hci_dev *hdev) +{ + struct hci_cp_delete_stored_link_key cp; + + /* Some Broadcom based Bluetooth controllers do not support the + * Delete Stored Link Key command. They are clearly indicating its + * absence in the bit mask of supported commands. + * + * Check the supported commands and only if the command is marked + * as supported send it. If not supported assume that the controller + * does not have actual support for stored link keys which makes this + * command redundant anyway. + * + * Some controllers indicate that they support handling deleting + * stored link keys, but they don't. The quirk lets a driver + * just disable this command. + */ + if (!(hdev->commands[6] & 0x80) || + test_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks)) + return 0; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, BDADDR_ANY); + cp.delete_all = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_DELETE_STORED_LINK_KEY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_set_event_mask_page_2_sync(struct hci_dev *hdev) +{ + u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + bool changed = false; + + /* Set event mask page 2 if the HCI command for it is supported */ + if (!(hdev->commands[22] & 0x04)) + return 0; + + /* If Connectionless Peripheral Broadcast central role is supported + * enable all necessary events for it. + */ + if (lmp_cpb_central_capable(hdev)) { + events[1] |= 0x40; /* Triggered Clock Capture */ + events[1] |= 0x80; /* Synchronization Train Complete */ + events[2] |= 0x08; /* Truncated Page Complete */ + events[2] |= 0x20; /* CPB Channel Map Change */ + changed = true; + } + + /* If Connectionless Peripheral Broadcast peripheral role is supported + * enable all necessary events for it. 
+ */ + if (lmp_cpb_peripheral_capable(hdev)) { + events[2] |= 0x01; /* Synchronization Train Received */ + events[2] |= 0x02; /* CPB Receive */ + events[2] |= 0x04; /* CPB Timeout */ + events[2] |= 0x10; /* Peripheral Page Response Timeout */ + changed = true; + } + + /* Enable Authenticated Payload Timeout Expired event if supported */ + if (lmp_ping_capable(hdev) || hdev->le_features[0] & HCI_LE_PING) { + events[2] |= 0x80; + changed = true; + } + + /* Some Broadcom based controllers indicate support for Set Event + * Mask Page 2 command, but then actually do not support it. Since + * the default value is all bits set to zero, the command is only + * required if the event mask has to be changed. In case no change + * to the event mask is needed, skip this command. + */ + if (!changed) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_SET_EVENT_MASK_PAGE_2, + sizeof(events), events, HCI_CMD_TIMEOUT); +} + +/* Read local codec list if the HCI command is supported */ +static int hci_read_local_codecs_sync(struct hci_dev *hdev) +{ + if (hdev->commands[45] & 0x04) + hci_read_supported_codecs_v2(hdev); + else if (hdev->commands[29] & 0x20) + hci_read_supported_codecs(hdev); + + return 0; +} + +/* Read local pairing options if the HCI command is supported */ +static int hci_read_local_pairing_opts_sync(struct hci_dev *hdev) +{ + if (!(hdev->commands[41] & 0x08)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_LOCAL_PAIRING_OPTS, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Get MWS transport configuration if the HCI command is supported */ +static int hci_get_mws_transport_config_sync(struct hci_dev *hdev) +{ + if (!mws_transport_config_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_GET_MWS_TRANSPORT_CONFIG, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Check for Synchronization Train support */ +static int hci_read_sync_train_params_sync(struct hci_dev *hdev) +{ + if (!lmp_sync_train_capable(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_READ_SYNC_TRAIN_PARAMS, + 0, NULL, HCI_CMD_TIMEOUT); +} + +/* Enable Secure Connections if supported and configured */ +static int hci_write_sc_support_1_sync(struct hci_dev *hdev) +{ + u8 support = 0x01; + + if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED) || + !bredr_sc_enabled(hdev)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_SC_SUPPORT, + sizeof(support), &support, + HCI_CMD_TIMEOUT); +} + +/* Set erroneous data reporting if supported to the wideband speech + * setting value + */ +static int hci_set_err_data_report_sync(struct hci_dev *hdev) +{ + struct hci_cp_write_def_err_data_reporting cp; + bool enabled = hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED); + + if (!(hdev->commands[18] & 0x08) || + !(hdev->features[0][6] & LMP_ERR_DATA_REPORTING) || + test_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks)) + return 0; + + if (enabled == hdev->err_data_reporting) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.err_data_reporting = enabled ? 
ERR_DATA_REPORTING_ENABLED : + ERR_DATA_REPORTING_DISABLED; + + return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static const struct hci_init_stage hci_init4[] = { + /* HCI_OP_DELETE_STORED_LINK_KEY */ + HCI_INIT(hci_delete_stored_link_key_sync), + /* HCI_OP_SET_EVENT_MASK_PAGE_2 */ + HCI_INIT(hci_set_event_mask_page_2_sync), + /* HCI_OP_READ_LOCAL_CODECS */ + HCI_INIT(hci_read_local_codecs_sync), + /* HCI_OP_READ_LOCAL_PAIRING_OPTS */ + HCI_INIT(hci_read_local_pairing_opts_sync), + /* HCI_OP_GET_MWS_TRANSPORT_CONFIG */ + HCI_INIT(hci_get_mws_transport_config_sync), + /* HCI_OP_READ_SYNC_TRAIN_PARAMS */ + HCI_INIT(hci_read_sync_train_params_sync), + /* HCI_OP_WRITE_SC_SUPPORT */ + HCI_INIT(hci_write_sc_support_1_sync), + /* HCI_OP_WRITE_DEF_ERR_DATA_REPORTING */ + HCI_INIT(hci_set_err_data_report_sync), + {} +}; + +/* Set Suggested Default Data Length to maximum if supported */ +static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_write_def_data_len cp; + + if (!(hdev->le_features[0] & HCI_LE_DATA_LEN_EXT)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.tx_len = cpu_to_le16(hdev->le_max_tx_len); + cp.tx_time = cpu_to_le16(hdev->le_max_tx_time); + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_WRITE_DEF_DATA_LEN, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +/* Set Default PHY parameters if command is supported */ +static int hci_le_set_default_phy_sync(struct hci_dev *hdev) +{ + struct hci_cp_le_set_default_phy cp; + + if (!(hdev->commands[35] & 0x20)) + return 0; + + memset(&cp, 0, sizeof(cp)); + cp.all_phys = 0x00; + cp.tx_phys = hdev->le_tx_def_phys; + cp.rx_phys = hdev->le_rx_def_phys; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static const struct hci_init_stage le_init4[] = { + /* HCI_OP_LE_WRITE_DEF_DATA_LEN */ + HCI_INIT(hci_le_set_write_def_data_len_sync), + /* HCI_OP_LE_SET_DEFAULT_PHY */ + HCI_INIT(hci_le_set_default_phy_sync), + {} +}; + +static int hci_init4_sync(struct hci_dev *hdev) +{ + int err; + + bt_dev_dbg(hdev, ""); + + err = hci_init_stage_sync(hdev, hci_init4); + if (err) + return err; + + if (lmp_le_capable(hdev)) + return hci_init_stage_sync(hdev, le_init4); + + return 0; +} + +static int hci_init_sync(struct hci_dev *hdev) +{ + int err; + + err = hci_init1_sync(hdev); + if (err < 0) + return err; + + if (hci_dev_test_flag(hdev, HCI_SETUP)) + hci_debugfs_create_basic(hdev); + + err = hci_init2_sync(hdev); + if (err < 0) + return err; + + /* HCI_PRIMARY covers both single-mode LE, BR/EDR and dual-mode + * BR/EDR/LE type controllers. AMP controllers only need the + * first two stages of init. + */ + if (hdev->dev_type != HCI_PRIMARY) + return 0; + + err = hci_init3_sync(hdev); + if (err < 0) + return err; + + err = hci_init4_sync(hdev); + if (err < 0) + return err; + + /* This function is only called when the controller is actually in + * configured state. When the controller is marked as unconfigured, + * this initialization procedure is not run. + * + * It means that it is possible that a controller runs through its + * setup phase and then discovers missing settings. If that is the + * case, then this function will not be called. It then will only + * be called during the config phase. + * + * So only when in setup phase or config phase, create the debugfs + * entries and register the SMP channels. 
+ */ + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) + return 0; + + if (hci_dev_test_and_set_flag(hdev, HCI_DEBUGFS_CREATED)) + return 0; + + hci_debugfs_create_common(hdev); + + if (lmp_bredr_capable(hdev)) + hci_debugfs_create_bredr(hdev); + + if (lmp_le_capable(hdev)) + hci_debugfs_create_le(hdev); + + return 0; +} + +#define HCI_QUIRK_BROKEN(_quirk, _desc) { HCI_QUIRK_BROKEN_##_quirk, _desc } + +static const struct { + unsigned long quirk; + const char *desc; +} hci_broken_table[] = { + HCI_QUIRK_BROKEN(LOCAL_COMMANDS, + "HCI Read Local Supported Commands not supported"), + HCI_QUIRK_BROKEN(STORED_LINK_KEY, + "HCI Delete Stored Link Key command is advertised, " + "but not supported."), + HCI_QUIRK_BROKEN(ERR_DATA_REPORTING, + "HCI Read Default Erroneous Data Reporting command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(READ_TRANSMIT_POWER, + "HCI Read Transmit Power Level command is advertised, " + "but not supported."), + HCI_QUIRK_BROKEN(FILTER_CLEAR_ALL, + "HCI Set Event Filter command not supported."), + HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN, + "HCI Enhanced Setup Synchronous Connection command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT, + "HCI LE Set Random Private Address Timeout command is " + "advertised, but not supported.") +}; + +/* This function handles hdev setup stage: + * + * Calls hdev->setup + * Setup address if HCI_QUIRK_USE_BDADDR_PROPERTY is set. + */ +static int hci_dev_setup_sync(struct hci_dev *hdev) +{ + int ret = 0; + bool invalid_bdaddr; + size_t i; + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) + return 0; + + bt_dev_dbg(hdev, ""); + + hci_sock_dev_event(hdev, HCI_DEV_SETUP); + + if (hdev->setup) + ret = hdev->setup(hdev); + + for (i = 0; i < ARRAY_SIZE(hci_broken_table); i++) { + if (test_bit(hci_broken_table[i].quirk, &hdev->quirks)) + bt_dev_warn(hdev, "%s", hci_broken_table[i].desc); + } + + /* The transport driver can set the quirk to mark the + * BD_ADDR invalid before creating the HCI device or in + * its setup callback. + */ + invalid_bdaddr = test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + + if (!ret) { + if (test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks) && + !bacmp(&hdev->public_addr, BDADDR_ANY)) + hci_dev_get_bd_addr_from_property(hdev); + + if ((invalid_bdaddr || + test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) && + bacmp(&hdev->public_addr, BDADDR_ANY) && + hdev->set_bdaddr) { + ret = hdev->set_bdaddr(hdev, &hdev->public_addr); + if (!ret) + invalid_bdaddr = false; + } + } + + /* The transport driver can set these quirks before + * creating the HCI device or in its setup callback. + * + * For the invalid BD_ADDR quirk it is possible that + * it becomes a valid address if the bootloader does + * provide it (see above). + * + * In case any of them is set, the controller has to + * start up as unconfigured. + */ + if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || + invalid_bdaddr) + hci_dev_set_flag(hdev, HCI_UNCONFIGURED); + + /* For an unconfigured controller it is required to + * read at least the version information provided by + * the Read Local Version Information command. + * + * If the set_bdaddr driver callback is provided, then + * also the original Bluetooth public device address + * will be read using the Read BD Address command. 
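+ * + * (That is exactly what hci_unconf_init_sync() below does: unless + * HCI_QUIRK_RAW_DEVICE is set, it runs the minimal hci_init0 table, + * i.e. Read Local Version followed by Read BD Address.)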
+ */ + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) + return hci_unconf_init_sync(hdev); + + return ret; +} + +/* This function handles hdev init stage: + * + * Calls hci_dev_setup_sync to perform setup stage + * Calls hci_init_sync to perform HCI command init sequence + */ +static int hci_dev_init_sync(struct hci_dev *hdev) +{ + int ret; + + bt_dev_dbg(hdev, ""); + + atomic_set(&hdev->cmd_cnt, 1); + set_bit(HCI_INIT, &hdev->flags); + + ret = hci_dev_setup_sync(hdev); + + if (hci_dev_test_flag(hdev, HCI_CONFIG)) { + /* If public address change is configured, ensure that + * the address gets programmed. If the driver does not + * support changing the public address, fail the power + * on procedure. + */ + if (bacmp(&hdev->public_addr, BDADDR_ANY) && + hdev->set_bdaddr) + ret = hdev->set_bdaddr(hdev, &hdev->public_addr); + else + ret = -EADDRNOTAVAIL; + } + + if (!ret) { + if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + ret = hci_init_sync(hdev); + if (!ret && hdev->post_init) + ret = hdev->post_init(hdev); + } + } + + /* If the HCI Reset command is clearing all diagnostic settings, + * then they need to be reprogrammed after the init procedure + * completed. + */ + if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag) + ret = hdev->set_diag(hdev, true); + + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + msft_do_open(hdev); + aosp_do_open(hdev); + } + + clear_bit(HCI_INIT, &hdev->flags); + + return ret; +} + +int hci_dev_open_sync(struct hci_dev *hdev) +{ + int ret; + + bt_dev_dbg(hdev, ""); + + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) { + ret = -ENODEV; + goto done; + } + + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG)) { + /* Check for rfkill but allow the HCI setup stage to + * proceed (which in itself doesn't cause any RF activity). + */ + if (hci_dev_test_flag(hdev, HCI_RFKILLED)) { + ret = -ERFKILL; + goto done; + } + + /* Check for valid public address or a configured static + * random address, but let the HCI setup proceed to + * be able to determine if there is a public address + * or not. + * + * In case of user channel usage, it is not important + * if a public address or static random address is + * available. + * + * This check is only valid for BR/EDR controllers + * since AMP controllers do not have an address. 
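+ * + * (Concretely, power-on is only refused with -EADDRNOTAVAIL when both + * hdev->bdaddr and hdev->static_addr still equal BDADDR_ANY, i.e. + * 00:00:00:00:00:00.)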
+ */ + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hdev->dev_type == HCI_PRIMARY && + !bacmp(&hdev->bdaddr, BDADDR_ANY) && + !bacmp(&hdev->static_addr, BDADDR_ANY)) { + ret = -EADDRNOTAVAIL; + goto done; + } + } + + if (test_bit(HCI_UP, &hdev->flags)) { + ret = -EALREADY; + goto done; + } + + if (hdev->open(hdev)) { + ret = -EIO; + goto done; + } + + set_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_OPEN); + + ret = hci_dev_init_sync(hdev); + if (!ret) { + hci_dev_hold(hdev); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, true); + set_bit(HCI_UP, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_UP); + hci_leds_update_powered(hdev, true); + if (!hci_dev_test_flag(hdev, HCI_SETUP) && + !hci_dev_test_flag(hdev, HCI_CONFIG) && + !hci_dev_test_flag(hdev, HCI_UNCONFIGURED) && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_dev_test_flag(hdev, HCI_MGMT) && + hdev->dev_type == HCI_PRIMARY) { + ret = hci_powered_update_sync(hdev); + mgmt_power_on(hdev, ret); + } + } else { + /* Init failed, cleanup */ + flush_work(&hdev->tx_work); + + /* Since hci_rx_work() may schedule new cmd_work, + * it should be flushed first to avoid an unexpected call of + * hci_cmd_work() + */ + flush_work(&hdev->rx_work); + flush_work(&hdev->cmd_work); + + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->rx_q); + + if (hdev->flush) + hdev->flush(hdev); + + if (hdev->sent_cmd) { + cancel_delayed_work_sync(&hdev->cmd_timer); + kfree_skb(hdev->sent_cmd); + hdev->sent_cmd = NULL; + } + + clear_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_CLOSE); + + hdev->close(hdev); + hdev->flags &= BIT(HCI_RAW); + } + +done: + return ret; +} + +/* This function requires the caller holds hdev->lock */ +static void hci_pend_le_actions_clear(struct hci_dev *hdev) +{ + struct hci_conn_params *p; + + list_for_each_entry(p, &hdev->le_conn_params, list) { + hci_pend_le_list_del_init(p); + if (p->conn) { + hci_conn_drop(p->conn); + hci_conn_put(p->conn); + p->conn = NULL; + } + } + + BT_DBG("All LE pending actions cleared"); +} + +static int hci_dev_shutdown(struct hci_dev *hdev) +{ + int err = 0; + /* Similar to how we first do setup and then set the exclusive access + * bit for userspace, we must first unset userchannel and then clean up. + * Otherwise, the kernel can't properly use the hci channel to clean up + * the controller (some shutdown routines require sending additional + * commands to the controller for example).
+ */ + bool was_userchannel = + hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL); + + if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && + test_bit(HCI_UP, &hdev->flags)) { + /* Execute vendor specific shutdown routine */ + if (hdev->shutdown) + err = hdev->shutdown(hdev); + } + + if (was_userchannel) + hci_dev_set_flag(hdev, HCI_USER_CHANNEL); + + return err; +} + +int hci_dev_close_sync(struct hci_dev *hdev) +{ + bool auto_off; + int err = 0; + + bt_dev_dbg(hdev, ""); + + cancel_delayed_work(&hdev->power_off); + cancel_delayed_work(&hdev->ncmd_timer); + cancel_delayed_work(&hdev->le_scan_disable); + cancel_delayed_work(&hdev->le_scan_restart); + + hci_request_cancel_all(hdev); + + if (hdev->adv_instance_timeout) { + cancel_delayed_work_sync(&hdev->adv_instance_expire); + hdev->adv_instance_timeout = 0; + } + + err = hci_dev_shutdown(hdev); + + if (!test_and_clear_bit(HCI_UP, &hdev->flags)) { + cancel_delayed_work_sync(&hdev->cmd_timer); + return err; + } + + hci_leds_update_powered(hdev, false); + + /* Flush RX and TX works */ + flush_work(&hdev->tx_work); + flush_work(&hdev->rx_work); + + if (hdev->discov_timeout > 0) { + hdev->discov_timeout = 0; + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + } + + if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) + cancel_delayed_work(&hdev->service_cache); + + if (hci_dev_test_flag(hdev, HCI_MGMT)) { + struct adv_info *adv_instance; + + cancel_delayed_work_sync(&hdev->rpa_expired); + + list_for_each_entry(adv_instance, &hdev->adv_instances, list) + cancel_delayed_work_sync(&adv_instance->rpa_expired_cb); + } + + /* Avoid potential lockdep warnings from the *_flush() calls by + * ensuring the workqueue is empty up front. + */ + drain_workqueue(hdev->workqueue); + + hci_dev_lock(hdev); + + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + + auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); + + if (!auto_off && hdev->dev_type == HCI_PRIMARY && + !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && + hci_dev_test_flag(hdev, HCI_MGMT)) + __mgmt_power_off(hdev); + + hci_inquiry_cache_flush(hdev); + hci_pend_le_actions_clear(hdev); + hci_conn_hash_flush(hdev); + /* Prevent data races on hdev->smp_data or hdev->smp_bredr_data */ + smp_unregister(hdev); + hci_dev_unlock(hdev); + + hci_sock_dev_event(hdev, HCI_DEV_DOWN); + + if (!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) { + aosp_do_close(hdev); + msft_do_close(hdev); + } + + if (hdev->flush) + hdev->flush(hdev); + + /* Reset device */ + skb_queue_purge(&hdev->cmd_q); + atomic_set(&hdev->cmd_cnt, 1); + if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && + !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + set_bit(HCI_INIT, &hdev->flags); + hci_reset_sync(hdev); + clear_bit(HCI_INIT, &hdev->flags); + } + + /* flush cmd work */ + flush_work(&hdev->cmd_work); + + /* Drop queues */ + skb_queue_purge(&hdev->rx_q); + skb_queue_purge(&hdev->cmd_q); + skb_queue_purge(&hdev->raw_q); + + /* Drop last sent command */ + if (hdev->sent_cmd) { + cancel_delayed_work_sync(&hdev->cmd_timer); + kfree_skb(hdev->sent_cmd); + hdev->sent_cmd = NULL; + } + + clear_bit(HCI_RUNNING, &hdev->flags); + hci_sock_dev_event(hdev, HCI_DEV_CLOSE); + + /* After this point our queues are empty and no tasks are scheduled. 
 */
+	hdev->close(hdev);
+
+	/* Clear flags */
+	hdev->flags &= BIT(HCI_RAW);
+	hci_dev_clear_volatile_flags(hdev);
+
+	/* Controller radio is available but is currently powered down */
+	hdev->amp_status = AMP_STATUS_POWERED_DOWN;
+
+	memset(hdev->eir, 0, sizeof(hdev->eir));
+	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
+	bacpy(&hdev->random_addr, BDADDR_ANY);
+	hci_codec_list_clear(&hdev->local_codecs);
+
+	hci_dev_put(hdev);
+	return err;
+}
+
+/* This function performs the power on HCI command sequence as follows:
+ *
+ * If controller is already up (HCI_UP) performs hci_powered_update_sync
+ * sequence otherwise run hci_dev_open_sync which will follow with
+ * hci_powered_update_sync after the init sequence is completed.
+ */
+static int hci_power_on_sync(struct hci_dev *hdev)
+{
+	int err;
+
+	if (test_bit(HCI_UP, &hdev->flags) &&
+	    hci_dev_test_flag(hdev, HCI_MGMT) &&
+	    hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) {
+		cancel_delayed_work(&hdev->power_off);
+		return hci_powered_update_sync(hdev);
+	}
+
+	err = hci_dev_open_sync(hdev);
+	if (err < 0)
+		return err;
+
+	/* During the HCI setup phase, a few error conditions are
+	 * ignored and they need to be checked now. If they are still
+	 * valid, it is important to return the device back off.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_RFKILLED) ||
+	    hci_dev_test_flag(hdev, HCI_UNCONFIGURED) ||
+	    (hdev->dev_type == HCI_PRIMARY &&
+	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
+	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
+		hci_dev_clear_flag(hdev, HCI_AUTO_OFF);
+		hci_dev_close_sync(hdev);
+	} else if (hci_dev_test_flag(hdev, HCI_AUTO_OFF)) {
+		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
+				   HCI_AUTO_OFF_TIMEOUT);
+	}
+
+	if (hci_dev_test_and_clear_flag(hdev, HCI_SETUP)) {
+		/* For unconfigured devices, set the HCI_RAW flag
+		 * so that userspace can easily identify them.
+		 */
+		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+			set_bit(HCI_RAW, &hdev->flags);
+
+		/* For fully configured devices, this will send
+		 * the Index Added event. For unconfigured devices,
+		 * it will send the Unconfigured Index Added event.
+		 *
+		 * Devices with HCI_QUIRK_RAW_DEVICE are ignored
+		 * and no event will be sent.
+		 */
+		mgmt_index_added(hdev);
+	} else if (hci_dev_test_and_clear_flag(hdev, HCI_CONFIG)) {
+		/* When the controller is now configured, then it
+		 * is important to clear the HCI_RAW flag.
+		 */
+		if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+			clear_bit(HCI_RAW, &hdev->flags);
+
+		/* Powering on the controller with HCI_CONFIG set only
+		 * happens with the transition from unconfigured to
+		 * configured. This will send the Index Added event.
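+		 *
+		 * For example, a controller that came up unconfigured
+		 * (say, with an invalid public address that userspace
+		 * had to program first) only becomes visible to mgmt
+		 * as a regular, configured index at this point.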
+ */ + mgmt_index_added(hdev); + } + + return 0; +} + +static int hci_remote_name_cancel_sync(struct hci_dev *hdev, bdaddr_t *addr) +{ + struct hci_cp_remote_name_req_cancel cp; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, addr); + + return __hci_cmd_sync_status(hdev, HCI_OP_REMOTE_NAME_REQ_CANCEL, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +int hci_stop_discovery_sync(struct hci_dev *hdev) +{ + struct discovery_state *d = &hdev->discovery; + struct inquiry_entry *e; + int err; + + bt_dev_dbg(hdev, "state %u", hdev->discovery.state); + + if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { + if (test_bit(HCI_INQUIRY, &hdev->flags)) { + err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, + 0, NULL, HCI_CMD_TIMEOUT); + if (err) + return err; + } + + if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { + cancel_delayed_work(&hdev->le_scan_disable); + cancel_delayed_work(&hdev->le_scan_restart); + + err = hci_scan_disable_sync(hdev); + if (err) + return err; + } + + } else { + err = hci_scan_disable_sync(hdev); + if (err) + return err; + } + + /* Resume advertising if it was paused */ + if (use_ll_privacy(hdev)) + hci_resume_advertising_sync(hdev); + + /* No further actions needed for LE-only discovery */ + if (d->type == DISCOV_TYPE_LE) + return 0; + + if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { + e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, + NAME_PENDING); + if (!e) + return 0; + + return hci_remote_name_cancel_sync(hdev, &e->data.bdaddr); + } + + return 0; +} + +static int hci_disconnect_phy_link_sync(struct hci_dev *hdev, u16 handle, + u8 reason) +{ + struct hci_cp_disconn_phy_link cp; + + memset(&cp, 0, sizeof(cp)); + cp.phy_handle = HCI_PHY_HANDLE(handle); + cp.reason = reason; + + return __hci_cmd_sync_status(hdev, HCI_OP_DISCONN_PHY_LINK, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); +} + +static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + struct hci_cp_disconnect cp; + + if (conn->type == AMP_LINK) + return hci_disconnect_phy_link_sync(hdev, conn->handle, reason); + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.reason = reason; + + /* Wait for HCI_EV_DISCONN_COMPLETE not HCI_EV_CMD_STATUS when not + * suspending. + */ + if (!hdev->suspended) + return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, + sizeof(cp), &cp, + HCI_EV_DISCONN_COMPLETE, + HCI_CMD_TIMEOUT, NULL); + + return __hci_cmd_sync_status(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); +} + +static int hci_le_connect_cancel_sync(struct hci_dev *hdev, + struct hci_conn *conn) +{ + if (test_bit(HCI_CONN_SCANNING, &conn->flags)) + return 0; + + if (test_and_set_bit(HCI_CONN_CANCEL, &conn->flags)) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_CREATE_CONN_CANCEL, + 0, NULL, HCI_CMD_TIMEOUT); +} + +static int hci_connect_cancel_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + if (conn->type == LE_LINK) + return hci_le_connect_cancel_sync(hdev, conn); + + if (hdev->hci_ver < BLUETOOTH_VER_1_2) + return 0; + + return __hci_cmd_sync_status(hdev, HCI_OP_CREATE_CONN_CANCEL, + 6, &conn->dst, HCI_CMD_TIMEOUT); +} + +static int hci_reject_sco_sync(struct hci_dev *hdev, struct hci_conn *conn, + u8 reason) +{ + struct hci_cp_reject_sync_conn_req cp; + + memset(&cp, 0, sizeof(cp)); + bacpy(&cp.bdaddr, &conn->dst); + cp.reason = reason; + + /* SCO rejection has its own limited set of + * allowed error values (0x0D-0x0F). 
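+	 * Per the Core spec these are 0x0d "Connection Rejected due to
+	 * Limited Resources", 0x0e "Connection Rejected due to Security
+	 * Reasons" and 0x0f "Connection Rejected due to Unacceptable
+	 * BD_ADDR"; anything outside that range is coerced below.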
+	 */
+	if (reason < 0x0d || reason > 0x0f)
+		cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_SYNC_CONN_REQ,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_reject_conn_sync(struct hci_dev *hdev, struct hci_conn *conn,
+				u8 reason)
+{
+	struct hci_cp_reject_conn_req cp;
+
+	if (conn->type == SCO_LINK || conn->type == ESCO_LINK)
+		return hci_reject_sco_sync(hdev, conn, reason);
+
+	memset(&cp, 0, sizeof(cp));
+	bacpy(&cp.bdaddr, &conn->dst);
+	cp.reason = reason;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_REJECT_CONN_REQ,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason)
+{
+	int err;
+
+	switch (conn->state) {
+	case BT_CONNECTED:
+	case BT_CONFIG:
+		return hci_disconnect_sync(hdev, conn, reason);
+	case BT_CONNECT:
+		err = hci_connect_cancel_sync(hdev, conn);
+		/* Cleanup hci_conn object if it cannot be cancelled as it
+		 * likely means the controller and host stack are out of sync.
+		 */
+		if (err) {
+			hci_dev_lock(hdev);
+			hci_conn_failed(conn, err);
+			hci_dev_unlock(hdev);
+		}
+		return err;
+	case BT_CONNECT2:
+		return hci_reject_conn_sync(hdev, conn, reason);
+	default:
+		conn->state = BT_CLOSED;
+		break;
+	}
+
+	return 0;
+}
+
+static int hci_disconnect_all_sync(struct hci_dev *hdev, u8 reason)
+{
+	struct hci_conn *conn, *tmp;
+	int err;
+
+	list_for_each_entry_safe(conn, tmp, &hdev->conn_hash.list, list) {
+		err = hci_abort_conn_sync(hdev, conn, reason);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
+/* This function performs the power off HCI command sequence as follows:
+ *
+ * Clear Advertising
+ * Stop Discovery
+ * Disconnect all connections
+ * hci_dev_close_sync
+ */
+static int hci_power_off_sync(struct hci_dev *hdev)
+{
+	int err;
+
+	/* If controller is already down there is nothing to do */
+	if (!test_bit(HCI_UP, &hdev->flags))
+		return 0;
+
+	if (test_bit(HCI_ISCAN, &hdev->flags) ||
+	    test_bit(HCI_PSCAN, &hdev->flags)) {
+		err = hci_write_scan_enable_sync(hdev, 0x00);
+		if (err)
+			return err;
+	}
+
+	err = hci_clear_adv_sync(hdev, NULL, false);
+	if (err)
+		return err;
+
+	err = hci_stop_discovery_sync(hdev);
+	if (err)
+		return err;
+
+	/* Terminated due to Power Off */
+	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
+	if (err)
+		return err;
+
+	return hci_dev_close_sync(hdev);
+}
+
+int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
+{
+	if (val)
+		return hci_power_on_sync(hdev);
+
+	return hci_power_off_sync(hdev);
+}
+
+static int hci_write_iac_sync(struct hci_dev *hdev)
+{
+	struct hci_cp_write_current_iac_lap cp;
+
+	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
+		return 0;
+
+	memset(&cp, 0, sizeof(cp));
+
+	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
+		/* Limited discoverable mode */
+		cp.num_iac = min_t(u8, hdev->num_iac, 2);
+		cp.iac_lap[0] = 0x00;	/* LIAC */
+		cp.iac_lap[1] = 0x8b;
+		cp.iac_lap[2] = 0x9e;
+		cp.iac_lap[3] = 0x33;	/* GIAC */
+		cp.iac_lap[4] = 0x8b;
+		cp.iac_lap[5] = 0x9e;
+	} else {
+		/* General discoverable mode */
+		cp.num_iac = 1;
+		cp.iac_lap[0] = 0x33;	/* GIAC */
+		cp.iac_lap[1] = 0x8b;
+		cp.iac_lap[2] = 0x9e;
+	}
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_WRITE_CURRENT_IAC_LAP,
+				     (cp.num_iac * 3) + 1, &cp,
+				     HCI_CMD_TIMEOUT);
+}
+
+int hci_update_discoverable_sync(struct hci_dev *hdev)
+{
+	int err = 0;
+
+	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
+		err = hci_write_iac_sync(hdev);
+		if (err)
+			return err;
+
+		err = hci_update_scan_sync(hdev);
+		if (err)
+			return err;
+
+		err = hci_update_class_sync(hdev);
+		if (err)
+			return err;
+	}
+
+	/* Advertising instances don't use the global discoverable setting, so
+	 * only update AD if advertising was enabled using Set Advertising.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
+		err = hci_update_adv_data_sync(hdev, 0x00);
+		if (err)
+			return err;
+
+		/* Discoverable mode affects the local advertising
+		 * address in limited privacy mode.
+		 */
+		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
+			if (ext_adv_capable(hdev))
+				err = hci_start_ext_adv_sync(hdev, 0x00);
+			else
+				err = hci_enable_advertising_sync(hdev);
+		}
+	}
+
+	return err;
+}
+
+static int update_discoverable_sync(struct hci_dev *hdev, void *data)
+{
+	return hci_update_discoverable_sync(hdev);
+}
+
+int hci_update_discoverable(struct hci_dev *hdev)
+{
+	/* Only queue if it would have any effect */
+	if (hdev_is_powered(hdev) &&
+	    hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
+	    hci_dev_test_flag(hdev, HCI_DISCOVERABLE) &&
+	    hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
+		return hci_cmd_sync_queue(hdev, update_discoverable_sync, NULL,
+					  NULL);
+
+	return 0;
+}
+
+int hci_update_connectable_sync(struct hci_dev *hdev)
+{
+	int err;
+
+	err = hci_update_scan_sync(hdev);
+	if (err)
+		return err;
+
+	/* If BR/EDR is not enabled and we disable advertising as a
+	 * by-product of disabling connectable, we need to update the
+	 * advertising flags.
+	 */
+	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+		err = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance);
+
+	/* Update the advertising parameters if necessary */
+	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
+	    !list_empty(&hdev->adv_instances)) {
+		if (ext_adv_capable(hdev))
+			err = hci_start_ext_adv_sync(hdev,
+						     hdev->cur_adv_instance);
+		else
+			err = hci_enable_advertising_sync(hdev);
+
+		if (err)
+			return err;
+	}
+
+	return hci_update_passive_scan_sync(hdev);
+}
+
+static int hci_inquiry_sync(struct hci_dev *hdev, u8 length)
+{
+	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
+	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
+	struct hci_cp_inquiry cp;
+
+	bt_dev_dbg(hdev, "");
+
+	if (hci_dev_test_flag(hdev, HCI_INQUIRY))
+		return 0;
+
+	hci_dev_lock(hdev);
+	hci_inquiry_cache_flush(hdev);
+	hci_dev_unlock(hdev);
+
+	memset(&cp, 0, sizeof(cp));
+
+	if (hdev->discovery.limited)
+		memcpy(&cp.lap, liac, sizeof(cp.lap));
+	else
+		memcpy(&cp.lap, giac, sizeof(cp.lap));
+
+	cp.length = length;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)
+{
+	u8 own_addr_type;
+	/* Accept list is not used for discovery */
+	u8 filter_policy = 0x00;
+	/* Default is to enable duplicates filter */
+	u8 filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
+	int err;
+
+	bt_dev_dbg(hdev, "");
+
+	/* If controller is scanning, it means the passive scanning is
+	 * running. Thus, we should temporarily stop it in order to set the
+	 * discovery scanning parameters.
+	 */
+	err = hci_scan_disable_sync(hdev);
+	if (err) {
+		bt_dev_err(hdev, "Unable to disable scanning: %d", err);
+		return err;
+	}
+
+	cancel_interleave_scan(hdev);
+
+	/* Pause address resolution for active scan and stop advertising if
+	 * privacy is enabled.
+	 */
+	err = hci_pause_addr_resolution(hdev);
+	if (err)
+		goto failed;
+
+	/* All active scans will be done with either a resolvable private
+	 * address (when privacy feature has been enabled) or non-resolvable
+	 * private address.
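+	 *
+	 * For example, with HCI_PRIVACY set the call below is expected to
+	 * select ADDR_LE_DEV_RANDOM (or the RANDOM_RESOLVED variant on LL
+	 * Privacy capable controllers) and program the RPA, so that our
+	 * scan requests cannot be used to track us.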
+	 */
+	err = hci_update_random_address_sync(hdev, true, scan_use_rpa(hdev),
+					     &own_addr_type);
+	if (err < 0)
+		own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+	if (hci_is_adv_monitoring(hdev)) {
+		/* Duplicate filter should be disabled when some advertisement
+		 * monitor is activated, otherwise AdvMon can only receive one
+		 * advertisement for one peer(*) during active scanning, and
+		 * might report loss to these peers.
+		 *
+		 * Note that different controllers have different meanings of
+		 * |duplicate|. Some of them consider packets with the same
+		 * address as duplicate, and others consider packets with the
+		 * same address and the same RSSI as duplicate. Although in the
+		 * latter case we don't need to disable the duplicate filter,
+		 * active scanning commonly runs only for a short period of
+		 * time, so the power impact should be negligible.
+		 */
+		filter_dup = LE_SCAN_FILTER_DUP_DISABLE;
+	}
+
+	err = hci_start_scan_sync(hdev, LE_SCAN_ACTIVE, interval,
+				  hdev->le_scan_window_discovery,
+				  own_addr_type, filter_policy, filter_dup);
+	if (!err)
+		return err;
+
+failed:
+	/* Resume advertising if it was paused */
+	if (use_ll_privacy(hdev))
+		hci_resume_advertising_sync(hdev);
+
+	/* Resume passive scanning */
+	hci_update_passive_scan_sync(hdev);
+	return err;
+}
+
+static int hci_start_interleaved_discovery_sync(struct hci_dev *hdev)
+{
+	int err;
+
+	bt_dev_dbg(hdev, "");
+
+	err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery * 2);
+	if (err)
+		return err;
+
+	return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+}
+
+int hci_start_discovery_sync(struct hci_dev *hdev)
+{
+	unsigned long timeout;
+	int err;
+
+	bt_dev_dbg(hdev, "type %u", hdev->discovery.type);
+
+	switch (hdev->discovery.type) {
+	case DISCOV_TYPE_BREDR:
+		return hci_inquiry_sync(hdev, DISCOV_BREDR_INQUIRY_LEN);
+	case DISCOV_TYPE_INTERLEAVED:
+		/* When running simultaneous discovery, the LE scanning time
+		 * should occupy the whole discovery time since BR/EDR inquiry
+		 * and LE scanning are scheduled by the controller.
+		 *
+		 * For interleaving discovery in comparison, BR/EDR inquiry
+		 * and LE scanning are done sequentially with separate
+		 * timeouts.
+		 */
+		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
+			     &hdev->quirks)) {
+			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+			/* During simultaneous discovery, we double LE scan
+			 * interval. We must leave some time for the controller
+			 * to do BR/EDR inquiry.
+			 */
+			err = hci_start_interleaved_discovery_sync(hdev);
+			break;
+		}
+
+		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
+		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
+		break;
+	case DISCOV_TYPE_LE:
+		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
+		err = hci_active_scan_sync(hdev, hdev->le_scan_int_discovery);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (err)
+		return err;
+
+	bt_dev_dbg(hdev, "timeout %u ms", jiffies_to_msecs(timeout));
+
+	/* When service discovery is used and the controller has a
+	 * strict duplicate filter, it is important to remember the
+	 * start and duration of the scan. This is required for
+	 * restarting scanning during the discovery phase.
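+	 *
+	 * A later restart can then, for instance, derive the remaining
+	 * scan time from these fields (an illustrative sketch, not code
+	 * taken from this file):
+	 *
+	 *	elapsed = jiffies - hdev->discovery.scan_start;
+	 *	if (elapsed < hdev->discovery.scan_duration)
+	 *		remaining = hdev->discovery.scan_duration - elapsed;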
+	 */
+	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
+	    hdev->discovery.result_filtering) {
+		hdev->discovery.scan_start = jiffies;
+		hdev->discovery.scan_duration = timeout;
+	}
+
+	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
+			   timeout);
+	return 0;
+}
+
+static void hci_suspend_monitor_sync(struct hci_dev *hdev)
+{
+	switch (hci_get_adv_monitor_offload_ext(hdev)) {
+	case HCI_ADV_MONITOR_EXT_MSFT:
+		msft_suspend_sync(hdev);
+		break;
+	default:
+		return;
+	}
+}
+
+/* This function disables discovery and marks it as paused */
+static int hci_pause_discovery_sync(struct hci_dev *hdev)
+{
+	int old_state = hdev->discovery.state;
+	int err;
+
+	/* If discovery is already stopped/stopping/paused there is
+	 * nothing to do.
+	 */
+	if (old_state == DISCOVERY_STOPPED || old_state == DISCOVERY_STOPPING ||
+	    hdev->discovery_paused)
+		return 0;
+
+	hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
+	err = hci_stop_discovery_sync(hdev);
+	if (err)
+		return err;
+
+	hdev->discovery_paused = true;
+	hdev->discovery_old_state = old_state;
+	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
+
+	return 0;
+}
+
+static int hci_update_event_filter_sync(struct hci_dev *hdev)
+{
+	struct bdaddr_list_with_flags *b;
+	u8 scan = SCAN_DISABLED;
+	bool scanning = test_bit(HCI_PSCAN, &hdev->flags);
+	int err;
+
+	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
+		return 0;
+
+	/* Some fake CSR controllers lock up after setting this type of
+	 * filter, so avoid sending the request altogether.
+	 */
+	if (test_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks))
+		return 0;
+
+	/* Always clear event filter when starting */
+	hci_clear_event_filter_sync(hdev);
+
+	list_for_each_entry(b, &hdev->accept_list, list) {
+		if (!(b->flags & HCI_CONN_FLAG_REMOTE_WAKEUP))
+			continue;
+
+		bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
+
+		err = hci_set_event_filter_sync(hdev, HCI_FLT_CONN_SETUP,
+						HCI_CONN_SETUP_ALLOW_BDADDR,
+						&b->bdaddr,
+						HCI_CONN_SETUP_AUTO_ON);
+		if (err)
+			bt_dev_dbg(hdev, "Failed to set event filter for %pMR",
+				   &b->bdaddr);
+		else
+			scan = SCAN_PAGE;
+	}
+
+	if (scan && !scanning)
+		hci_write_scan_enable_sync(hdev, scan);
+	else if (!scan && scanning)
+		hci_write_scan_enable_sync(hdev, scan);
+
+	return 0;
+}
+
+/* This function disables scan (BR and LE) and marks it as paused */
+static int hci_pause_scan_sync(struct hci_dev *hdev)
+{
+	if (hdev->scanning_paused)
+		return 0;
+
+	/* Disable page scan if enabled */
+	if (test_bit(HCI_PSCAN, &hdev->flags))
+		hci_write_scan_enable_sync(hdev, SCAN_DISABLED);
+
+	hci_scan_disable_sync(hdev);
+
+	hdev->scanning_paused = true;
+
+	return 0;
+}
+
+/* This function performs the HCI suspend procedures in the following order:
+ *
+ * Pause discovery (active scanning/inquiry)
+ * Pause Directed Advertising/Advertising
+ * Pause Scanning (passive scanning in case discovery was not active)
+ * Disconnect all connections
+ * Set suspend_status to BT_SUSPEND_DISCONNECT if hdev cannot wakeup
+ * otherwise:
+ * Update event mask (only set events that are allowed to wake up the host)
+ * Update event filter (with devices marked with HCI_CONN_FLAG_REMOTE_WAKEUP)
+ * Update passive scanning (lower duty cycle)
+ * Set suspend_status to BT_SUSPEND_CONFIGURE_WAKE
+ */
+int hci_suspend_sync(struct hci_dev *hdev)
+{
+	int err;
+
+	/* If already marked as suspended there is nothing to do */
+	if (hdev->suspended)
+		return 0;
+
+	/* Mark device as suspended */
+	hdev->suspended = true;
+
+	/* Pause discovery if not already stopped */
+	hci_pause_discovery_sync(hdev);
+
+	/* Pause other advertisements */
+	hci_pause_advertising_sync(hdev);
+
+	/* Suspend monitor filters */
+	hci_suspend_monitor_sync(hdev);
+
+	/* Prevent disconnects from causing scanning to be re-enabled */
+	hci_pause_scan_sync(hdev);
+
+	if (hci_conn_count(hdev)) {
+		/* Soft disconnect everything (power off) */
+		err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
+		if (err) {
+			/* Set state to BT_RUNNING so resume doesn't notify */
+			hdev->suspend_state = BT_RUNNING;
+			hci_resume_sync(hdev);
+			return err;
+		}
+
+		/* Update event mask so only the allowed event can wakeup the
+		 * host.
+		 */
+		hci_set_event_mask_sync(hdev);
+	}
+
+	/* Only configure accept list if disconnect succeeded and wake
+	 * isn't being prevented.
+	 */
+	if (!hdev->wakeup || !hdev->wakeup(hdev)) {
+		hdev->suspend_state = BT_SUSPEND_DISCONNECT;
+		return 0;
+	}
+
+	/* Unpause to take care of updating scanning params */
+	hdev->scanning_paused = false;
+
+	/* Enable event filter for paired devices */
+	hci_update_event_filter_sync(hdev);
+
+	/* Update LE passive scan if enabled */
+	hci_update_passive_scan_sync(hdev);
+
+	/* Pause scan changes again. */
+	hdev->scanning_paused = true;
+
+	hdev->suspend_state = BT_SUSPEND_CONFIGURE_WAKE;
+
+	return 0;
+}
+
+/* This function resumes discovery */
+static int hci_resume_discovery_sync(struct hci_dev *hdev)
+{
+	int err;
+
+	/* If discovery was not paused there is nothing to do */
+	if (!hdev->discovery_paused)
+		return 0;
+
+	hdev->discovery_paused = false;
+
+	hci_discovery_set_state(hdev, DISCOVERY_STARTING);
+
+	err = hci_start_discovery_sync(hdev);
+
+	hci_discovery_set_state(hdev, err ? DISCOVERY_STOPPED :
+				      DISCOVERY_FINDING);
+
+	return err;
+}
+
+static void hci_resume_monitor_sync(struct hci_dev *hdev)
+{
+	switch (hci_get_adv_monitor_offload_ext(hdev)) {
+	case HCI_ADV_MONITOR_EXT_MSFT:
+		msft_resume_sync(hdev);
+		break;
+	default:
+		return;
+	}
+}
+
+/* This function resumes scanning and resets the paused flag */
+static int hci_resume_scan_sync(struct hci_dev *hdev)
+{
+	if (!hdev->scanning_paused)
+		return 0;
+
+	hdev->scanning_paused = false;
+
+	hci_update_scan_sync(hdev);
+
+	/* Reset passive scanning to normal */
+	hci_update_passive_scan_sync(hdev);
+
+	return 0;
+}
+
+/* This function performs the HCI resume procedures in the following order:
+ *
+ * Restore event mask
+ * Clear event filter
+ * Update passive scanning (normal duty cycle)
+ * Resume Directed Advertising/Advertising
+ * Resume discovery (active scanning/inquiry)
+ */
+int hci_resume_sync(struct hci_dev *hdev)
+{
+	/* If not marked as suspended there is nothing to do */
+	if (!hdev->suspended)
+		return 0;
+
+	hdev->suspended = false;
+
+	/* Restore event mask */
+	hci_set_event_mask_sync(hdev);
+
+	/* Clear any event filters and restore scan state */
+	hci_clear_event_filter_sync(hdev);
+
+	/* Resume scanning */
+	hci_resume_scan_sync(hdev);
+
+	/* Resume monitor filters */
+	hci_resume_monitor_sync(hdev);
+
+	/* Resume other advertisements */
+	hci_resume_advertising_sync(hdev);
+
+	/* Resume discovery */
+	hci_resume_discovery_sync(hdev);
+
+	return 0;
+}
+
+static bool conn_use_rpa(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	return hci_dev_test_flag(hdev, HCI_PRIVACY);
+}
+
+static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev,
+						struct hci_conn *conn)
+{
+	struct hci_cp_le_set_ext_adv_params cp;
+	int err;
+	bdaddr_t random_addr;
+	u8 own_addr_type;
+
+	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
+					     &own_addr_type);
+	if (err)
+		return err;
+
+	/* Set require_privacy to false so that the remote device has a
+	 * chance of identifying us.
+	 */
+	err = hci_get_random_address(hdev, false, conn_use_rpa(conn), NULL,
+				     &own_addr_type, &random_addr);
+	if (err)
+		return err;
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND);
+	cp.own_addr_type = own_addr_type;
+	cp.channel_map = hdev->le_adv_channel_map;
+	cp.tx_power = HCI_TX_POWER_INVALID;
+	cp.primary_phy = HCI_ADV_PHY_1M;
+	cp.secondary_phy = HCI_ADV_PHY_1M;
+	cp.handle = 0x00; /* Use instance 0 for directed adv */
+	cp.own_addr_type = own_addr_type;
+	cp.peer_addr_type = conn->dst_type;
+	bacpy(&cp.peer_addr, &conn->dst);
+
+	/* As per Core Spec 5.2 Vol 2, Part E, Sec 7.8.53, the
+	 * advertising_event_property LE_LEGACY_ADV_DIRECT_IND
+	 * does not support advertising data. When the advertising set
+	 * already contains some, the controller shall return the error
+	 * code 'Invalid HCI Command Parameters' (0x12).
+	 * So it is required to remove the adv set for handle 0x00, since
+	 * we use instance 0 for directed adv.
+	 */
+	err = hci_remove_ext_adv_instance_sync(hdev, cp.handle, NULL);
+	if (err)
+		return err;
+
+	err = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_ADV_PARAMS,
+				    sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+	if (err)
+		return err;
+
+	/* Check if random address need to be updated */
+	if (own_addr_type == ADDR_LE_DEV_RANDOM &&
+	    bacmp(&random_addr, BDADDR_ANY) &&
+	    bacmp(&random_addr, &hdev->random_addr)) {
+		err = hci_set_adv_set_random_addr_sync(hdev, 0x00,
+						       &random_addr);
+		if (err)
+			return err;
+	}
+
+	return hci_enable_ext_advertising_sync(hdev, 0x00);
+}
+
+static int hci_le_directed_advertising_sync(struct hci_dev *hdev,
+					    struct hci_conn *conn)
+{
+	struct hci_cp_le_set_adv_param cp;
+	u8 status;
+	u8 own_addr_type;
+	u8 enable;
+
+	if (ext_adv_capable(hdev))
+		return hci_le_ext_directed_advertising_sync(hdev, conn);
+
+	/* Clear the HCI_LE_ADV bit temporarily so that the
+	 * hci_update_random_address knows that it's safe to go ahead
+	 * and write a new random address. The flag will be set back on
+	 * as soon as the SET_ADV_ENABLE HCI command completes.
+	 */
+	hci_dev_clear_flag(hdev, HCI_LE_ADV);
+
+	/* Set require_privacy to false so that the remote device has a
+	 * chance of identifying us.
+	 */
+	status = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
+						&own_addr_type);
+	if (status)
+		return status;
+
+	memset(&cp, 0, sizeof(cp));
+
+	/* Some controllers might reject the command if intervals are not
+	 * within range for undirected advertising.
+	 * BCM20702A0 is known to be affected by this.
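+	 *
+	 * Note that 0x0020 below is in units of 0.625 ms, i.e. a 20 ms
+	 * advertising interval, which is the minimum the specification
+	 * permits for this advertising type.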
+ */ + cp.min_interval = cpu_to_le16(0x0020); + cp.max_interval = cpu_to_le16(0x0020); + + cp.type = LE_ADV_DIRECT_IND; + cp.own_address_type = own_addr_type; + cp.direct_addr_type = conn->dst_type; + bacpy(&cp.direct_addr, &conn->dst); + cp.channel_map = hdev->le_adv_channel_map; + + status = __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_PARAM, + sizeof(cp), &cp, HCI_CMD_TIMEOUT); + if (status) + return status; + + enable = 0x01; + + return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE, + sizeof(enable), &enable, HCI_CMD_TIMEOUT); +} + +static void set_ext_conn_params(struct hci_conn *conn, + struct hci_cp_le_ext_conn_param *p) +{ + struct hci_dev *hdev = conn->hdev; + + memset(p, 0, sizeof(*p)); + + p->scan_interval = cpu_to_le16(hdev->le_scan_int_connect); + p->scan_window = cpu_to_le16(hdev->le_scan_window_connect); + p->conn_interval_min = cpu_to_le16(conn->le_conn_min_interval); + p->conn_interval_max = cpu_to_le16(conn->le_conn_max_interval); + p->conn_latency = cpu_to_le16(conn->le_conn_latency); + p->supervision_timeout = cpu_to_le16(conn->le_supv_timeout); + p->min_ce_len = cpu_to_le16(0x0000); + p->max_ce_len = cpu_to_le16(0x0000); +} + +static int hci_le_ext_create_conn_sync(struct hci_dev *hdev, + struct hci_conn *conn, u8 own_addr_type) +{ + struct hci_cp_le_ext_create_conn *cp; + struct hci_cp_le_ext_conn_param *p; + u8 data[sizeof(*cp) + sizeof(*p) * 3]; + u32 plen; + + cp = (void *)data; + p = (void *)cp->data; + + memset(cp, 0, sizeof(*cp)); + + bacpy(&cp->peer_addr, &conn->dst); + cp->peer_addr_type = conn->dst_type; + cp->own_addr_type = own_addr_type; + + plen = sizeof(*cp); + + if (scan_1m(hdev)) { + cp->phys |= LE_SCAN_PHY_1M; + set_ext_conn_params(conn, p); + + p++; + plen += sizeof(*p); + } + + if (scan_2m(hdev)) { + cp->phys |= LE_SCAN_PHY_2M; + set_ext_conn_params(conn, p); + + p++; + plen += sizeof(*p); + } + + if (scan_coded(hdev)) { + cp->phys |= LE_SCAN_PHY_CODED; + set_ext_conn_params(conn, p); + + plen += sizeof(*p); + } + + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_EXT_CREATE_CONN, + plen, data, + HCI_EV_LE_ENHANCED_CONN_COMPLETE, + conn->conn_timeout, NULL); +} + +int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + struct hci_cp_le_create_conn cp; + struct hci_conn_params *params; + u8 own_addr_type; + int err; + + /* If requested to connect as peripheral use directed advertising */ + if (conn->role == HCI_ROLE_SLAVE) { + /* If we're active scanning and simultaneous roles is not + * enabled simply reject the attempt. + */ + if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && + hdev->le_scan_type == LE_SCAN_ACTIVE && + !hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) { + hci_conn_del(conn); + return -EBUSY; + } + + /* Pause advertising while doing directed advertising. */ + hci_pause_advertising_sync(hdev); + + err = hci_le_directed_advertising_sync(hdev, conn); + goto done; + } + + /* Disable advertising if simultaneous roles is not in use. 
 */
+	if (!hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES))
+		hci_pause_advertising_sync(hdev);
+
+	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
+	if (params) {
+		conn->le_conn_min_interval = params->conn_min_interval;
+		conn->le_conn_max_interval = params->conn_max_interval;
+		conn->le_conn_latency = params->conn_latency;
+		conn->le_supv_timeout = params->supervision_timeout;
+	} else {
+		conn->le_conn_min_interval = hdev->le_conn_min_interval;
+		conn->le_conn_max_interval = hdev->le_conn_max_interval;
+		conn->le_conn_latency = hdev->le_conn_latency;
+		conn->le_supv_timeout = hdev->le_supv_timeout;
+	}
+
+	/* If controller is scanning, we stop it since some controllers are
+	 * not able to scan and connect at the same time. Also set the
+	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
+	 * handler for scan disabling knows to set the correct discovery
+	 * state.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
+		hci_scan_disable_sync(hdev);
+		hci_dev_set_flag(hdev, HCI_LE_SCAN_INTERRUPTED);
+	}
+
+	/* Update random address, but set require_privacy to false so
+	 * that we never connect with a non-resolvable address.
+	 */
+	err = hci_update_random_address_sync(hdev, false, conn_use_rpa(conn),
+					     &own_addr_type);
+	if (err)
+		goto done;
+
+	if (use_ext_conn(hdev)) {
+		err = hci_le_ext_create_conn_sync(hdev, conn, own_addr_type);
+		goto done;
+	}
+
+	memset(&cp, 0, sizeof(cp));
+
+	cp.scan_interval = cpu_to_le16(hdev->le_scan_int_connect);
+	cp.scan_window = cpu_to_le16(hdev->le_scan_window_connect);
+
+	bacpy(&cp.peer_addr, &conn->dst);
+	cp.peer_addr_type = conn->dst_type;
+	cp.own_address_type = own_addr_type;
+	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
+	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
+	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
+	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
+	cp.min_ce_len = cpu_to_le16(0x0000);
+	cp.max_ce_len = cpu_to_le16(0x0000);
+
+	/* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E page 2261:
+	 *
+	 * If this event is unmasked and the HCI_LE_Connection_Complete event
+	 * is unmasked, only the HCI_LE_Enhanced_Connection_Complete event is
+	 * sent when a new connection has been created.
+	 */
+	err = __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CONN,
+				       sizeof(cp), &cp,
+				       use_enhanced_conn_complete(hdev) ?
+				       HCI_EV_LE_ENHANCED_CONN_COMPLETE :
+				       HCI_EV_LE_CONN_COMPLETE,
+				       conn->conn_timeout, NULL);
+
+done:
+	if (err == -ETIMEDOUT)
+		hci_le_connect_cancel_sync(hdev, conn);
+
+	/* Re-enable advertising after the connection attempt is finished.
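+	 * This pairs with the hci_pause_advertising_sync() calls above and
+	 * should be a no-op if advertising was never actually paused.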
 */
+	hci_resume_advertising_sync(hdev);
+	return err;
+}
+
+int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle)
+{
+	struct hci_cp_le_remove_cig cp;
+
+	memset(&cp, 0, sizeof(cp));
+	cp.cig_id = handle;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_REMOVE_CIG, sizeof(cp),
+				     &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_le_big_terminate_sync(struct hci_dev *hdev, u8 handle)
+{
+	struct hci_cp_le_big_term_sync cp;
+
+	memset(&cp, 0, sizeof(cp));
+	cp.handle = handle;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_BIG_TERM_SYNC,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)
+{
+	struct hci_cp_le_pa_term_sync cp;
+
+	memset(&cp, 0, sizeof(cp));
+	cp.handle = cpu_to_le16(handle);
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,
+				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
+}
+
+int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
+			   bool use_rpa, struct adv_info *adv_instance,
+			   u8 *own_addr_type, bdaddr_t *rand_addr)
+{
+	int err;
+
+	bacpy(rand_addr, BDADDR_ANY);
+
+	/* If privacy is enabled use a resolvable private address. If
+	 * current RPA has expired then generate a new one.
+	 */
+	if (use_rpa) {
+		/* If the controller supports LL Privacy, use own address
+		 * type 0x03 (ADDR_LE_DEV_RANDOM_RESOLVED).
+		 */
+		if (use_ll_privacy(hdev))
+			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED;
+		else
+			*own_addr_type = ADDR_LE_DEV_RANDOM;
+
+		if (adv_instance) {
+			if (adv_rpa_valid(adv_instance))
+				return 0;
+		} else {
+			if (rpa_valid(hdev))
+				return 0;
+		}
+
+		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
+		if (err < 0) {
+			bt_dev_err(hdev, "failed to generate new RPA");
+			return err;
+		}
+
+		bacpy(rand_addr, &hdev->rpa);
+
+		return 0;
+	}
+
+	/* In case of required privacy without resolvable private address,
+	 * use a non-resolvable private address. This is useful for
+	 * non-connectable advertising.
+	 */
+	if (require_privacy) {
+		bdaddr_t nrpa;
+
+		while (true) {
+			/* The non-resolvable private address is generated
+			 * from six random bytes with the two most significant
+			 * bits cleared.
+			 */
+			get_random_bytes(&nrpa, 6);
+			nrpa.b[5] &= 0x3f;
+
+			/* The non-resolvable private address shall not be
+			 * equal to the public address.
+			 */
+			if (bacmp(&hdev->bdaddr, &nrpa))
+				break;
+		}
+
+		*own_addr_type = ADDR_LE_DEV_RANDOM;
+		bacpy(rand_addr, &nrpa);
+
+		return 0;
+	}
+
+	/* No privacy so use a public address. */
+	*own_addr_type = ADDR_LE_DEV_PUBLIC;
+
+	return 0;
+}
+
+static int _update_adv_data_sync(struct hci_dev *hdev, void *data)
+{
+	u8 instance = PTR_ERR(data);
+
+	return hci_update_adv_data_sync(hdev, instance);
+}
+
+int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
+{
+	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
+				  ERR_PTR(instance), NULL);
+}
diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c
new file mode 100644
index 000000000..633b82d54
--- /dev/null
+++ b/net/bluetooth/hci_sysfs.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Bluetooth HCI driver model support.
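+ *
+ * Controllers are exposed under /sys/class/bluetooth as hci<N>, and
+ * each connection is added as a child device named "<hci name>:<handle>"
+ * (see hci_conn_add_sysfs() below), e.g. hci0:42.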
 */
+
+#include <linux/module.h>
+
+#include <net/bluetooth/bluetooth.h>
+#include <net/bluetooth/hci_core.h>
+
+static struct class *bt_class;
+
+static void bt_link_release(struct device *dev)
+{
+	struct hci_conn *conn = to_hci_conn(dev);
+	kfree(conn);
+}
+
+static const struct device_type bt_link = {
+	.name    = "link",
+	.release = bt_link_release,
+};
+
+/*
+ * The rfcomm tty device will possibly remain even when the conn
+ * is down, and sysfs doesn't support moving zombie devices,
+ * so we should move the device before the conn device is destroyed.
+ */
+static int __match_tty(struct device *dev, void *data)
+{
+	return !strncmp(dev_name(dev), "rfcomm", 6);
+}
+
+void hci_conn_init_sysfs(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	bt_dev_dbg(hdev, "conn %p", conn);
+
+	conn->dev.type = &bt_link;
+	conn->dev.class = bt_class;
+	conn->dev.parent = &hdev->dev;
+
+	device_initialize(&conn->dev);
+}
+
+void hci_conn_add_sysfs(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	bt_dev_dbg(hdev, "conn %p", conn);
+
+	if (device_is_registered(&conn->dev))
+		return;
+
+	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);
+
+	if (device_add(&conn->dev) < 0)
+		bt_dev_err(hdev, "failed to register connection device");
+}
+
+void hci_conn_del_sysfs(struct hci_conn *conn)
+{
+	struct hci_dev *hdev = conn->hdev;
+
+	bt_dev_dbg(hdev, "conn %p", conn);
+
+	if (!device_is_registered(&conn->dev)) {
+		/* If device_add() has *not* succeeded, use *only* put_device()
+		 * to drop the reference count.
+		 */
+		put_device(&conn->dev);
+		return;
+	}
+
+	while (1) {
+		struct device *dev;
+
+		dev = device_find_child(&conn->dev, NULL, __match_tty);
+		if (!dev)
+			break;
+		device_move(dev, NULL, DPM_ORDER_DEV_LAST);
+		put_device(dev);
+	}
+
+	device_unregister(&conn->dev);
+}
+
+static void bt_host_release(struct device *dev)
+{
+	struct hci_dev *hdev = to_hci_dev(dev);
+
+	if (hci_dev_test_flag(hdev, HCI_UNREGISTER))
+		hci_release_dev(hdev);
+	else
+		kfree(hdev);
+	module_put(THIS_MODULE);
+}
+
+static const struct device_type bt_host = {
+	.name    = "host",
+	.release = bt_host_release,
+};
+
+void hci_init_sysfs(struct hci_dev *hdev)
+{
+	struct device *dev = &hdev->dev;
+
+	dev->type = &bt_host;
+	dev->class = bt_class;
+
+	__module_get(THIS_MODULE);
+	device_initialize(dev);
+}
+
+int __init bt_sysfs_init(void)
+{
+	bt_class = class_create(THIS_MODULE, "bluetooth");
+
+	return PTR_ERR_OR_ZERO(bt_class);
+}
+
+void bt_sysfs_cleanup(void)
+{
+	class_destroy(bt_class);
+}
diff --git a/net/bluetooth/hidp/Kconfig b/net/bluetooth/hidp/Kconfig
new file mode 100644
index 000000000..14100f341
--- /dev/null
+++ b/net/bluetooth/hidp/Kconfig
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config BT_HIDP
+	tristate "HIDP protocol support"
+	depends on BT_BREDR && INPUT
+	select HID
+	help
+	  HIDP (Human Interface Device Protocol) is a transport layer
+	  for HID reports. HIDP is required for the Bluetooth Human
+	  Interface Device Profile.
+
+	  Say Y here to compile HIDP support into the kernel or say M to
+	  compile it as a module (hidp).
+ diff --git a/net/bluetooth/hidp/Makefile b/net/bluetooth/hidp/Makefile new file mode 100644 index 000000000..f41b0aa02 --- /dev/null +++ b/net/bluetooth/hidp/Makefile @@ -0,0 +1,8 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Linux Bluetooth HIDP layer +# + +obj-$(CONFIG_BT_HIDP) += hidp.o + +hidp-objs := core.o sock.o diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c new file mode 100644 index 000000000..82cc15ad9 --- /dev/null +++ b/net/bluetooth/hidp/core.c @@ -0,0 +1,1480 @@ +/* + HIDP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org> + Copyright (C) 2013 David Herrmann <dh.herrmann@gmail.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <linux/kref.h> +#include <linux/module.h> +#include <linux/file.h> +#include <linux/kthread.h> +#include <linux/hidraw.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> + +#include "hidp.h" + +#define VERSION "1.2" + +static DECLARE_RWSEM(hidp_session_sem); +static DECLARE_WAIT_QUEUE_HEAD(hidp_session_wq); +static LIST_HEAD(hidp_session_list); + +static unsigned char hidp_keycode[256] = { + 0, 0, 0, 0, 30, 48, 46, 32, 18, 33, 34, 35, 23, 36, + 37, 38, 50, 49, 24, 25, 16, 19, 31, 20, 22, 47, 17, 45, + 21, 44, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 1, + 14, 15, 57, 12, 13, 26, 27, 43, 43, 39, 40, 41, 51, 52, + 53, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 87, 88, + 99, 70, 119, 110, 102, 104, 111, 107, 109, 106, 105, 108, 103, 69, + 98, 55, 74, 78, 96, 79, 80, 81, 75, 76, 77, 71, 72, 73, + 82, 83, 86, 127, 116, 117, 183, 184, 185, 186, 187, 188, 189, 190, + 191, 192, 193, 194, 134, 138, 130, 132, 128, 129, 131, 137, 133, 135, + 136, 113, 115, 114, 0, 0, 0, 121, 0, 89, 93, 124, 92, 94, + 95, 0, 0, 0, 122, 123, 90, 91, 85, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 29, 42, 56, 125, 97, 54, 100, 126, 164, 166, 165, 163, 161, 115, + 114, 113, 150, 158, 159, 128, 136, 177, 178, 176, 142, 152, 173, 140 +}; + +static unsigned char hidp_mkeyspat[] = { 0x01, 0x01, 0x01, 0x01, 0x01, 0x01 }; + +static int hidp_session_probe(struct l2cap_conn *conn, + struct l2cap_user *user); +static void hidp_session_remove(struct l2cap_conn *conn, + struct l2cap_user *user); +static int hidp_session_thread(void *arg); +static void hidp_session_terminate(struct hidp_session *s); + +static void hidp_copy_session(struct hidp_session 
 *session, struct hidp_conninfo *ci)
+{
+	u32 valid_flags = 0;
+	memset(ci, 0, sizeof(*ci));
+	bacpy(&ci->bdaddr, &session->bdaddr);
+
+	ci->flags = session->flags & valid_flags;
+	ci->state = BT_CONNECTED;
+
+	if (session->input) {
+		ci->vendor  = session->input->id.vendor;
+		ci->product = session->input->id.product;
+		ci->version = session->input->id.version;
+		if (session->input->name)
+			strscpy(ci->name, session->input->name, 128);
+		else
+			strscpy(ci->name, "HID Boot Device", 128);
+	} else if (session->hid) {
+		ci->vendor  = session->hid->vendor;
+		ci->product = session->hid->product;
+		ci->version = session->hid->version;
+		strscpy(ci->name, session->hid->name, 128);
+	}
+}
+
+/* assemble skb, queue message on @transmit and wake up the session thread */
+static int hidp_send_message(struct hidp_session *session, struct socket *sock,
+			     struct sk_buff_head *transmit, unsigned char hdr,
+			     const unsigned char *data, int size)
+{
+	struct sk_buff *skb;
+	struct sock *sk = sock->sk;
+	int ret;
+
+	BT_DBG("session %p data %p size %d", session, data, size);
+
+	if (atomic_read(&session->terminate))
+		return -EIO;
+
+	skb = alloc_skb(size + 1, GFP_ATOMIC);
+	if (!skb) {
+		BT_ERR("Can't allocate memory for new frame");
+		return -ENOMEM;
+	}
+
+	skb_put_u8(skb, hdr);
+	if (data && size > 0) {
+		skb_put_data(skb, data, size);
+		ret = size;
+	} else {
+		ret = 0;
+	}
+
+	skb_queue_tail(transmit, skb);
+	wake_up_interruptible(sk_sleep(sk));
+
+	return ret;
+}
+
+static int hidp_send_ctrl_message(struct hidp_session *session,
+				  unsigned char hdr, const unsigned char *data,
+				  int size)
+{
+	return hidp_send_message(session, session->ctrl_sock,
+				 &session->ctrl_transmit, hdr, data, size);
+}
+
+static int hidp_send_intr_message(struct hidp_session *session,
+				  unsigned char hdr, const unsigned char *data,
+				  int size)
+{
+	return hidp_send_message(session, session->intr_sock,
+				 &session->intr_transmit, hdr, data, size);
+}
+
+static int hidp_input_event(struct input_dev *dev, unsigned int type,
+			    unsigned int code, int value)
+{
+	struct hidp_session *session = input_get_drvdata(dev);
+	unsigned char newleds;
+	unsigned char hdr, data[2];
+
+	BT_DBG("session %p type %d code %d value %d",
+	       session, type, code, value);
+
+	if (type != EV_LED)
+		return -1;
+
+	newleds = (!!test_bit(LED_KANA,    dev->led) << 4) |
+		  (!!test_bit(LED_COMPOSE, dev->led) << 3) |
+		  (!!test_bit(LED_SCROLLL, dev->led) << 2) |
+		  (!!test_bit(LED_CAPSL,   dev->led) << 1) |
+		  (!!test_bit(LED_NUML,    dev->led) << 0);
+
+	if (session->leds == newleds)
+		return 0;
+
+	session->leds = newleds;
+
+	hdr = HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT;
+	data[0] = 0x01;
+	data[1] = newleds;
+
+	return hidp_send_intr_message(session, hdr, data, 2);
+}
+
+static void hidp_input_report(struct hidp_session *session, struct sk_buff *skb)
+{
+	struct input_dev *dev = session->input;
+	unsigned char *keys = session->keys;
+	unsigned char *udata = skb->data + 1;
+	signed char *sdata = skb->data + 1;
+	int i, size = skb->len - 1;
+
+	switch (skb->data[0]) {
+	case 0x01:	/* Keyboard report */
+		for (i = 0; i < 8; i++)
+			input_report_key(dev, hidp_keycode[i + 224],
+					 (udata[0] >> i) & 1);
+
+		/* If all the key codes have been set to 0x01, it means
+		 * too many keys were pressed at the same time.
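+		 * (0x01 is the HID ErrorRollOver usage; hidp_mkeyspat above
+		 * is exactly six such bytes, one per key slot of a boot
+		 * protocol keyboard report.)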
*/ + if (!memcmp(udata + 2, hidp_mkeyspat, 6)) + break; + + for (i = 2; i < 8; i++) { + if (keys[i] > 3 && memscan(udata + 2, keys[i], 6) == udata + 8) { + if (hidp_keycode[keys[i]]) + input_report_key(dev, hidp_keycode[keys[i]], 0); + else + BT_ERR("Unknown key (scancode %#x) released.", keys[i]); + } + + if (udata[i] > 3 && memscan(keys + 2, udata[i], 6) == keys + 8) { + if (hidp_keycode[udata[i]]) + input_report_key(dev, hidp_keycode[udata[i]], 1); + else + BT_ERR("Unknown key (scancode %#x) pressed.", udata[i]); + } + } + + memcpy(keys, udata, 8); + break; + + case 0x02: /* Mouse report */ + input_report_key(dev, BTN_LEFT, sdata[0] & 0x01); + input_report_key(dev, BTN_RIGHT, sdata[0] & 0x02); + input_report_key(dev, BTN_MIDDLE, sdata[0] & 0x04); + input_report_key(dev, BTN_SIDE, sdata[0] & 0x08); + input_report_key(dev, BTN_EXTRA, sdata[0] & 0x10); + + input_report_rel(dev, REL_X, sdata[1]); + input_report_rel(dev, REL_Y, sdata[2]); + + if (size > 3) + input_report_rel(dev, REL_WHEEL, sdata[3]); + break; + } + + input_sync(dev); +} + +static int hidp_get_raw_report(struct hid_device *hid, + unsigned char report_number, + unsigned char *data, size_t count, + unsigned char report_type) +{ + struct hidp_session *session = hid->driver_data; + struct sk_buff *skb; + size_t len; + int numbered_reports = hid->report_enum[report_type].numbered; + int ret; + + if (atomic_read(&session->terminate)) + return -EIO; + + switch (report_type) { + case HID_FEATURE_REPORT: + report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_FEATURE; + break; + case HID_INPUT_REPORT: + report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_INPUT; + break; + case HID_OUTPUT_REPORT: + report_type = HIDP_TRANS_GET_REPORT | HIDP_DATA_RTYPE_OUPUT; + break; + default: + return -EINVAL; + } + + if (mutex_lock_interruptible(&session->report_mutex)) + return -ERESTARTSYS; + + /* Set up our wait, and send the report request to the device. */ + session->waiting_report_type = report_type & HIDP_DATA_RTYPE_MASK; + session->waiting_report_number = numbered_reports ? report_number : -1; + set_bit(HIDP_WAITING_FOR_RETURN, &session->flags); + data[0] = report_number; + ret = hidp_send_ctrl_message(session, report_type, data, 1); + if (ret < 0) + goto err; + + /* Wait for the return of the report. The returned report + gets put in session->report_return. */ + while (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) && + !atomic_read(&session->terminate)) { + int res; + + res = wait_event_interruptible_timeout(session->report_queue, + !test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) + || atomic_read(&session->terminate), + 5*HZ); + if (res == 0) { + /* timeout */ + ret = -EIO; + goto err; + } + if (res < 0) { + /* signal */ + ret = -ERESTARTSYS; + goto err; + } + } + + skb = session->report_return; + if (skb) { + len = skb->len < count ? skb->len : count; + memcpy(data, skb->data, len); + + kfree_skb(skb); + session->report_return = NULL; + } else { + /* Device returned a HANDSHAKE, indicating protocol error. 
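+		 * (See hidp_process_handshake(): an error handshake clears
+		 * HIDP_WAITING_FOR_RETURN and wakes us up without queuing
+		 * an skb, which is how this branch is reached.)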
*/ + len = -EIO; + } + + clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); + mutex_unlock(&session->report_mutex); + + return len; + +err: + clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); + mutex_unlock(&session->report_mutex); + return ret; +} + +static int hidp_set_raw_report(struct hid_device *hid, unsigned char reportnum, + unsigned char *data, size_t count, + unsigned char report_type) +{ + struct hidp_session *session = hid->driver_data; + int ret; + + switch (report_type) { + case HID_FEATURE_REPORT: + report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_FEATURE; + break; + case HID_INPUT_REPORT: + report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_INPUT; + break; + case HID_OUTPUT_REPORT: + report_type = HIDP_TRANS_SET_REPORT | HIDP_DATA_RTYPE_OUPUT; + break; + default: + return -EINVAL; + } + + if (mutex_lock_interruptible(&session->report_mutex)) + return -ERESTARTSYS; + + /* Set up our wait, and send the report request to the device. */ + data[0] = reportnum; + set_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); + ret = hidp_send_ctrl_message(session, report_type, data, count); + if (ret < 0) + goto err; + + /* Wait for the ACK from the device. */ + while (test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags) && + !atomic_read(&session->terminate)) { + int res; + + res = wait_event_interruptible_timeout(session->report_queue, + !test_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags) + || atomic_read(&session->terminate), + 10*HZ); + if (res == 0) { + /* timeout */ + ret = -EIO; + goto err; + } + if (res < 0) { + /* signal */ + ret = -ERESTARTSYS; + goto err; + } + } + + if (!session->output_report_success) { + ret = -EIO; + goto err; + } + + ret = count; + +err: + clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags); + mutex_unlock(&session->report_mutex); + return ret; +} + +static int hidp_output_report(struct hid_device *hid, __u8 *data, size_t count) +{ + struct hidp_session *session = hid->driver_data; + + return hidp_send_intr_message(session, + HIDP_TRANS_DATA | HIDP_DATA_RTYPE_OUPUT, + data, count); +} + +static int hidp_raw_request(struct hid_device *hid, unsigned char reportnum, + __u8 *buf, size_t len, unsigned char rtype, + int reqtype) +{ + switch (reqtype) { + case HID_REQ_GET_REPORT: + return hidp_get_raw_report(hid, reportnum, buf, len, rtype); + case HID_REQ_SET_REPORT: + return hidp_set_raw_report(hid, reportnum, buf, len, rtype); + default: + return -EIO; + } +} + +static void hidp_idle_timeout(struct timer_list *t) +{ + struct hidp_session *session = from_timer(session, t, timer); + + /* The HIDP user-space API only contains calls to add and remove + * devices. There is no way to forward events of any kind. Therefore, + * we have to forcefully disconnect a device on idle-timeouts. This is + * unfortunate and weird API design, but it is spec-compliant and + * required for backwards-compatibility. Hence, on idle-timeout, we + * signal driver-detach events, so poll() will be woken up with an + * error-condition on both sockets. 
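+	 * The timer itself is (re)armed by hidp_set_timer() on traffic;
+	 * idle_to is in seconds, so e.g. idle_to = 60 tears the session
+	 * down after one minute without reports.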
+ */ + + session->intr_sock->sk->sk_err = EUNATCH; + session->ctrl_sock->sk->sk_err = EUNATCH; + wake_up_interruptible(sk_sleep(session->intr_sock->sk)); + wake_up_interruptible(sk_sleep(session->ctrl_sock->sk)); + + hidp_session_terminate(session); +} + +static void hidp_set_timer(struct hidp_session *session) +{ + if (session->idle_to > 0) + mod_timer(&session->timer, jiffies + HZ * session->idle_to); +} + +static void hidp_del_timer(struct hidp_session *session) +{ + if (session->idle_to > 0) + del_timer_sync(&session->timer); +} + +static void hidp_process_report(struct hidp_session *session, int type, + const u8 *data, unsigned int len, int intr) +{ + if (len > HID_MAX_BUFFER_SIZE) + len = HID_MAX_BUFFER_SIZE; + + memcpy(session->input_buf, data, len); + hid_input_report(session->hid, type, session->input_buf, len, intr); +} + +static void hidp_process_handshake(struct hidp_session *session, + unsigned char param) +{ + BT_DBG("session %p param 0x%02x", session, param); + session->output_report_success = 0; /* default condition */ + + switch (param) { + case HIDP_HSHK_SUCCESSFUL: + /* FIXME: Call into SET_ GET_ handlers here */ + session->output_report_success = 1; + break; + + case HIDP_HSHK_NOT_READY: + case HIDP_HSHK_ERR_INVALID_REPORT_ID: + case HIDP_HSHK_ERR_UNSUPPORTED_REQUEST: + case HIDP_HSHK_ERR_INVALID_PARAMETER: + if (test_and_clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags)) + wake_up_interruptible(&session->report_queue); + + /* FIXME: Call into SET_ GET_ handlers here */ + break; + + case HIDP_HSHK_ERR_UNKNOWN: + break; + + case HIDP_HSHK_ERR_FATAL: + /* Device requests a reboot, as this is the only way this error + * can be recovered. */ + hidp_send_ctrl_message(session, + HIDP_TRANS_HID_CONTROL | HIDP_CTRL_SOFT_RESET, NULL, 0); + break; + + default: + hidp_send_ctrl_message(session, + HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); + break; + } + + /* Wake up the waiting thread. */ + if (test_and_clear_bit(HIDP_WAITING_FOR_SEND_ACK, &session->flags)) + wake_up_interruptible(&session->report_queue); +} + +static void hidp_process_hid_control(struct hidp_session *session, + unsigned char param) +{ + BT_DBG("session %p param 0x%02x", session, param); + + if (param == HIDP_CTRL_VIRTUAL_CABLE_UNPLUG) { + /* Flush the transmit queues */ + skb_queue_purge(&session->ctrl_transmit); + skb_queue_purge(&session->intr_transmit); + + hidp_session_terminate(session); + } +} + +/* Returns true if the passed-in skb should be freed by the caller. */ +static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb, + unsigned char param) +{ + int done_with_skb = 1; + BT_DBG("session %p skb %p len %u param 0x%02x", session, skb, skb->len, param); + + switch (param) { + case HIDP_DATA_RTYPE_INPUT: + hidp_set_timer(session); + + if (session->input) + hidp_input_report(session, skb); + + if (session->hid) + hidp_process_report(session, HID_INPUT_REPORT, + skb->data, skb->len, 0); + break; + + case HIDP_DATA_RTYPE_OTHER: + case HIDP_DATA_RTYPE_OUPUT: + case HIDP_DATA_RTYPE_FEATURE: + break; + + default: + hidp_send_ctrl_message(session, + HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_INVALID_PARAMETER, NULL, 0); + } + + if (test_bit(HIDP_WAITING_FOR_RETURN, &session->flags) && + param == session->waiting_report_type) { + if (session->waiting_report_number < 0 || + session->waiting_report_number == skb->data[0]) { + /* hidp_get_raw_report() is waiting on this report. 
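+			 * Ownership of the skb passes to that waiter through
+			 * session->report_return, so done_with_skb is cleared
+			 * and the caller must not free it.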
*/ + session->report_return = skb; + done_with_skb = 0; + clear_bit(HIDP_WAITING_FOR_RETURN, &session->flags); + wake_up_interruptible(&session->report_queue); + } + } + + return done_with_skb; +} + +static void hidp_recv_ctrl_frame(struct hidp_session *session, + struct sk_buff *skb) +{ + unsigned char hdr, type, param; + int free_skb = 1; + + BT_DBG("session %p skb %p len %u", session, skb, skb->len); + + hdr = skb->data[0]; + skb_pull(skb, 1); + + type = hdr & HIDP_HEADER_TRANS_MASK; + param = hdr & HIDP_HEADER_PARAM_MASK; + + switch (type) { + case HIDP_TRANS_HANDSHAKE: + hidp_process_handshake(session, param); + break; + + case HIDP_TRANS_HID_CONTROL: + hidp_process_hid_control(session, param); + break; + + case HIDP_TRANS_DATA: + free_skb = hidp_process_data(session, skb, param); + break; + + default: + hidp_send_ctrl_message(session, + HIDP_TRANS_HANDSHAKE | HIDP_HSHK_ERR_UNSUPPORTED_REQUEST, NULL, 0); + break; + } + + if (free_skb) + kfree_skb(skb); +} + +static void hidp_recv_intr_frame(struct hidp_session *session, + struct sk_buff *skb) +{ + unsigned char hdr; + + BT_DBG("session %p skb %p len %u", session, skb, skb->len); + + hdr = skb->data[0]; + skb_pull(skb, 1); + + if (hdr == (HIDP_TRANS_DATA | HIDP_DATA_RTYPE_INPUT)) { + hidp_set_timer(session); + + if (session->input) + hidp_input_report(session, skb); + + if (session->hid) { + hidp_process_report(session, HID_INPUT_REPORT, + skb->data, skb->len, 1); + BT_DBG("report len %d", skb->len); + } + } else { + BT_DBG("Unsupported protocol header 0x%02x", hdr); + } + + kfree_skb(skb); +} + +static int hidp_send_frame(struct socket *sock, unsigned char *data, int len) +{ + struct kvec iv = { data, len }; + struct msghdr msg; + + BT_DBG("sock %p data %p len %d", sock, data, len); + + if (!len) + return 0; + + memset(&msg, 0, sizeof(msg)); + + return kernel_sendmsg(sock, &msg, &iv, 1, len); +} + +/* dequeue message from @transmit and send via @sock */ +static void hidp_process_transmit(struct hidp_session *session, + struct sk_buff_head *transmit, + struct socket *sock) +{ + struct sk_buff *skb; + int ret; + + BT_DBG("session %p", session); + + while ((skb = skb_dequeue(transmit))) { + ret = hidp_send_frame(sock, skb->data, skb->len); + if (ret == -EAGAIN) { + skb_queue_head(transmit, skb); + break; + } else if (ret < 0) { + hidp_session_terminate(session); + kfree_skb(skb); + break; + } + + hidp_set_timer(session); + kfree_skb(skb); + } +} + +static int hidp_setup_input(struct hidp_session *session, + const struct hidp_connadd_req *req) +{ + struct input_dev *input; + int i; + + input = input_allocate_device(); + if (!input) + return -ENOMEM; + + session->input = input; + + input_set_drvdata(input, session); + + input->name = "Bluetooth HID Boot Protocol Device"; + + input->id.bustype = BUS_BLUETOOTH; + input->id.vendor = req->vendor; + input->id.product = req->product; + input->id.version = req->version; + + if (req->subclass & 0x40) { + set_bit(EV_KEY, input->evbit); + set_bit(EV_LED, input->evbit); + set_bit(EV_REP, input->evbit); + + set_bit(LED_NUML, input->ledbit); + set_bit(LED_CAPSL, input->ledbit); + set_bit(LED_SCROLLL, input->ledbit); + set_bit(LED_COMPOSE, input->ledbit); + set_bit(LED_KANA, input->ledbit); + + for (i = 0; i < sizeof(hidp_keycode); i++) + set_bit(hidp_keycode[i], input->keybit); + clear_bit(0, input->keybit); + } + + if (req->subclass & 0x80) { + input->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL); + input->keybit[BIT_WORD(BTN_MOUSE)] = BIT_MASK(BTN_LEFT) | + BIT_MASK(BTN_RIGHT) | 
BIT_MASK(BTN_MIDDLE); + input->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y); + input->keybit[BIT_WORD(BTN_MOUSE)] |= BIT_MASK(BTN_SIDE) | + BIT_MASK(BTN_EXTRA); + input->relbit[0] |= BIT_MASK(REL_WHEEL); + } + + input->dev.parent = &session->conn->hcon->dev; + + input->event = hidp_input_event; + + return 0; +} + +static int hidp_open(struct hid_device *hid) +{ + return 0; +} + +static void hidp_close(struct hid_device *hid) +{ +} + +static int hidp_parse(struct hid_device *hid) +{ + struct hidp_session *session = hid->driver_data; + + return hid_parse_report(session->hid, session->rd_data, + session->rd_size); +} + +static int hidp_start(struct hid_device *hid) +{ + return 0; +} + +static void hidp_stop(struct hid_device *hid) +{ + struct hidp_session *session = hid->driver_data; + + skb_queue_purge(&session->ctrl_transmit); + skb_queue_purge(&session->intr_transmit); + + hid->claimed = 0; +} + +struct hid_ll_driver hidp_hid_driver = { + .parse = hidp_parse, + .start = hidp_start, + .stop = hidp_stop, + .open = hidp_open, + .close = hidp_close, + .raw_request = hidp_raw_request, + .output_report = hidp_output_report, +}; +EXPORT_SYMBOL_GPL(hidp_hid_driver); + +/* This function sets up the hid device. It does not add it + to the HID system. That is done in hidp_add_connection(). */ +static int hidp_setup_hid(struct hidp_session *session, + const struct hidp_connadd_req *req) +{ + struct hid_device *hid; + int err; + + session->rd_data = memdup_user(req->rd_data, req->rd_size); + if (IS_ERR(session->rd_data)) + return PTR_ERR(session->rd_data); + + session->rd_size = req->rd_size; + + hid = hid_allocate_device(); + if (IS_ERR(hid)) { + err = PTR_ERR(hid); + goto fault; + } + + session->hid = hid; + + hid->driver_data = session; + + hid->bus = BUS_BLUETOOTH; + hid->vendor = req->vendor; + hid->product = req->product; + hid->version = req->version; + hid->country = req->country; + + strscpy(hid->name, req->name, sizeof(hid->name)); + + snprintf(hid->phys, sizeof(hid->phys), "%pMR", + &l2cap_pi(session->ctrl_sock->sk)->chan->src); + + /* NOTE: Some device modules depend on the dst address being stored in + * uniq. Please be aware of this before making changes to this behavior. 
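+	 * (The string is also visible to user space, e.g. via the HID_UNIQ
+	 * uevent field emitted by the HID core, so drivers and udev rules
+	 * can match on the peer bdaddr.)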
+ */ + snprintf(hid->uniq, sizeof(hid->uniq), "%pMR", + &l2cap_pi(session->ctrl_sock->sk)->chan->dst); + + hid->dev.parent = &session->conn->hcon->dev; + hid->ll_driver = &hidp_hid_driver; + + /* True if device is blocked in drivers/hid/hid-quirks.c */ + if (hid_ignore(hid)) { + hid_destroy_device(session->hid); + session->hid = NULL; + return -ENODEV; + } + + return 0; + +fault: + kfree(session->rd_data); + session->rd_data = NULL; + + return err; +} + +/* initialize session devices */ +static int hidp_session_dev_init(struct hidp_session *session, + const struct hidp_connadd_req *req) +{ + int ret; + + if (req->rd_size > 0) { + ret = hidp_setup_hid(session, req); + if (ret && ret != -ENODEV) + return ret; + } + + if (!session->hid) { + ret = hidp_setup_input(session, req); + if (ret < 0) + return ret; + } + + return 0; +} + +/* destroy session devices */ +static void hidp_session_dev_destroy(struct hidp_session *session) +{ + if (session->hid) + put_device(&session->hid->dev); + else if (session->input) + input_put_device(session->input); + + kfree(session->rd_data); + session->rd_data = NULL; +} + +/* add HID/input devices to their underlying bus systems */ +static int hidp_session_dev_add(struct hidp_session *session) +{ + int ret; + + /* Both HID and input systems drop a ref-count when unregistering the + * device but they don't take a ref-count when registering them. Work + * around this by explicitly taking a refcount during registration + * which is dropped automatically by unregistering the devices. */ + + if (session->hid) { + ret = hid_add_device(session->hid); + if (ret) + return ret; + get_device(&session->hid->dev); + } else if (session->input) { + ret = input_register_device(session->input); + if (ret) + return ret; + input_get_device(session->input); + } + + return 0; +} + +/* remove HID/input devices from their bus systems */ +static void hidp_session_dev_del(struct hidp_session *session) +{ + if (session->hid) + hid_destroy_device(session->hid); + else if (session->input) + input_unregister_device(session->input); +} + +/* + * Asynchronous device registration + * HID device drivers might want to perform I/O during initialization to + * detect device types. Therefore, call device registration in a separate + * worker so the HIDP thread can schedule I/O operations. + * Note that this must be called after the worker thread was initialized + * successfully. This will then add the devices and increase session state + * on success, otherwise it will terminate the session thread. + */ +static void hidp_session_dev_work(struct work_struct *work) +{ + struct hidp_session *session = container_of(work, + struct hidp_session, + dev_init); + int ret; + + ret = hidp_session_dev_add(session); + if (!ret) + atomic_inc(&session->state); + else + hidp_session_terminate(session); +} + +/* + * Create new session object + * Allocate session object, initialize static fields, copy input data into the + * object and take a reference to all sub-objects. + * This returns 0 on success and puts a pointer to the new session object in + * \out. Otherwise, an error code is returned. + * The new session object has an initial ref-count of 1. 
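+ * The caller (hidp_connection_add()) drops that initial reference again
+ * after l2cap_register_user() has run; from then on the global session
+ * list and the session thread each hold their own reference.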
+ */ +static int hidp_session_new(struct hidp_session **out, const bdaddr_t *bdaddr, + struct socket *ctrl_sock, + struct socket *intr_sock, + const struct hidp_connadd_req *req, + struct l2cap_conn *conn) +{ + struct hidp_session *session; + int ret; + struct bt_sock *ctrl, *intr; + + ctrl = bt_sk(ctrl_sock->sk); + intr = bt_sk(intr_sock->sk); + + session = kzalloc(sizeof(*session), GFP_KERNEL); + if (!session) + return -ENOMEM; + + /* object and runtime management */ + kref_init(&session->ref); + atomic_set(&session->state, HIDP_SESSION_IDLING); + init_waitqueue_head(&session->state_queue); + session->flags = req->flags & BIT(HIDP_BLUETOOTH_VENDOR_ID); + + /* connection management */ + bacpy(&session->bdaddr, bdaddr); + session->conn = l2cap_conn_get(conn); + session->user.probe = hidp_session_probe; + session->user.remove = hidp_session_remove; + INIT_LIST_HEAD(&session->user.list); + session->ctrl_sock = ctrl_sock; + session->intr_sock = intr_sock; + skb_queue_head_init(&session->ctrl_transmit); + skb_queue_head_init(&session->intr_transmit); + session->ctrl_mtu = min_t(uint, l2cap_pi(ctrl)->chan->omtu, + l2cap_pi(ctrl)->chan->imtu); + session->intr_mtu = min_t(uint, l2cap_pi(intr)->chan->omtu, + l2cap_pi(intr)->chan->imtu); + session->idle_to = req->idle_to; + + /* device management */ + INIT_WORK(&session->dev_init, hidp_session_dev_work); + timer_setup(&session->timer, hidp_idle_timeout, 0); + + /* session data */ + mutex_init(&session->report_mutex); + init_waitqueue_head(&session->report_queue); + + ret = hidp_session_dev_init(session, req); + if (ret) + goto err_free; + + get_file(session->intr_sock->file); + get_file(session->ctrl_sock->file); + *out = session; + return 0; + +err_free: + l2cap_conn_put(session->conn); + kfree(session); + return ret; +} + +/* increase ref-count of the given session by one */ +static void hidp_session_get(struct hidp_session *session) +{ + kref_get(&session->ref); +} + +/* release callback */ +static void session_free(struct kref *ref) +{ + struct hidp_session *session = container_of(ref, struct hidp_session, + ref); + + hidp_session_dev_destroy(session); + skb_queue_purge(&session->ctrl_transmit); + skb_queue_purge(&session->intr_transmit); + fput(session->intr_sock->file); + fput(session->ctrl_sock->file); + l2cap_conn_put(session->conn); + kfree(session); +} + +/* decrease ref-count of the given session by one */ +static void hidp_session_put(struct hidp_session *session) +{ + kref_put(&session->ref, session_free); +} + +/* + * Search the list of active sessions for a session with target address + * \bdaddr. You must hold at least a read-lock on \hidp_session_sem. As long as + * you do not release this lock, the session objects cannot vanish and you can + * safely take a reference to the session yourself. + */ +static struct hidp_session *__hidp_session_find(const bdaddr_t *bdaddr) +{ + struct hidp_session *session; + + list_for_each_entry(session, &hidp_session_list, list) { + if (!bacmp(bdaddr, &session->bdaddr)) + return session; + } + + return NULL; +} + +/* + * Same as __hidp_session_find() but no locks must be held. This also takes a + * reference of the returned session (if non-NULL) so you must drop this + * reference if you no longer use the object. 
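+ * A lookup is therefore typically paired with a put, for example:
+ *
+ *	session = hidp_session_find(bdaddr);
+ *	if (session) {
+ *		... use the session ...
+ *		hidp_session_put(session);
+ *	}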
+ */ +static struct hidp_session *hidp_session_find(const bdaddr_t *bdaddr) +{ + struct hidp_session *session; + + down_read(&hidp_session_sem); + + session = __hidp_session_find(bdaddr); + if (session) + hidp_session_get(session); + + up_read(&hidp_session_sem); + + return session; +} + +/* + * Start session synchronously + * This starts a session thread and waits until initialization + * is done or returns an error if it couldn't be started. + * If this returns 0 the session thread is up and running. You must call + * hipd_session_stop_sync() before deleting any runtime resources. + */ +static int hidp_session_start_sync(struct hidp_session *session) +{ + unsigned int vendor, product; + + if (session->hid) { + vendor = session->hid->vendor; + product = session->hid->product; + } else if (session->input) { + vendor = session->input->id.vendor; + product = session->input->id.product; + } else { + vendor = 0x0000; + product = 0x0000; + } + + session->task = kthread_run(hidp_session_thread, session, + "khidpd_%04x%04x", vendor, product); + if (IS_ERR(session->task)) + return PTR_ERR(session->task); + + while (atomic_read(&session->state) <= HIDP_SESSION_IDLING) + wait_event(session->state_queue, + atomic_read(&session->state) > HIDP_SESSION_IDLING); + + return 0; +} + +/* + * Terminate session thread + * Wake up session thread and notify it to stop. This is asynchronous and + * returns immediately. Call this whenever a runtime error occurs and you want + * the session to stop. + * Note: wake_up_interruptible() performs any necessary memory-barriers for us. + */ +static void hidp_session_terminate(struct hidp_session *session) +{ + atomic_inc(&session->terminate); + /* + * See the comment preceding the call to wait_woken() + * in hidp_session_run(). + */ + wake_up_interruptible(&hidp_session_wq); +} + +/* + * Probe HIDP session + * This is called from the l2cap_conn core when our l2cap_user object is bound + * to the hci-connection. We get the session via the \user object and can now + * start the session thread, link it into the global session list and + * schedule HID/input device registration. + * The global session-list owns its own reference to the session object so you + * can drop your own reference after registering the l2cap_user object. + */ +static int hidp_session_probe(struct l2cap_conn *conn, + struct l2cap_user *user) +{ + struct hidp_session *session = container_of(user, + struct hidp_session, + user); + struct hidp_session *s; + int ret; + + down_write(&hidp_session_sem); + + /* check that no other session for this device exists */ + s = __hidp_session_find(&session->bdaddr); + if (s) { + ret = -EEXIST; + goto out_unlock; + } + + if (session->input) { + ret = hidp_session_dev_add(session); + if (ret) + goto out_unlock; + } + + ret = hidp_session_start_sync(session); + if (ret) + goto out_del; + + /* HID device registration is async to allow I/O during probe */ + if (session->input) + atomic_inc(&session->state); + else + schedule_work(&session->dev_init); + + hidp_session_get(session); + list_add(&session->list, &hidp_session_list); + ret = 0; + goto out_unlock; + +out_del: + if (session->input) + hidp_session_dev_del(session); +out_unlock: + up_write(&hidp_session_sem); + return ret; +} + +/* + * Remove HIDP session + * Called from the l2cap_conn core when either we explicitly unregistered + * the l2cap_user object or if the underlying connection is shut down. 
+ * We signal the hidp-session thread to shut down, unregister the HID/input + * devices and unlink the session from the global list. + * This drops the reference to the session that is owned by the global + * session-list. + * Note: We _must_ not synchronosly wait for the session-thread to shut down. + * This is, because the session-thread might be waiting for an HCI lock that is + * held while we are called. Therefore, we only unregister the devices and + * notify the session-thread to terminate. The thread itself owns a reference + * to the session object so it can safely shut down. + */ +static void hidp_session_remove(struct l2cap_conn *conn, + struct l2cap_user *user) +{ + struct hidp_session *session = container_of(user, + struct hidp_session, + user); + + down_write(&hidp_session_sem); + + hidp_session_terminate(session); + + cancel_work_sync(&session->dev_init); + if (session->input || + atomic_read(&session->state) > HIDP_SESSION_PREPARING) + hidp_session_dev_del(session); + + list_del(&session->list); + + up_write(&hidp_session_sem); + + hidp_session_put(session); +} + +/* + * Session Worker + * This performs the actual main-loop of the HIDP worker. We first check + * whether the underlying connection is still alive, then parse all pending + * messages and finally send all outstanding messages. + */ +static void hidp_session_run(struct hidp_session *session) +{ + struct sock *ctrl_sk = session->ctrl_sock->sk; + struct sock *intr_sk = session->intr_sock->sk; + struct sk_buff *skb; + DEFINE_WAIT_FUNC(wait, woken_wake_function); + + add_wait_queue(&hidp_session_wq, &wait); + for (;;) { + /* + * This thread can be woken up two ways: + * - You call hidp_session_terminate() which sets the + * session->terminate flag and wakes this thread up. + * - Via modifying the socket state of ctrl/intr_sock. This + * thread is woken up by ->sk_state_changed(). + */ + + if (atomic_read(&session->terminate)) + break; + + if (ctrl_sk->sk_state != BT_CONNECTED || + intr_sk->sk_state != BT_CONNECTED) + break; + + /* parse incoming intr-skbs */ + while ((skb = skb_dequeue(&intr_sk->sk_receive_queue))) { + skb_orphan(skb); + if (!skb_linearize(skb)) + hidp_recv_intr_frame(session, skb); + else + kfree_skb(skb); + } + + /* send pending intr-skbs */ + hidp_process_transmit(session, &session->intr_transmit, + session->intr_sock); + + /* parse incoming ctrl-skbs */ + while ((skb = skb_dequeue(&ctrl_sk->sk_receive_queue))) { + skb_orphan(skb); + if (!skb_linearize(skb)) + hidp_recv_ctrl_frame(session, skb); + else + kfree_skb(skb); + } + + /* send pending ctrl-skbs */ + hidp_process_transmit(session, &session->ctrl_transmit, + session->ctrl_sock); + + /* + * wait_woken() performs the necessary memory barriers + * for us; see the header comment for this primitive. + */ + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + remove_wait_queue(&hidp_session_wq, &wait); + + atomic_inc(&session->terminate); +} + +static int hidp_session_wake_function(wait_queue_entry_t *wait, + unsigned int mode, + int sync, void *key) +{ + wake_up_interruptible(&hidp_session_wq); + return false; +} + +/* + * HIDP session thread + * This thread runs the I/O for a single HIDP session. Startup is synchronous + * which allows us to take references to ourself here instead of doing that in + * the caller. + * When we are ready to run we notify the caller and call hidp_session_run(). 
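+ * The thread pins both the module and the session for its lifetime
+ * (__module_get()/hidp_session_get() below) and releases them on exit
+ * via hidp_session_put() and module_put_and_kthread_exit().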
+ */ +static int hidp_session_thread(void *arg) +{ + struct hidp_session *session = arg; + DEFINE_WAIT_FUNC(ctrl_wait, hidp_session_wake_function); + DEFINE_WAIT_FUNC(intr_wait, hidp_session_wake_function); + + BT_DBG("session %p", session); + + /* initialize runtime environment */ + hidp_session_get(session); + __module_get(THIS_MODULE); + set_user_nice(current, -15); + hidp_set_timer(session); + + add_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); + add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); + /* This memory barrier is paired with wq_has_sleeper(). See + * sock_poll_wait() for more information why this is needed. */ + smp_mb__before_atomic(); + + /* notify synchronous startup that we're ready */ + atomic_inc(&session->state); + wake_up(&session->state_queue); + + /* run session */ + hidp_session_run(session); + + /* cleanup runtime environment */ + remove_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait); + remove_wait_queue(sk_sleep(session->ctrl_sock->sk), &ctrl_wait); + wake_up_interruptible(&session->report_queue); + hidp_del_timer(session); + + /* + * If we stopped ourself due to any internal signal, we should try to + * unregister our own session here to avoid having it linger until the + * parent l2cap_conn dies or user-space cleans it up. + * This does not deadlock as we don't do any synchronous shutdown. + * Instead, this call has the same semantics as if user-space tried to + * delete the session. + */ + l2cap_unregister_user(session->conn, &session->user); + hidp_session_put(session); + + module_put_and_kthread_exit(0); + return 0; +} + +static int hidp_verify_sockets(struct socket *ctrl_sock, + struct socket *intr_sock) +{ + struct l2cap_chan *ctrl_chan, *intr_chan; + struct bt_sock *ctrl, *intr; + struct hidp_session *session; + + if (!l2cap_is_socket(ctrl_sock) || !l2cap_is_socket(intr_sock)) + return -EINVAL; + + ctrl_chan = l2cap_pi(ctrl_sock->sk)->chan; + intr_chan = l2cap_pi(intr_sock->sk)->chan; + + if (bacmp(&ctrl_chan->src, &intr_chan->src) || + bacmp(&ctrl_chan->dst, &intr_chan->dst)) + return -ENOTUNIQ; + + ctrl = bt_sk(ctrl_sock->sk); + intr = bt_sk(intr_sock->sk); + + if (ctrl->sk.sk_state != BT_CONNECTED || + intr->sk.sk_state != BT_CONNECTED) + return -EBADFD; + + /* early session check, we check again during session registration */ + session = hidp_session_find(&ctrl_chan->dst); + if (session) { + hidp_session_put(session); + return -EEXIST; + } + + return 0; +} + +int hidp_connection_add(const struct hidp_connadd_req *req, + struct socket *ctrl_sock, + struct socket *intr_sock) +{ + u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG) | + BIT(HIDP_BOOT_PROTOCOL_MODE); + struct hidp_session *session; + struct l2cap_conn *conn; + struct l2cap_chan *chan; + int ret; + + ret = hidp_verify_sockets(ctrl_sock, intr_sock); + if (ret) + return ret; + + if (req->flags & ~valid_flags) + return -EINVAL; + + chan = l2cap_pi(ctrl_sock->sk)->chan; + conn = NULL; + l2cap_chan_lock(chan); + if (chan->conn) + conn = l2cap_conn_get(chan->conn); + l2cap_chan_unlock(chan); + + if (!conn) + return -EBADFD; + + ret = hidp_session_new(&session, &chan->dst, ctrl_sock, + intr_sock, req, conn); + if (ret) + goto out_conn; + + ret = l2cap_register_user(conn, &session->user); + if (ret) + goto out_session; + + ret = 0; + +out_session: + hidp_session_put(session); +out_conn: + l2cap_conn_put(conn); + return ret; +} + +int hidp_connection_del(struct hidp_conndel_req *req) +{ + u32 valid_flags = BIT(HIDP_VIRTUAL_CABLE_UNPLUG); + struct hidp_session *session; 
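+
+	/*
+	 * Illustrative sketch only: user space reaches this function and
+	 * hidp_connection_add() through the HIDP control socket ioctls
+	 * (see sock.c). A minimal, hypothetical caller, assuming ctrl_fd
+	 * and intr_fd are connected L2CAP sockets on PSM 0x11 (control)
+	 * and PSM 0x13 (interrupt):
+	 *
+	 *	struct hidp_connadd_req ca = {
+	 *		.ctrl_sock = ctrl_fd,
+	 *		.intr_sock = intr_fd,
+	 *		.idle_to   = 30 * 60,	// seconds, see hidp_set_timer()
+	 *	};
+	 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HIDP);
+	 *
+	 *	ioctl(fd, HIDPCONNADD, &ca);	// needs CAP_NET_ADMIN
+	 *	...
+	 *	struct hidp_conndel_req cd = { .bdaddr = peer_bdaddr };
+	 *	ioctl(fd, HIDPCONNDEL, &cd);
+	 */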
+ + if (req->flags & ~valid_flags) + return -EINVAL; + + session = hidp_session_find(&req->bdaddr); + if (!session) + return -ENOENT; + + if (req->flags & BIT(HIDP_VIRTUAL_CABLE_UNPLUG)) + hidp_send_ctrl_message(session, + HIDP_TRANS_HID_CONTROL | + HIDP_CTRL_VIRTUAL_CABLE_UNPLUG, + NULL, 0); + else + l2cap_unregister_user(session->conn, &session->user); + + hidp_session_put(session); + + return 0; +} + +int hidp_get_connlist(struct hidp_connlist_req *req) +{ + struct hidp_session *session; + int err = 0, n = 0; + + BT_DBG(""); + + down_read(&hidp_session_sem); + + list_for_each_entry(session, &hidp_session_list, list) { + struct hidp_conninfo ci; + + hidp_copy_session(session, &ci); + + if (copy_to_user(req->ci, &ci, sizeof(ci))) { + err = -EFAULT; + break; + } + + if (++n >= req->cnum) + break; + + req->ci++; + } + req->cnum = n; + + up_read(&hidp_session_sem); + return err; +} + +int hidp_get_conninfo(struct hidp_conninfo *ci) +{ + struct hidp_session *session; + + session = hidp_session_find(&ci->bdaddr); + if (session) { + hidp_copy_session(session, ci); + hidp_session_put(session); + } + + return session ? 0 : -ENOENT; +} + +static int __init hidp_init(void) +{ + BT_INFO("HIDP (Human Interface Emulation) ver %s", VERSION); + + return hidp_init_sockets(); +} + +static void __exit hidp_exit(void) +{ + hidp_cleanup_sockets(); +} + +module_init(hidp_init); +module_exit(hidp_exit); + +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); +MODULE_AUTHOR("David Herrmann <dh.herrmann@gmail.com>"); +MODULE_DESCRIPTION("Bluetooth HIDP ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-6"); diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h new file mode 100644 index 000000000..6ef88d0a1 --- /dev/null +++ b/net/bluetooth/hidp/hidp.h @@ -0,0 +1,192 @@ +/* + HIDP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#ifndef __HIDP_H +#define __HIDP_H + +#include <linux/types.h> +#include <linux/hid.h> +#include <linux/kref.h> +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/l2cap.h> + +/* HIDP header masks */ +#define HIDP_HEADER_TRANS_MASK 0xf0 +#define HIDP_HEADER_PARAM_MASK 0x0f + +/* HIDP transaction types */ +#define HIDP_TRANS_HANDSHAKE 0x00 +#define HIDP_TRANS_HID_CONTROL 0x10 +#define HIDP_TRANS_GET_REPORT 0x40 +#define HIDP_TRANS_SET_REPORT 0x50 +#define HIDP_TRANS_GET_PROTOCOL 0x60 +#define HIDP_TRANS_SET_PROTOCOL 0x70 +#define HIDP_TRANS_GET_IDLE 0x80 +#define HIDP_TRANS_SET_IDLE 0x90 +#define HIDP_TRANS_DATA 0xa0 +#define HIDP_TRANS_DATC 0xb0 + +/* HIDP handshake results */ +#define HIDP_HSHK_SUCCESSFUL 0x00 +#define HIDP_HSHK_NOT_READY 0x01 +#define HIDP_HSHK_ERR_INVALID_REPORT_ID 0x02 +#define HIDP_HSHK_ERR_UNSUPPORTED_REQUEST 0x03 +#define HIDP_HSHK_ERR_INVALID_PARAMETER 0x04 +#define HIDP_HSHK_ERR_UNKNOWN 0x0e +#define HIDP_HSHK_ERR_FATAL 0x0f + +/* HIDP control operation parameters */ +#define HIDP_CTRL_NOP 0x00 +#define HIDP_CTRL_HARD_RESET 0x01 +#define HIDP_CTRL_SOFT_RESET 0x02 +#define HIDP_CTRL_SUSPEND 0x03 +#define HIDP_CTRL_EXIT_SUSPEND 0x04 +#define HIDP_CTRL_VIRTUAL_CABLE_UNPLUG 0x05 + +/* HIDP data transaction headers */ +#define HIDP_DATA_RTYPE_MASK 0x03 +#define HIDP_DATA_RSRVD_MASK 0x0c +#define HIDP_DATA_RTYPE_OTHER 0x00 +#define HIDP_DATA_RTYPE_INPUT 0x01 +#define HIDP_DATA_RTYPE_OUPUT 0x02 +#define HIDP_DATA_RTYPE_FEATURE 0x03 + +/* HIDP protocol header parameters */ +#define HIDP_PROTO_BOOT 0x00 +#define HIDP_PROTO_REPORT 0x01 + +/* HIDP ioctl defines */ +#define HIDPCONNADD _IOW('H', 200, int) +#define HIDPCONNDEL _IOW('H', 201, int) +#define HIDPGETCONNLIST _IOR('H', 210, int) +#define HIDPGETCONNINFO _IOR('H', 211, int) + +#define HIDP_VIRTUAL_CABLE_UNPLUG 0 +#define HIDP_BOOT_PROTOCOL_MODE 1 +#define HIDP_BLUETOOTH_VENDOR_ID 9 +#define HIDP_WAITING_FOR_RETURN 10 +#define HIDP_WAITING_FOR_SEND_ACK 11 + +struct hidp_connadd_req { + int ctrl_sock; /* Connected control socket */ + int intr_sock; /* Connected interrupt socket */ + __u16 parser; + __u16 rd_size; + __u8 __user *rd_data; + __u8 country; + __u8 subclass; + __u16 vendor; + __u16 product; + __u16 version; + __u32 flags; + __u32 idle_to; + char name[128]; +}; + +struct hidp_conndel_req { + bdaddr_t bdaddr; + __u32 flags; +}; + +struct hidp_conninfo { + bdaddr_t bdaddr; + __u32 flags; + __u16 state; + __u16 vendor; + __u16 product; + __u16 version; + char name[128]; +}; + +struct hidp_connlist_req { + __u32 cnum; + struct hidp_conninfo __user *ci; +}; + +int hidp_connection_add(const struct hidp_connadd_req *req, struct socket *ctrl_sock, struct socket *intr_sock); +int hidp_connection_del(struct hidp_conndel_req *req); +int hidp_get_connlist(struct hidp_connlist_req *req); +int hidp_get_conninfo(struct hidp_conninfo *ci); + +enum hidp_session_state { + HIDP_SESSION_IDLING, + HIDP_SESSION_PREPARING, + HIDP_SESSION_RUNNING, +}; + +/* HIDP session defines */ +struct hidp_session { + struct list_head list; + struct kref ref; + + /* runtime management */ + atomic_t state; + wait_queue_head_t state_queue; + atomic_t terminate; + struct task_struct *task; + unsigned long flags; + + /* connection management */ + bdaddr_t bdaddr; + struct l2cap_conn *conn; + struct l2cap_user user; + struct socket *ctrl_sock; + struct socket *intr_sock; + struct sk_buff_head ctrl_transmit; + struct sk_buff_head intr_transmit; + uint ctrl_mtu; + uint intr_mtu; + unsigned long idle_to; + + /* device management */ + 
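+	/*
+	 * At most one of @hid and @input is non-NULL per session:
+	 * hidp_session_dev_init() creates a real HID device when a report
+	 * descriptor was supplied (rd_size > 0) and falls back to a boot
+	 * protocol input device otherwise.
+	 */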
struct work_struct dev_init; + struct input_dev *input; + struct hid_device *hid; + struct timer_list timer; + + /* Report descriptor */ + __u8 *rd_data; + uint rd_size; + + /* session data */ + unsigned char keys[8]; + unsigned char leds; + + /* Used in hidp_get_raw_report() */ + int waiting_report_type; /* HIDP_DATA_RTYPE_* */ + int waiting_report_number; /* -1 for not numbered */ + struct mutex report_mutex; + struct sk_buff *report_return; + wait_queue_head_t report_queue; + + /* Used in hidp_output_raw_report() */ + int output_report_success; /* boolean */ + + /* temporary input buffer */ + u8 input_buf[HID_MAX_BUFFER_SIZE]; +}; + +/* HIDP init defines */ +int __init hidp_init_sockets(void); +void __exit hidp_cleanup_sockets(void); + +#endif /* __HIDP_H */ diff --git a/net/bluetooth/hidp/sock.c b/net/bluetooth/hidp/sock.c new file mode 100644 index 000000000..369ed92da --- /dev/null +++ b/net/bluetooth/hidp/sock.c @@ -0,0 +1,320 @@ +/* + HIDP implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2003-2004 Marcel Holtmann <marcel@holtmann.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +#include <linux/compat.h> +#include <linux/export.h> +#include <linux/file.h> + +#include "hidp.h" + +static struct bt_sock_list hidp_sk_list = { + .lock = __RW_LOCK_UNLOCKED(hidp_sk_list.lock) +}; + +static int hidp_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + + BT_DBG("sock %p sk %p", sock, sk); + + if (!sk) + return 0; + + bt_sock_unlink(&hidp_sk_list, sk); + + sock_orphan(sk); + sock_put(sk); + + return 0; +} + +static int do_hidp_sock_ioctl(struct socket *sock, unsigned int cmd, void __user *argp) +{ + struct hidp_connadd_req ca; + struct hidp_conndel_req cd; + struct hidp_connlist_req cl; + struct hidp_conninfo ci; + struct socket *csock; + struct socket *isock; + int err; + + BT_DBG("cmd %x arg %p", cmd, argp); + + switch (cmd) { + case HIDPCONNADD: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&ca, argp, sizeof(ca))) + return -EFAULT; + + csock = sockfd_lookup(ca.ctrl_sock, &err); + if (!csock) + return err; + + isock = sockfd_lookup(ca.intr_sock, &err); + if (!isock) { + sockfd_put(csock); + return err; + } + ca.name[sizeof(ca.name)-1] = 0; + + err = hidp_connection_add(&ca, csock, isock); + if (!err && copy_to_user(argp, &ca, sizeof(ca))) + err = -EFAULT; + + sockfd_put(csock); + sockfd_put(isock); + + return err; + + case HIDPCONNDEL: + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&cd, argp, sizeof(cd))) + return -EFAULT; + + return hidp_connection_del(&cd); + + case HIDPGETCONNLIST: + if (copy_from_user(&cl, argp, sizeof(cl))) + return -EFAULT; + + if (cl.cnum <= 0) + return -EINVAL; + + err = hidp_get_connlist(&cl); + if (!err && copy_to_user(argp, &cl, sizeof(cl))) + return -EFAULT; + + return err; + + case HIDPGETCONNINFO: + if (copy_from_user(&ci, argp, sizeof(ci))) + return -EFAULT; + + err = hidp_get_conninfo(&ci); + if (!err && copy_to_user(argp, &ci, sizeof(ci))) + return -EFAULT; + + return err; + } + + return -EINVAL; +} + +static int hidp_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + return do_hidp_sock_ioctl(sock, cmd, (void __user *)arg); +} + +#ifdef CONFIG_COMPAT +struct compat_hidp_connadd_req { + int ctrl_sock; /* Connected control socket */ + int intr_sock; /* Connected interrupt socket */ + __u16 parser; + __u16 rd_size; + compat_uptr_t rd_data; + __u8 country; + __u8 subclass; + __u16 vendor; + __u16 product; + __u16 version; + __u32 flags; + __u32 idle_to; + char name[128]; +}; + +static int hidp_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + void __user *argp = compat_ptr(arg); + int err; + + if (cmd == HIDPGETCONNLIST) { + struct hidp_connlist_req cl; + u32 __user *p = argp; + u32 uci; + + if (get_user(cl.cnum, p) || get_user(uci, p + 1)) + return -EFAULT; + + cl.ci = compat_ptr(uci); + + if (cl.cnum <= 0) + return -EINVAL; + + err = hidp_get_connlist(&cl); + + if (!err && put_user(cl.cnum, p)) + err = -EFAULT; + + return err; + } else if (cmd == HIDPCONNADD) { + struct compat_hidp_connadd_req ca32; + struct hidp_connadd_req ca; + struct socket *csock; + struct socket *isock; + + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + + if (copy_from_user(&ca32, (void __user *) arg, sizeof(ca32))) + return -EFAULT; + + ca.ctrl_sock = ca32.ctrl_sock; + ca.intr_sock = ca32.intr_sock; + ca.parser = ca32.parser; + ca.rd_size = ca32.rd_size; + ca.rd_data = compat_ptr(ca32.rd_data); + ca.country = ca32.country; + ca.subclass = ca32.subclass; + ca.vendor = ca32.vendor; + ca.product = ca32.product; + ca.version = ca32.version; + 
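+		/*
+		 * The 32-bit request layout differs from the native one only
+		 * in the width of the rd_data user pointer, so the request is
+		 * converted field by field with compat_ptr() translating the
+		 * pointer.
+		 */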
ca.flags = ca32.flags; + ca.idle_to = ca32.idle_to; + ca32.name[sizeof(ca32.name) - 1] = '\0'; + memcpy(ca.name, ca32.name, 128); + + csock = sockfd_lookup(ca.ctrl_sock, &err); + if (!csock) + return err; + + isock = sockfd_lookup(ca.intr_sock, &err); + if (!isock) { + sockfd_put(csock); + return err; + } + + err = hidp_connection_add(&ca, csock, isock); + if (!err && copy_to_user(argp, &ca32, sizeof(ca32))) + err = -EFAULT; + + sockfd_put(csock); + sockfd_put(isock); + + return err; + } + + return hidp_sock_ioctl(sock, cmd, arg); +} +#endif + +static const struct proto_ops hidp_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = hidp_sock_release, + .ioctl = hidp_sock_ioctl, +#ifdef CONFIG_COMPAT + .compat_ioctl = hidp_sock_compat_ioctl, +#endif + .bind = sock_no_bind, + .getname = sock_no_getname, + .sendmsg = sock_no_sendmsg, + .recvmsg = sock_no_recvmsg, + .listen = sock_no_listen, + .shutdown = sock_no_shutdown, + .connect = sock_no_connect, + .socketpair = sock_no_socketpair, + .accept = sock_no_accept, + .mmap = sock_no_mmap +}; + +static struct proto hidp_proto = { + .name = "HIDP", + .owner = THIS_MODULE, + .obj_size = sizeof(struct bt_sock) +}; + +static int hidp_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + if (sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hidp_proto, kern); + if (!sk) + return -ENOMEM; + + sock_init_data(sock, sk); + + sock->ops = &hidp_sock_ops; + + sock->state = SS_UNCONNECTED; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = protocol; + sk->sk_state = BT_OPEN; + + bt_sock_link(&hidp_sk_list, sk); + + return 0; +} + +static const struct net_proto_family hidp_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = hidp_sock_create +}; + +int __init hidp_init_sockets(void) +{ + int err; + + err = proto_register(&hidp_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_HIDP, &hidp_sock_family_ops); + if (err < 0) { + BT_ERR("Can't register HIDP socket"); + goto error; + } + + err = bt_procfs_init(&init_net, "hidp", &hidp_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create HIDP proc file"); + bt_sock_unregister(BTPROTO_HIDP); + goto error; + } + + BT_INFO("HIDP socket layer initialized"); + + return 0; + +error: + proto_unregister(&hidp_proto); + return err; +} + +void __exit hidp_cleanup_sockets(void) +{ + bt_procfs_cleanup(&init_net, "hidp"); + bt_sock_unregister(BTPROTO_HIDP); + proto_unregister(&hidp_proto); +} diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c new file mode 100644 index 000000000..91e990acc --- /dev/null +++ b/net/bluetooth/iso.c @@ -0,0 +1,1884 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * BlueZ - Bluetooth protocol stack for Linux + * + * Copyright (C) 2022 Intel Corporation + */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/sched/signal.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/iso.h> + +static const struct proto_ops iso_sock_ops; + +static struct bt_sock_list iso_sk_list = { + .lock = __RW_LOCK_UNLOCKED(iso_sk_list.lock) +}; + +/* ---- ISO connections ---- */ +struct iso_conn { + struct hci_conn *hcon; + + /* @lock: spinlock protecting changes to iso_conn fields */ + spinlock_t lock; + struct sock *sk; + + struct delayed_work timeout_work; + + struct sk_buff *rx_skb; + __u32 rx_len; + 
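+	/* rx_skb/rx_len above accumulate fragmented incoming ISO frames
+	 * (reassembly path not shown here); tx_sn below is the sequence
+	 * number stamped into each outgoing SDU by iso_send_frame().
+	 */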
__u16 tx_sn; +}; + +#define iso_conn_lock(c) spin_lock(&(c)->lock) +#define iso_conn_unlock(c) spin_unlock(&(c)->lock) + +static void iso_sock_close(struct sock *sk); +static void iso_sock_kill(struct sock *sk); + +/* ----- ISO socket info ----- */ +#define iso_pi(sk) ((struct iso_pinfo *)sk) + +#define EIR_SERVICE_DATA_LENGTH 4 +#define BASE_MAX_LENGTH (HCI_MAX_PER_AD_LENGTH - EIR_SERVICE_DATA_LENGTH) + +struct iso_pinfo { + struct bt_sock bt; + bdaddr_t src; + __u8 src_type; + bdaddr_t dst; + __u8 dst_type; + __u8 bc_sid; + __u8 bc_num_bis; + __u8 bc_bis[ISO_MAX_NUM_BIS]; + __u16 sync_handle; + __u32 flags; + struct bt_iso_qos qos; + __u8 base_len; + __u8 base[BASE_MAX_LENGTH]; + struct iso_conn *conn; +}; + +/* ---- ISO timers ---- */ +#define ISO_CONN_TIMEOUT (HZ * 40) +#define ISO_DISCONN_TIMEOUT (HZ * 2) + +static void iso_sock_timeout(struct work_struct *work) +{ + struct iso_conn *conn = container_of(work, struct iso_conn, + timeout_work.work); + struct sock *sk; + + iso_conn_lock(conn); + sk = conn->sk; + if (sk) + sock_hold(sk); + iso_conn_unlock(conn); + + if (!sk) + return; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + + lock_sock(sk); + sk->sk_err = ETIMEDOUT; + sk->sk_state_change(sk); + release_sock(sk); + sock_put(sk); +} + +static void iso_sock_set_timer(struct sock *sk, long timeout) +{ + if (!iso_pi(sk)->conn) + return; + + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); + cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); + schedule_delayed_work(&iso_pi(sk)->conn->timeout_work, timeout); +} + +static void iso_sock_clear_timer(struct sock *sk) +{ + if (!iso_pi(sk)->conn) + return; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + cancel_delayed_work(&iso_pi(sk)->conn->timeout_work); +} + +/* ---- ISO connections ---- */ +static struct iso_conn *iso_conn_add(struct hci_conn *hcon) +{ + struct iso_conn *conn = hcon->iso_data; + + if (conn) { + if (!conn->hcon) + conn->hcon = hcon; + return conn; + } + + conn = kzalloc(sizeof(*conn), GFP_KERNEL); + if (!conn) + return NULL; + + spin_lock_init(&conn->lock); + INIT_DELAYED_WORK(&conn->timeout_work, iso_sock_timeout); + + hcon->iso_data = conn; + conn->hcon = hcon; + conn->tx_sn = 0; + + BT_DBG("hcon %p conn %p", hcon, conn); + + return conn; +} + +/* Delete channel. Must be called on the locked socket. */ +static void iso_chan_del(struct sock *sk, int err) +{ + struct iso_conn *conn; + struct sock *parent; + + conn = iso_pi(sk)->conn; + + BT_DBG("sk %p, conn %p, err %d", sk, conn, err); + + if (conn) { + iso_conn_lock(conn); + conn->sk = NULL; + iso_pi(sk)->conn = NULL; + iso_conn_unlock(conn); + + if (conn->hcon) + hci_conn_drop(conn->hcon); + } + + sk->sk_state = BT_CLOSED; + sk->sk_err = err; + + parent = bt_sk(sk)->parent; + if (parent) { + bt_accept_unlink(sk); + parent->sk_data_ready(parent); + } else { + sk->sk_state_change(sk); + } + + sock_set_flag(sk, SOCK_ZAPPED); +} + +static void iso_conn_del(struct hci_conn *hcon, int err) +{ + struct iso_conn *conn = hcon->iso_data; + struct sock *sk; + + if (!conn) + return; + + BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); + + /* Kill socket */ + iso_conn_lock(conn); + sk = conn->sk; + if (sk) + sock_hold(sk); + iso_conn_unlock(conn); + + if (sk) { + lock_sock(sk); + iso_sock_clear_timer(sk); + iso_chan_del(sk, err); + release_sock(sk); + sock_put(sk); + } + + /* Ensure no more work items will run before freeing conn. 
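+	 * cancel_delayed_work_sync() may sleep, so it must run without
+	 * conn->lock held; iso_sock_timeout() takes that lock itself. By
+	 * this point the socket has been detached, so no new timer can be
+	 * armed against this conn.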
*/ + cancel_delayed_work_sync(&conn->timeout_work); + + hcon->iso_data = NULL; + kfree(conn); +} + +static int __iso_chan_add(struct iso_conn *conn, struct sock *sk, + struct sock *parent) +{ + BT_DBG("conn %p", conn); + + if (iso_pi(sk)->conn == conn && conn->sk == sk) + return 0; + + if (conn->sk) { + BT_ERR("conn->sk already set"); + return -EBUSY; + } + + iso_pi(sk)->conn = conn; + conn->sk = sk; + + if (parent) + bt_accept_enqueue(parent, sk, true); + + return 0; +} + +static int iso_chan_add(struct iso_conn *conn, struct sock *sk, + struct sock *parent) +{ + int err; + + iso_conn_lock(conn); + err = __iso_chan_add(conn, sk, parent); + iso_conn_unlock(conn); + + return err; +} + +static inline u8 le_addr_type(u8 bdaddr_type) +{ + if (bdaddr_type == BDADDR_LE_PUBLIC) + return ADDR_LE_DEV_PUBLIC; + else + return ADDR_LE_DEV_RANDOM; +} + +static int iso_connect_bis(struct sock *sk) +{ + struct iso_conn *conn; + struct hci_conn *hcon; + struct hci_dev *hdev; + int err; + + BT_DBG("%pMR", &iso_pi(sk)->src); + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + + if (!bis_capable(hdev)) { + err = -EOPNOTSUPP; + goto unlock; + } + + /* Fail if out PHYs are marked as disabled */ + if (!iso_pi(sk)->qos.out.phy) { + err = -EINVAL; + goto unlock; + } + + hcon = hci_connect_bis(hdev, &iso_pi(sk)->dst, iso_pi(sk)->dst_type, + &iso_pi(sk)->qos, iso_pi(sk)->base_len, + iso_pi(sk)->base); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto unlock; + } + + conn = iso_conn_add(hcon); + if (!conn) { + hci_conn_drop(hcon); + err = -ENOMEM; + goto unlock; + } + + lock_sock(sk); + + err = iso_chan_add(conn, sk, NULL); + if (err) { + release_sock(sk); + goto unlock; + } + + /* Update source addr of the socket */ + bacpy(&iso_pi(sk)->src, &hcon->src); + + if (hcon->state == BT_CONNECTED) { + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + } else { + sk->sk_state = BT_CONNECT; + iso_sock_set_timer(sk, sk->sk_sndtimeo); + } + + release_sock(sk); + +unlock: + hci_dev_unlock(hdev); + hci_dev_put(hdev); + return err; +} + +static int iso_connect_cis(struct sock *sk) +{ + struct iso_conn *conn; + struct hci_conn *hcon; + struct hci_dev *hdev; + int err; + + BT_DBG("%pMR -> %pMR", &iso_pi(sk)->src, &iso_pi(sk)->dst); + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + + if (!cis_central_capable(hdev)) { + err = -EOPNOTSUPP; + goto unlock; + } + + /* Fail if either PHYs are marked as disabled */ + if (!iso_pi(sk)->qos.in.phy && !iso_pi(sk)->qos.out.phy) { + err = -EINVAL; + goto unlock; + } + + /* Just bind if DEFER_SETUP has been set */ + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + hcon = hci_bind_cis(hdev, &iso_pi(sk)->dst, + le_addr_type(iso_pi(sk)->dst_type), + &iso_pi(sk)->qos); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto unlock; + } + } else { + hcon = hci_connect_cis(hdev, &iso_pi(sk)->dst, + le_addr_type(iso_pi(sk)->dst_type), + &iso_pi(sk)->qos); + if (IS_ERR(hcon)) { + err = PTR_ERR(hcon); + goto unlock; + } + } + + conn = iso_conn_add(hcon); + if (!conn) { + hci_conn_drop(hcon); + err = -ENOMEM; + goto unlock; + } + + lock_sock(sk); + + err = iso_chan_add(conn, sk, NULL); + if (err) { + release_sock(sk); + goto unlock; + } + + /* Update source addr of the socket */ + bacpy(&iso_pi(sk)->src, &hcon->src); + + if (hcon->state == BT_CONNECTED) { + iso_sock_clear_timer(sk); + sk->sk_state = 
BT_CONNECTED; + } else if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECT; + } else { + sk->sk_state = BT_CONNECT; + iso_sock_set_timer(sk, sk->sk_sndtimeo); + } + + release_sock(sk); + +unlock: + hci_dev_unlock(hdev); + hci_dev_put(hdev); + return err; +} + +static struct bt_iso_qos *iso_sock_get_qos(struct sock *sk) +{ + if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONNECT2) + return &iso_pi(sk)->conn->hcon->iso_qos; + + return &iso_pi(sk)->qos; +} + +static int iso_send_frame(struct sock *sk, struct sk_buff *skb) +{ + struct iso_conn *conn = iso_pi(sk)->conn; + struct bt_iso_qos *qos = iso_sock_get_qos(sk); + struct hci_iso_data_hdr *hdr; + int len = 0; + + BT_DBG("sk %p len %d", sk, skb->len); + + if (skb->len > qos->out.sdu) + return -EMSGSIZE; + + len = skb->len; + + /* Push ISO data header */ + hdr = skb_push(skb, HCI_ISO_DATA_HDR_SIZE); + hdr->sn = cpu_to_le16(conn->tx_sn++); + hdr->slen = cpu_to_le16(hci_iso_data_len_pack(len, + HCI_ISO_STATUS_VALID)); + + if (sk->sk_state == BT_CONNECTED) + hci_send_iso(conn->hcon, skb); + else + len = -ENOTCONN; + + return len; +} + +static void iso_recv_frame(struct iso_conn *conn, struct sk_buff *skb) +{ + struct sock *sk; + + iso_conn_lock(conn); + sk = conn->sk; + iso_conn_unlock(conn); + + if (!sk) + goto drop; + + BT_DBG("sk %p len %d", sk, skb->len); + + if (sk->sk_state != BT_CONNECTED) + goto drop; + + if (!sock_queue_rcv_skb(sk, skb)) + return; + +drop: + kfree_skb(skb); +} + +/* -------- Socket interface ---------- */ +static struct sock *__iso_get_sock_listen_by_addr(bdaddr_t *src, bdaddr_t *dst) +{ + struct sock *sk; + + sk_for_each(sk, &iso_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (bacmp(&iso_pi(sk)->dst, dst)) + continue; + + if (!bacmp(&iso_pi(sk)->src, src)) + return sk; + } + + return NULL; +} + +static struct sock *__iso_get_sock_listen_by_sid(bdaddr_t *ba, bdaddr_t *bc, + __u8 sid) +{ + struct sock *sk; + + sk_for_each(sk, &iso_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (bacmp(&iso_pi(sk)->src, ba)) + continue; + + if (bacmp(&iso_pi(sk)->dst, bc)) + continue; + + if (iso_pi(sk)->bc_sid == sid) + return sk; + } + + return NULL; +} + +typedef bool (*iso_sock_match_t)(struct sock *sk, void *data); + +/* Find socket listening: + * source bdaddr (Unicast) + * destination bdaddr (Broadcast only) + * match func - pass NULL to ignore + * match func data - pass -1 to ignore + * Returns closest match. + */ +static struct sock *iso_get_sock_listen(bdaddr_t *src, bdaddr_t *dst, + iso_sock_match_t match, void *data) +{ + struct sock *sk = NULL, *sk1 = NULL; + + read_lock(&iso_sk_list.lock); + + sk_for_each(sk, &iso_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + /* Match Broadcast destination */ + if (bacmp(dst, BDADDR_ANY) && bacmp(&iso_pi(sk)->dst, dst)) + continue; + + /* Use Match function if provided */ + if (match && !match(sk, data)) + continue; + + /* Exact match. */ + if (!bacmp(&iso_pi(sk)->src, src)) + break; + + /* Closest match */ + if (!bacmp(&iso_pi(sk)->src, BDADDR_ANY)) + sk1 = sk; + } + + read_unlock(&iso_sk_list.lock); + + return sk ? 
sk : sk1; +} + +static void iso_sock_destruct(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + +static void iso_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + BT_DBG("parent %p", parent); + + /* Close not yet accepted channels */ + while ((sk = bt_accept_dequeue(parent, NULL))) { + iso_sock_close(sk); + iso_sock_kill(sk); + } + + parent->sk_state = BT_CLOSED; + sock_set_flag(parent, SOCK_ZAPPED); +} + +/* Kill socket (only if zapped and orphan) + * Must be called on unlocked socket. + */ +static void iso_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket || + sock_flag(sk, SOCK_DEAD)) + return; + + BT_DBG("sk %p state %d", sk, sk->sk_state); + + /* Kill poor orphan */ + bt_sock_unlink(&iso_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +static void iso_conn_defer_reject(struct hci_conn *conn) +{ + struct hci_cp_le_reject_cis cp; + + BT_DBG("conn %p", conn); + + memset(&cp, 0, sizeof(cp)); + cp.handle = cpu_to_le16(conn->handle); + cp.reason = HCI_ERROR_REJ_BAD_ADDR; + hci_send_cmd(conn->hdev, HCI_OP_LE_REJECT_CIS, sizeof(cp), &cp); +} + +static void __iso_sock_close(struct sock *sk) +{ + BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); + + switch (sk->sk_state) { + case BT_LISTEN: + iso_sock_cleanup_listen(sk); + break; + + case BT_CONNECTED: + case BT_CONFIG: + if (iso_pi(sk)->conn->hcon) { + sk->sk_state = BT_DISCONN; + iso_sock_set_timer(sk, ISO_DISCONN_TIMEOUT); + iso_conn_lock(iso_pi(sk)->conn); + hci_conn_drop(iso_pi(sk)->conn->hcon); + iso_pi(sk)->conn->hcon = NULL; + iso_conn_unlock(iso_pi(sk)->conn); + } else { + iso_chan_del(sk, ECONNRESET); + } + break; + + case BT_CONNECT2: + if (iso_pi(sk)->conn->hcon) + iso_conn_defer_reject(iso_pi(sk)->conn->hcon); + iso_chan_del(sk, ECONNRESET); + break; + case BT_CONNECT: + /* In case of DEFER_SETUP the hcon would be bound to CIG which + * needs to be removed so just call hci_conn_del so the cleanup + * callback do what is needed. + */ + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags) && + iso_pi(sk)->conn->hcon) { + hci_conn_del(iso_pi(sk)->conn->hcon); + iso_pi(sk)->conn->hcon = NULL; + } + + iso_chan_del(sk, ECONNRESET); + break; + case BT_DISCONN: + iso_chan_del(sk, ECONNRESET); + break; + + default: + sock_set_flag(sk, SOCK_ZAPPED); + break; + } +} + +/* Must be called on unlocked socket. 
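+ * iso_sock_close() takes and drops the socket lock itself, and the final
+ * iso_sock_kill() may release the last reference, so callers must not
+ * touch @sk afterwards unless they hold a reference of their own.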
*/ +static void iso_sock_close(struct sock *sk) +{ + iso_sock_clear_timer(sk); + lock_sock(sk); + __iso_sock_close(sk); + release_sock(sk); + iso_sock_kill(sk); +} + +static void iso_sock_init(struct sock *sk, struct sock *parent) +{ + BT_DBG("sk %p", sk); + + if (parent) { + sk->sk_type = parent->sk_type; + bt_sk(sk)->flags = bt_sk(parent)->flags; + security_sk_clone(parent, sk); + } +} + +static struct proto iso_proto = { + .name = "ISO", + .owner = THIS_MODULE, + .obj_size = sizeof(struct iso_pinfo) +}; + +#define DEFAULT_IO_QOS \ +{ \ + .interval = 10000u, \ + .latency = 10u, \ + .sdu = 40u, \ + .phy = BT_ISO_PHY_2M, \ + .rtn = 2u, \ +} + +static struct bt_iso_qos default_qos = { + .cig = BT_ISO_QOS_CIG_UNSET, + .cis = BT_ISO_QOS_CIS_UNSET, + .sca = 0x00, + .packing = 0x00, + .framing = 0x00, + .in = DEFAULT_IO_QOS, + .out = DEFAULT_IO_QOS, +}; + +static struct sock *iso_sock_alloc(struct net *net, struct socket *sock, + int proto, gfp_t prio, int kern) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_BLUETOOTH, prio, &iso_proto, kern); + if (!sk) + return NULL; + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&bt_sk(sk)->accept_q); + + sk->sk_destruct = iso_sock_destruct; + sk->sk_sndtimeo = ISO_CONN_TIMEOUT; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = BT_OPEN; + + /* Set address type as public as default src address is BDADDR_ANY */ + iso_pi(sk)->src_type = BDADDR_LE_PUBLIC; + + iso_pi(sk)->qos = default_qos; + + bt_sock_link(&iso_sk_list, sk); + return sk; +} + +static int iso_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + sock->state = SS_UNCONNECTED; + + if (sock->type != SOCK_SEQPACKET) + return -ESOCKTNOSUPPORT; + + sock->ops = &iso_sock_ops; + + sk = iso_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); + if (!sk) + return -ENOMEM; + + iso_sock_init(sk, NULL); + return 0; +} + +static int iso_sock_bind_bc(struct socket *sock, struct sockaddr *addr, + int addr_len) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int i; + + BT_DBG("sk %p bc_sid %u bc_num_bis %u", sk, sa->iso_bc->bc_sid, + sa->iso_bc->bc_num_bis); + + if (addr_len > sizeof(*sa) + sizeof(*sa->iso_bc) || + sa->iso_bc->bc_num_bis < 0x01 || sa->iso_bc->bc_num_bis > 0x1f) + return -EINVAL; + + bacpy(&iso_pi(sk)->dst, &sa->iso_bc->bc_bdaddr); + iso_pi(sk)->dst_type = sa->iso_bc->bc_bdaddr_type; + iso_pi(sk)->sync_handle = -1; + iso_pi(sk)->bc_sid = sa->iso_bc->bc_sid; + iso_pi(sk)->bc_num_bis = sa->iso_bc->bc_num_bis; + + for (i = 0; i < iso_pi(sk)->bc_num_bis; i++) { + if (sa->iso_bc->bc_bis[i] < 0x01 || + sa->iso_bc->bc_bis[i] > 0x1f) + return -EINVAL; + + memcpy(iso_pi(sk)->bc_bis, sa->iso_bc->bc_bis, + iso_pi(sk)->bc_num_bis); + } + + return 0; +} + +static int iso_sock_bind(struct socket *sock, struct sockaddr *addr, + int addr_len) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p %pMR type %u", sk, &sa->iso_bdaddr, sa->iso_bdaddr_type); + + if (!addr || addr_len < sizeof(struct sockaddr_iso) || + addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + lock_sock(sk); + + if (sk->sk_state != BT_OPEN) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } + + /* Check if the address type is of LE type */ + if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) { + err = -EINVAL; + goto done; + } + + bacpy(&iso_pi(sk)->src, 
&sa->iso_bdaddr); + iso_pi(sk)->src_type = sa->iso_bdaddr_type; + + /* Check for Broadcast address */ + if (addr_len > sizeof(*sa)) { + err = iso_sock_bind_bc(sock, addr, addr_len); + if (err) + goto done; + } + + sk->sk_state = BT_BOUND; + +done: + release_sock(sk); + return err; +} + +static int iso_sock_connect(struct socket *sock, struct sockaddr *addr, + int alen, int flags) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + int err; + + BT_DBG("sk %p", sk); + + if (alen < sizeof(struct sockaddr_iso) || + addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) + return -EBADFD; + + if (sk->sk_type != SOCK_SEQPACKET) + return -EINVAL; + + /* Check if the address type is of LE type */ + if (!bdaddr_type_is_le(sa->iso_bdaddr_type)) + return -EINVAL; + + lock_sock(sk); + + bacpy(&iso_pi(sk)->dst, &sa->iso_bdaddr); + iso_pi(sk)->dst_type = sa->iso_bdaddr_type; + + release_sock(sk); + + if (bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) + err = iso_connect_cis(sk); + else + err = iso_connect_bis(sk); + + if (err) + return err; + + lock_sock(sk); + + if (!test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + err = bt_sock_wait_state(sk, BT_CONNECTED, + sock_sndtimeo(sk, flags & O_NONBLOCK)); + } + + release_sock(sk); + return err; +} + +static int iso_listen_bis(struct sock *sk) +{ + struct hci_dev *hdev; + int err = 0; + + BT_DBG("%pMR -> %pMR (SID 0x%2.2x)", &iso_pi(sk)->src, + &iso_pi(sk)->dst, iso_pi(sk)->bc_sid); + + write_lock(&iso_sk_list.lock); + + if (__iso_get_sock_listen_by_sid(&iso_pi(sk)->src, &iso_pi(sk)->dst, + iso_pi(sk)->bc_sid)) + err = -EADDRINUSE; + + write_unlock(&iso_sk_list.lock); + + if (err) + return err; + + hdev = hci_get_route(&iso_pi(sk)->dst, &iso_pi(sk)->src, + iso_pi(sk)->src_type); + if (!hdev) + return -EHOSTUNREACH; + + hci_dev_lock(hdev); + + err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, iso_pi(sk)->dst_type, + iso_pi(sk)->bc_sid); + + hci_dev_unlock(hdev); + hci_dev_put(hdev); + + return err; +} + +static int iso_listen_cis(struct sock *sk) +{ + int err = 0; + + BT_DBG("%pMR", &iso_pi(sk)->src); + + write_lock(&iso_sk_list.lock); + + if (__iso_get_sock_listen_by_addr(&iso_pi(sk)->src, &iso_pi(sk)->dst)) + err = -EADDRINUSE; + + write_unlock(&iso_sk_list.lock); + + return err; +} + +static int iso_sock_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p backlog %d", sk, backlog); + + lock_sock(sk); + + if (sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_SEQPACKET) { + err = -EINVAL; + goto done; + } + + if (!bacmp(&iso_pi(sk)->dst, BDADDR_ANY)) + err = iso_listen_cis(sk); + else + err = iso_listen_bis(sk); + + if (err) + goto done; + + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + + sk->sk_state = BT_LISTEN; + +done: + release_sock(sk); + return err; +} + +static int iso_sock_accept(struct socket *sock, struct socket *newsock, + int flags, bool kern) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct sock *sk = sock->sk, *ch; + long timeo; + int err = 0; + + lock_sock(sk); + + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + + BT_DBG("sk %p timeo %ld", sk, timeo); + + /* Wait for an incoming connection. (wake-one). 
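+	 * The queue is joined with add_wait_queue_exclusive() so that only
+	 * one pending acceptor is woken per incoming channel; the socket
+	 * lock is dropped around wait_woken() so the path that enqueues the
+	 * child socket can make progress.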
*/ + add_wait_queue_exclusive(sk_sleep(sk), &wait); + while (1) { + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + break; + } + + ch = bt_accept_dequeue(sk, newsock); + if (ch) + break; + + if (!timeo) { + err = -EAGAIN; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + lock_sock(sk); + } + remove_wait_queue(sk_sleep(sk), &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + + BT_DBG("new socket %p", ch); + +done: + release_sock(sk); + return err; +} + +static int iso_sock_getname(struct socket *sock, struct sockaddr *addr, + int peer) +{ + struct sockaddr_iso *sa = (struct sockaddr_iso *)addr; + struct sock *sk = sock->sk; + + BT_DBG("sock %p, sk %p", sock, sk); + + addr->sa_family = AF_BLUETOOTH; + + if (peer) { + bacpy(&sa->iso_bdaddr, &iso_pi(sk)->dst); + sa->iso_bdaddr_type = iso_pi(sk)->dst_type; + } else { + bacpy(&sa->iso_bdaddr, &iso_pi(sk)->src); + sa->iso_bdaddr_type = iso_pi(sk)->src_type; + } + + return sizeof(struct sockaddr_iso); +} + +static int iso_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + struct sock *sk = sock->sk; + struct sk_buff *skb, **frag; + size_t mtu; + int err; + + BT_DBG("sock %p, sk %p", sock, sk); + + err = sock_error(sk); + if (err) + return err; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + lock_sock(sk); + + if (sk->sk_state != BT_CONNECTED) { + release_sock(sk); + return -ENOTCONN; + } + + mtu = iso_pi(sk)->conn->hcon->hdev->iso_mtu; + + release_sock(sk); + + skb = bt_skb_sendmsg(sk, msg, len, mtu, HCI_ISO_DATA_HDR_SIZE, 0); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + len -= skb->len; + + BT_DBG("skb %p len %d", sk, skb->len); + + /* Continuation fragments */ + frag = &skb_shinfo(skb)->frag_list; + while (len) { + struct sk_buff *tmp; + + tmp = bt_skb_sendmsg(sk, msg, len, mtu, 0, 0); + if (IS_ERR(tmp)) { + kfree_skb(skb); + return PTR_ERR(tmp); + } + + *frag = tmp; + + len -= tmp->len; + + skb->len += tmp->len; + skb->data_len += tmp->len; + + BT_DBG("frag %p len %d", *frag, tmp->len); + + frag = &(*frag)->next; + } + + lock_sock(sk); + + if (sk->sk_state == BT_CONNECTED) + err = iso_send_frame(sk, skb); + else + err = -ENOTCONN; + + release_sock(sk); + + if (err < 0) + kfree_skb(skb); + return err; +} + +static void iso_conn_defer_accept(struct hci_conn *conn) +{ + struct hci_cp_le_accept_cis cp; + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p", conn); + + conn->state = BT_CONFIG; + + cp.handle = cpu_to_le16(conn->handle); + + hci_send_cmd(hdev, HCI_OP_LE_ACCEPT_CIS, sizeof(cp), &cp); +} + +static int iso_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) +{ + struct sock *sk = sock->sk; + struct iso_pinfo *pi = iso_pi(sk); + + BT_DBG("sk %p", sk); + + if (test_and_clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + lock_sock(sk); + switch (sk->sk_state) { + case BT_CONNECT2: + iso_conn_defer_accept(pi->conn->hcon); + sk->sk_state = BT_CONFIG; + release_sock(sk); + return 0; + case BT_CONNECT: + release_sock(sk); + return iso_connect_cis(sk); + default: + release_sock(sk); + break; + } + } + + return bt_sock_recvmsg(sock, msg, len, flags); +} + +static bool check_io_qos(struct bt_iso_io_qos *qos) +{ + /* If no PHY is enable SDU must be 0 */ + if (!qos->phy && qos->sdu) + return false; + + if (qos->interval && (qos->interval < 0xff || qos->interval > 0xfffff)) + return false; + + if (qos->latency && (qos->latency < 0x05 || 
qos->latency > 0xfa0)) + return false; + + if (qos->phy > BT_ISO_PHY_ANY) + return false; + + return true; +} + +static bool check_qos(struct bt_iso_qos *qos) +{ + if (qos->sca > 0x07) + return false; + + if (qos->packing > 0x01) + return false; + + if (qos->framing > 0x01) + return false; + + if (!check_io_qos(&qos->in)) + return false; + + if (!check_io_qos(&qos->out)) + return false; + + return true; +} + +static int iso_sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + int len, err = 0; + struct bt_iso_qos qos; + u32 opt; + + BT_DBG("sk %p", sk); + + lock_sock(sk); + + switch (optname) { + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { + err = -EFAULT; + break; + } + + if (opt) + set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + else + clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + break; + + case BT_ISO_QOS: + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && + sk->sk_state != BT_CONNECT2) { + err = -EINVAL; + break; + } + + len = min_t(unsigned int, sizeof(qos), optlen); + if (len != sizeof(qos)) { + err = -EINVAL; + break; + } + + memset(&qos, 0, sizeof(qos)); + + if (copy_from_sockptr(&qos, optval, len)) { + err = -EFAULT; + break; + } + + if (!check_qos(&qos)) { + err = -EINVAL; + break; + } + + iso_pi(sk)->qos = qos; + + break; + + case BT_ISO_BASE: + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND && + sk->sk_state != BT_CONNECT2) { + err = -EINVAL; + break; + } + + if (optlen > sizeof(iso_pi(sk)->base)) { + err = -EOVERFLOW; + break; + } + + len = min_t(unsigned int, sizeof(iso_pi(sk)->base), optlen); + + if (copy_from_sockptr(iso_pi(sk)->base, optval, len)) { + err = -EFAULT; + break; + } + + iso_pi(sk)->base_len = len; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int iso_sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + int len, err = 0; + struct bt_iso_qos *qos; + u8 base_len; + u8 *base; + + BT_DBG("sk %p", sk); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case BT_DEFER_SETUP: + if (sk->sk_state == BT_CONNECTED) { + err = -EINVAL; + break; + } + + if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), + (u32 __user *)optval)) + err = -EFAULT; + + break; + + case BT_ISO_QOS: + qos = iso_sock_get_qos(sk); + + len = min_t(unsigned int, len, sizeof(*qos)); + if (copy_to_user(optval, qos, len)) + err = -EFAULT; + + break; + + case BT_ISO_BASE: + if (sk->sk_state == BT_CONNECTED) { + base_len = iso_pi(sk)->conn->hcon->le_per_adv_data_len; + base = iso_pi(sk)->conn->hcon->le_per_adv_data; + } else { + base_len = iso_pi(sk)->base_len; + base = iso_pi(sk)->base; + } + + len = min_t(unsigned int, len, base_len); + if (copy_to_user(optval, base, len)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int iso_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p, how %d", sock, sk, how); + + if (!sk) + return 0; + + sock_hold(sk); + lock_sock(sk); + + switch (how) { + case SHUT_RD: + if (sk->sk_shutdown & RCV_SHUTDOWN) + goto unlock; + sk->sk_shutdown |= RCV_SHUTDOWN; + break; + case SHUT_WR: + if (sk->sk_shutdown & 
SEND_SHUTDOWN) + goto unlock; + sk->sk_shutdown |= SEND_SHUTDOWN; + break; + case SHUT_RDWR: + if (sk->sk_shutdown & SHUTDOWN_MASK) + goto unlock; + sk->sk_shutdown |= SHUTDOWN_MASK; + break; + } + + iso_sock_clear_timer(sk); + __iso_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + +unlock: + release_sock(sk); + sock_put(sk); + + return err; +} + +static int iso_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + iso_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) && + !(current->flags & PF_EXITING)) { + lock_sock(sk); + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + release_sock(sk); + } + + sock_orphan(sk); + iso_sock_kill(sk); + return err; +} + +static void iso_sock_ready(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + if (!sk) + return; + + lock_sock(sk); + iso_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + sk->sk_state_change(sk); + release_sock(sk); +} + +struct iso_list_data { + struct hci_conn *hcon; + int count; +}; + +static bool iso_match_big(struct sock *sk, void *data) +{ + struct hci_evt_le_big_sync_estabilished *ev = data; + + return ev->handle == iso_pi(sk)->qos.big; +} + +static void iso_conn_ready(struct iso_conn *conn) +{ + struct sock *parent; + struct sock *sk = conn->sk; + struct hci_evt_le_big_sync_estabilished *ev; + struct hci_conn *hcon; + + BT_DBG("conn %p", conn); + + if (sk) { + iso_sock_ready(conn->sk); + } else { + hcon = conn->hcon; + if (!hcon) + return; + + ev = hci_recv_event_data(hcon->hdev, + HCI_EVT_LE_BIG_SYNC_ESTABILISHED); + if (ev) + parent = iso_get_sock_listen(&hcon->src, + &hcon->dst, + iso_match_big, ev); + else + parent = iso_get_sock_listen(&hcon->src, + BDADDR_ANY, NULL, NULL); + + if (!parent) + return; + + lock_sock(parent); + + sk = iso_sock_alloc(sock_net(parent), NULL, + BTPROTO_ISO, GFP_ATOMIC, 0); + if (!sk) { + release_sock(parent); + return; + } + + iso_sock_init(sk, parent); + + bacpy(&iso_pi(sk)->src, &hcon->src); + iso_pi(sk)->src_type = hcon->src_type; + + /* If hcon has no destination address (BDADDR_ANY) it means it + * was created by HCI_EVT_LE_BIG_SYNC_ESTABILISHED so we need to + * initialize using the parent socket destination address.
+ */ + if (!bacmp(&hcon->dst, BDADDR_ANY)) { + bacpy(&hcon->dst, &iso_pi(parent)->dst); + hcon->dst_type = iso_pi(parent)->dst_type; + hcon->sync_handle = iso_pi(parent)->sync_handle; + } + + bacpy(&iso_pi(sk)->dst, &hcon->dst); + iso_pi(sk)->dst_type = hcon->dst_type; + + hci_conn_hold(hcon); + iso_chan_add(conn, sk, parent); + + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) + sk->sk_state = BT_CONNECT2; + else + sk->sk_state = BT_CONNECTED; + + /* Wake up parent */ + parent->sk_data_ready(parent); + + release_sock(parent); + } +} + +static bool iso_match_sid(struct sock *sk, void *data) +{ + struct hci_ev_le_pa_sync_established *ev = data; + + return ev->sid == iso_pi(sk)->bc_sid; +} + +static bool iso_match_sync_handle(struct sock *sk, void *data) +{ + struct hci_evt_le_big_info_adv_report *ev = data; + + return le16_to_cpu(ev->sync_handle) == iso_pi(sk)->sync_handle; +} + +/* ----- ISO interface with lower layer (HCI) ----- */ + +int iso_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) +{ + struct hci_ev_le_pa_sync_established *ev1; + struct hci_evt_le_big_info_adv_report *ev2; + struct sock *sk; + int lm = 0; + + bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); + + /* Broadcast receiver requires handling of some events before it can + * proceed to establishing a BIG sync: + * + * 1. HCI_EV_LE_PA_SYNC_ESTABLISHED: The socket may specify a specific + * SID to listen to and once sync is established its handle needs to + * be stored in iso_pi(sk)->sync_handle so it can be matched once + * receiving the BIG Info. + * 2. HCI_EVT_LE_BIG_INFO_ADV_REPORT: When connect_ind is triggered by + * a BIG Info it attempts to check if there is any listening socket + * with the same sync_handle and, if there is, attempts to create a + * sync. + */ + ev1 = hci_recv_event_data(hdev, HCI_EV_LE_PA_SYNC_ESTABLISHED); + if (ev1) { + sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, iso_match_sid, + ev1); + if (sk) + iso_pi(sk)->sync_handle = le16_to_cpu(ev1->handle); + + goto done; + } + + ev2 = hci_recv_event_data(hdev, HCI_EVT_LE_BIG_INFO_ADV_REPORT); + if (ev2) { + sk = iso_get_sock_listen(&hdev->bdaddr, bdaddr, + iso_match_sync_handle, ev2); + if (sk) { + int err; + + if (ev2->num_bis < iso_pi(sk)->bc_num_bis) + iso_pi(sk)->bc_num_bis = ev2->num_bis; + + err = hci_le_big_create_sync(hdev, + &iso_pi(sk)->qos, + iso_pi(sk)->sync_handle, + iso_pi(sk)->bc_num_bis, + iso_pi(sk)->bc_bis); + if (err) { + bt_dev_err(hdev, "hci_le_big_create_sync: %d", + err); + sk = NULL; + } + } + } else { + sk = iso_get_sock_listen(&hdev->bdaddr, BDADDR_ANY, NULL, NULL); + } + +done: + if (!sk) + return lm; + + lm |= HCI_LM_ACCEPT; + + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) + *flags |= HCI_PROTO_DEFER; + + return lm; +} + +static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) +{ + if (hcon->type != ISO_LINK) { + if (hcon->type != LE_LINK) + return; + + /* Check if LE link has failed */ + if (status) { + if (hcon->link) + iso_conn_del(hcon->link, bt_to_errno(status)); + return; + } + + /* Create CIS if pending */ + hci_le_create_cis(hcon); + return; + } + + BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); + + if (!status) { + struct iso_conn *conn; + + conn = iso_conn_add(hcon); + if (conn) + iso_conn_ready(conn); + } else { + iso_conn_del(hcon, bt_to_errno(status)); + } +} + +static void iso_disconn_cfm(struct hci_conn *hcon, __u8 reason) +{ + if (hcon->type != ISO_LINK) + return; + + BT_DBG("hcon %p reason %d", hcon, reason); + + iso_conn_del(hcon, bt_to_errno(reason)); +}
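+
+/* Editorial summary (not part of the upstream 6.1.76 file): iso_recv()
+ * below recombines HCI ISO fragments into complete SDUs based on the PB
+ * flag of each packet:
+ *
+ *   ISO_SINGLE - the whole SDU fits in one packet and is delivered as is
+ *   ISO_START  - first fragment; conn->rx_skb is allocated for the full
+ *                SDU and conn->rx_len tracks the bytes still expected
+ *   ISO_CONT   - intermediate fragment copied into conn->rx_skb
+ *   ISO_END    - final fragment; once conn->rx_len reaches 0 the rebuilt
+ *                SDU is handed to iso_recv_frame()
+ *
+ * Worked example for a 300 byte SDU arriving in 128 byte fragments:
+ * ISO_START (total 300, frag 128) leaves rx_len = 172, ISO_CONT (128)
+ * leaves rx_len = 44 and ISO_END (44) completes and delivers the SDU.
+ * Any sequencing error (an unexpected start or continuation, or an
+ * over-long fragment) drops both the partial SDU and the offending
+ * packet.
+ */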
+ +void iso_recv(struct hci_conn *hcon, struct sk_buff *skb, u16 flags) +{ + struct iso_conn *conn = hcon->iso_data; + __u16 pb, ts, len; + + if (!conn) + goto drop; + + pb = hci_iso_flags_pb(flags); + ts = hci_iso_flags_ts(flags); + + BT_DBG("conn %p len %d pb 0x%x ts 0x%x", conn, skb->len, pb, ts); + + switch (pb) { + case ISO_START: + case ISO_SINGLE: + if (conn->rx_len) { + BT_ERR("Unexpected start frame (len %d)", skb->len); + kfree_skb(conn->rx_skb); + conn->rx_skb = NULL; + conn->rx_len = 0; + } + + if (ts) { + struct hci_iso_ts_data_hdr *hdr; + + /* TODO: add timestamp to the packet? */ + hdr = skb_pull_data(skb, HCI_ISO_TS_DATA_HDR_SIZE); + if (!hdr) { + BT_ERR("Frame is too short (len %d)", skb->len); + goto drop; + } + + len = __le16_to_cpu(hdr->slen); + } else { + struct hci_iso_data_hdr *hdr; + + hdr = skb_pull_data(skb, HCI_ISO_DATA_HDR_SIZE); + if (!hdr) { + BT_ERR("Frame is too short (len %d)", skb->len); + goto drop; + } + + len = __le16_to_cpu(hdr->slen); + } + + flags = hci_iso_data_flags(len); + len = hci_iso_data_len(len); + + BT_DBG("Start: total len %d, frag len %d flags 0x%4.4x", len, + skb->len, flags); + + if (len == skb->len) { + /* Complete frame received */ + iso_recv_frame(conn, skb); + return; + } + + if (pb == ISO_SINGLE) { + BT_ERR("Frame malformed (len %d, expected len %d)", + skb->len, len); + goto drop; + } + + if (skb->len > len) { + BT_ERR("Frame is too long (len %d, expected len %d)", + skb->len, len); + goto drop; + } + + /* Allocate skb for the complete frame (with header) */ + conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); + if (!conn->rx_skb) + goto drop; + + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len = len - skb->len; + break; + + case ISO_CONT: + BT_DBG("Cont: frag len %d (expecting %d)", skb->len, + conn->rx_len); + + if (!conn->rx_len) { + BT_ERR("Unexpected continuation frame (len %d)", + skb->len); + goto drop; + } + + if (skb->len > conn->rx_len) { + BT_ERR("Fragment is too long (len %d, expected %d)", + skb->len, conn->rx_len); + kfree_skb(conn->rx_skb); + conn->rx_skb = NULL; + conn->rx_len = 0; + goto drop; + } + + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len -= skb->len; + return; + + case ISO_END: + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len), + skb->len); + conn->rx_len -= skb->len; + + if (!conn->rx_len) { + struct sk_buff *rx_skb = conn->rx_skb; + + /* Complete frame received. iso_recv_frame + * takes ownership of the skb so set the global + * rx_skb pointer to NULL first. 
+ */ + conn->rx_skb = NULL; + iso_recv_frame(conn, rx_skb); + } + break; + } + +drop: + kfree_skb(skb); +} + +static struct hci_cb iso_cb = { + .name = "ISO", + .connect_cfm = iso_connect_cfm, + .disconn_cfm = iso_disconn_cfm, +}; + +static int iso_debugfs_show(struct seq_file *f, void *p) +{ + struct sock *sk; + + read_lock(&iso_sk_list.lock); + + sk_for_each(sk, &iso_sk_list.head) { + seq_printf(f, "%pMR %pMR %d\n", &iso_pi(sk)->src, + &iso_pi(sk)->dst, sk->sk_state); + } + + read_unlock(&iso_sk_list.lock); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(iso_debugfs); + +static struct dentry *iso_debugfs; + +static const struct proto_ops iso_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = iso_sock_release, + .bind = iso_sock_bind, + .connect = iso_sock_connect, + .listen = iso_sock_listen, + .accept = iso_sock_accept, + .getname = iso_sock_getname, + .sendmsg = iso_sock_sendmsg, + .recvmsg = iso_sock_recvmsg, + .poll = bt_sock_poll, + .ioctl = bt_sock_ioctl, + .mmap = sock_no_mmap, + .socketpair = sock_no_socketpair, + .shutdown = iso_sock_shutdown, + .setsockopt = iso_sock_setsockopt, + .getsockopt = iso_sock_getsockopt +}; + +static const struct net_proto_family iso_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = iso_sock_create, +}; + +static bool iso_inited; + +bool iso_enabled(void) +{ + return iso_inited; +} + +int iso_init(void) +{ + int err; + + BUILD_BUG_ON(sizeof(struct sockaddr_iso) > sizeof(struct sockaddr)); + + if (iso_inited) + return -EALREADY; + + err = proto_register(&iso_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_ISO, &iso_sock_family_ops); + if (err < 0) { + BT_ERR("ISO socket registration failed"); + goto error; + } + + err = bt_procfs_init(&init_net, "iso", &iso_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create ISO proc file"); + bt_sock_unregister(BTPROTO_ISO); + goto error; + } + + BT_INFO("ISO socket layer initialized"); + + hci_register_cb(&iso_cb); + + if (IS_ERR_OR_NULL(bt_debugfs)) + return 0; + + if (!iso_debugfs) { + iso_debugfs = debugfs_create_file("iso", 0444, bt_debugfs, + NULL, &iso_debugfs_fops); + } + + iso_inited = true; + + return 0; + +error: + proto_unregister(&iso_proto); + return err; +} + +int iso_exit(void) +{ + if (!iso_inited) + return -EALREADY; + + bt_procfs_cleanup(&init_net, "iso"); + + debugfs_remove(iso_debugfs); + iso_debugfs = NULL; + + hci_unregister_cb(&iso_cb); + + bt_sock_unregister(BTPROTO_ISO); + + proto_unregister(&iso_proto); + + iso_inited = false; + + return 0; +} diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c new file mode 100644 index 000000000..4c5793053 --- /dev/null +++ b/net/bluetooth/l2cap_core.c @@ -0,0 +1,8657 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> + Copyright (C) 2010 Google Inc. + Copyright (C) 2011 ProFUSION Embedded Systems + Copyright (c) 2012 Code Aurora Forum. All rights reserved. + + Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth L2CAP core. */ + +#include <linux/module.h> + +#include <linux/debugfs.h> +#include <linux/crc16.h> +#include <linux/filter.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> + +#include "smp.h" +#include "a2mp.h" +#include "amp.h" + +#define LE_FLOWCTL_MAX_CREDITS 65535 + +bool disable_ertm; +bool enable_ecred; + +static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD; + +static LIST_HEAD(chan_list); +static DEFINE_RWLOCK(chan_list_lock); + +static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, + u8 code, u8 ident, u16 dlen, void *data); +static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, + void *data); +static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size); +static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err); + +static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control, + struct sk_buff_head *skbs, u8 event); +static void l2cap_retrans_timeout(struct work_struct *work); +static void l2cap_monitor_timeout(struct work_struct *work); +static void l2cap_ack_timeout(struct work_struct *work); + +static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type) +{ + if (link_type == LE_LINK) { + if (bdaddr_type == ADDR_LE_DEV_PUBLIC) + return BDADDR_LE_PUBLIC; + else + return BDADDR_LE_RANDOM; + } + + return BDADDR_BREDR; +} + +static inline u8 bdaddr_src_type(struct hci_conn *hcon) +{ + return bdaddr_type(hcon->type, hcon->src_type); +} + +static inline u8 bdaddr_dst_type(struct hci_conn *hcon) +{ + return bdaddr_type(hcon->type, hcon->dst_type); +} + +/* ---- L2CAP channels ---- */ + +static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, + u16 cid) +{ + struct l2cap_chan *c; + + list_for_each_entry(c, &conn->chan_l, list) { + if (c->dcid == cid) + return c; + } + return NULL; +} + +static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, + u16 cid) +{ + struct l2cap_chan *c; + + list_for_each_entry(c, &conn->chan_l, list) { + if (c->scid == cid) + return c; + } + return NULL; +} + +/* Find channel with given SCID. + * Returns a reference locked channel. + */ +static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, + u16 cid) +{ + struct l2cap_chan *c; + + mutex_lock(&conn->chan_lock); + c = __l2cap_get_chan_by_scid(conn, cid); + if (c) { + /* Only lock if chan reference is not 0 */ + c = l2cap_chan_hold_unless_zero(c); + if (c) + l2cap_chan_lock(c); + } + mutex_unlock(&conn->chan_lock); + + return c; +} + +/* Find channel with given DCID. + * Returns a reference locked channel. 
+ */ +static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn, + u16 cid) +{ + struct l2cap_chan *c; + + mutex_lock(&conn->chan_lock); + c = __l2cap_get_chan_by_dcid(conn, cid); + if (c) { + /* Only lock if chan reference is not 0 */ + c = l2cap_chan_hold_unless_zero(c); + if (c) + l2cap_chan_lock(c); + } + mutex_unlock(&conn->chan_lock); + + return c; +} + +static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, + u8 ident) +{ + struct l2cap_chan *c; + + list_for_each_entry(c, &conn->chan_l, list) { + if (c->ident == ident) + return c; + } + return NULL; +} + +static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, + u8 ident) +{ + struct l2cap_chan *c; + + mutex_lock(&conn->chan_lock); + c = __l2cap_get_chan_by_ident(conn, ident); + if (c) { + /* Only lock if chan reference is not 0 */ + c = l2cap_chan_hold_unless_zero(c); + if (c) + l2cap_chan_lock(c); + } + mutex_unlock(&conn->chan_lock); + + return c; +} + +static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src, + u8 src_type) +{ + struct l2cap_chan *c; + + list_for_each_entry(c, &chan_list, global_l) { + if (src_type == BDADDR_BREDR && c->src_type != BDADDR_BREDR) + continue; + + if (src_type != BDADDR_BREDR && c->src_type == BDADDR_BREDR) + continue; + + if (c->sport == psm && !bacmp(&c->src, src)) + return c; + } + return NULL; +} + +int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm) +{ + int err; + + write_lock(&chan_list_lock); + + if (psm && __l2cap_global_chan_by_addr(psm, src, chan->src_type)) { + err = -EADDRINUSE; + goto done; + } + + if (psm) { + chan->psm = psm; + chan->sport = psm; + err = 0; + } else { + u16 p, start, end, incr; + + if (chan->src_type == BDADDR_BREDR) { + start = L2CAP_PSM_DYN_START; + end = L2CAP_PSM_AUTO_END; + incr = 2; + } else { + start = L2CAP_PSM_LE_DYN_START; + end = L2CAP_PSM_LE_DYN_END; + incr = 1; + } + + err = -EINVAL; + for (p = start; p <= end; p += incr) + if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src, + chan->src_type)) { + chan->psm = cpu_to_le16(p); + chan->sport = cpu_to_le16(p); + err = 0; + break; + } + } + +done: + write_unlock(&chan_list_lock); + return err; +} +EXPORT_SYMBOL_GPL(l2cap_add_psm); + +int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid) +{ + write_lock(&chan_list_lock); + + /* Override the defaults (which are for conn-oriented) */ + chan->omtu = L2CAP_DEFAULT_MTU; + chan->chan_type = L2CAP_CHAN_FIXED; + + chan->scid = scid; + + write_unlock(&chan_list_lock); + + return 0; +} + +static u16 l2cap_alloc_cid(struct l2cap_conn *conn) +{ + u16 cid, dyn_end; + + if (conn->hcon->type == LE_LINK) + dyn_end = L2CAP_CID_LE_DYN_END; + else + dyn_end = L2CAP_CID_DYN_END; + + for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) { + if (!__l2cap_get_chan_by_scid(conn, cid)) + return cid; + } + + return 0; +} + +static void l2cap_state_change(struct l2cap_chan *chan, int state) +{ + BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state), + state_to_string(state)); + + chan->state = state; + chan->ops->state_change(chan, state, 0); +} + +static inline void l2cap_state_change_and_error(struct l2cap_chan *chan, + int state, int err) +{ + chan->state = state; + chan->ops->state_change(chan, chan->state, err); +} + +static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err) +{ + chan->ops->state_change(chan, chan->state, err); +} + +static void __set_retrans_timer(struct l2cap_chan *chan) +{ + if (!delayed_work_pending(&chan->monitor_timer) && + 
chan->retrans_timeout) { + l2cap_set_timer(chan, &chan->retrans_timer, + msecs_to_jiffies(chan->retrans_timeout)); + } +} + +static void __set_monitor_timer(struct l2cap_chan *chan) +{ + __clear_retrans_timer(chan); + if (chan->monitor_timeout) { + l2cap_set_timer(chan, &chan->monitor_timer, + msecs_to_jiffies(chan->monitor_timeout)); + } +} + +static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head, + u16 seq) +{ + struct sk_buff *skb; + + skb_queue_walk(head, skb) { + if (bt_cb(skb)->l2cap.txseq == seq) + return skb; + } + + return NULL; +} + +/* ---- L2CAP sequence number lists ---- */ + +/* For ERTM, ordered lists of sequence numbers must be tracked for + * SREJ requests that are received and for frames that are to be + * retransmitted. These seq_list functions implement a singly-linked + * list in an array, where membership in the list can also be checked + * in constant time. Items can also be added to the tail of the list + * and removed from the head in constant time, without further memory + * allocs or frees. + */ + +static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size) +{ + size_t alloc_size, i; + + /* Allocated size is a power of 2 to map sequence numbers + * (which may be up to 14 bits) in to a smaller array that is + * sized for the negotiated ERTM transmit windows. + */ + alloc_size = roundup_pow_of_two(size); + + seq_list->list = kmalloc_array(alloc_size, sizeof(u16), GFP_KERNEL); + if (!seq_list->list) + return -ENOMEM; + + seq_list->mask = alloc_size - 1; + seq_list->head = L2CAP_SEQ_LIST_CLEAR; + seq_list->tail = L2CAP_SEQ_LIST_CLEAR; + for (i = 0; i < alloc_size; i++) + seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; + + return 0; +} + +static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list) +{ + kfree(seq_list->list); +} + +static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list, + u16 seq) +{ + /* Constant-time check for list membership */ + return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR; +} + +static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list) +{ + u16 seq = seq_list->head; + u16 mask = seq_list->mask; + + seq_list->head = seq_list->list[seq & mask]; + seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR; + + if (seq_list->head == L2CAP_SEQ_LIST_TAIL) { + seq_list->head = L2CAP_SEQ_LIST_CLEAR; + seq_list->tail = L2CAP_SEQ_LIST_CLEAR; + } + + return seq; +} + +static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list) +{ + u16 i; + + if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) + return; + + for (i = 0; i <= seq_list->mask; i++) + seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR; + + seq_list->head = L2CAP_SEQ_LIST_CLEAR; + seq_list->tail = L2CAP_SEQ_LIST_CLEAR; +} + +static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq) +{ + u16 mask = seq_list->mask; + + /* All appends happen in constant time */ + + if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR) + return; + + if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR) + seq_list->head = seq; + else + seq_list->list[seq_list->tail & mask] = seq; + + seq_list->tail = seq; + seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL; +} + +static void l2cap_chan_timeout(struct work_struct *work) +{ + struct l2cap_chan *chan = container_of(work, struct l2cap_chan, + chan_timer.work); + struct l2cap_conn *conn = chan->conn; + int reason; + + BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); + + mutex_lock(&conn->chan_lock); + /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling + * this 
work. No need to call l2cap_chan_hold(chan) here again. + */ + l2cap_chan_lock(chan); + + if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG) + reason = ECONNREFUSED; + else if (chan->state == BT_CONNECT && + chan->sec_level != BT_SECURITY_SDP) + reason = ECONNREFUSED; + else + reason = ETIMEDOUT; + + l2cap_chan_close(chan, reason); + + chan->ops->close(chan); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + mutex_unlock(&conn->chan_lock); +} + +struct l2cap_chan *l2cap_chan_create(void) +{ + struct l2cap_chan *chan; + + chan = kzalloc(sizeof(*chan), GFP_ATOMIC); + if (!chan) + return NULL; + + skb_queue_head_init(&chan->tx_q); + skb_queue_head_init(&chan->srej_q); + mutex_init(&chan->lock); + + /* Set default lock nesting level */ + atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL); + + write_lock(&chan_list_lock); + list_add(&chan->global_l, &chan_list); + write_unlock(&chan_list_lock); + + INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); + INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); + INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); + INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); + + chan->state = BT_OPEN; + + kref_init(&chan->kref); + + /* This flag is cleared in l2cap_chan_ready() */ + set_bit(CONF_NOT_COMPLETE, &chan->conf_state); + + BT_DBG("chan %p", chan); + + return chan; +} +EXPORT_SYMBOL_GPL(l2cap_chan_create); + +static void l2cap_chan_destroy(struct kref *kref) +{ + struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref); + + BT_DBG("chan %p", chan); + + write_lock(&chan_list_lock); + list_del(&chan->global_l); + write_unlock(&chan_list_lock); + + kfree(chan); +} + +void l2cap_chan_hold(struct l2cap_chan *c) +{ + BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); + + kref_get(&c->kref); +} + +struct l2cap_chan *l2cap_chan_hold_unless_zero(struct l2cap_chan *c) +{ + BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); + + if (!kref_get_unless_zero(&c->kref)) + return NULL; + + return c; +} + +void l2cap_chan_put(struct l2cap_chan *c) +{ + BT_DBG("chan %p orig refcnt %u", c, kref_read(&c->kref)); + + kref_put(&c->kref, l2cap_chan_destroy); +} +EXPORT_SYMBOL_GPL(l2cap_chan_put); + +void l2cap_chan_set_defaults(struct l2cap_chan *chan) +{ + chan->fcs = L2CAP_FCS_CRC16; + chan->max_tx = L2CAP_DEFAULT_MAX_TX; + chan->tx_win = L2CAP_DEFAULT_TX_WINDOW; + chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; + chan->remote_max_tx = chan->max_tx; + chan->remote_tx_win = chan->tx_win; + chan->ack_win = L2CAP_DEFAULT_TX_WINDOW; + chan->sec_level = BT_SECURITY_LOW; + chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; + chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO; + chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO; + + chan->conf_state = 0; + set_bit(CONF_NOT_COMPLETE, &chan->conf_state); + + set_bit(FLAG_FORCE_ACTIVE, &chan->flags); +} +EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults); + +static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits) +{ + chan->sdu = NULL; + chan->sdu_last_frag = NULL; + chan->sdu_len = 0; + chan->tx_credits = tx_credits; + /* Derive MPS from connection MTU to stop HCI fragmentation */ + chan->mps = min_t(u16, chan->imtu, chan->conn->mtu - L2CAP_HDR_SIZE); + /* Give enough credits for a full packet */ + chan->rx_credits = (chan->imtu / chan->mps) + 1; + + skb_queue_head_init(&chan->tx_q); +} + +static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits) +{ + l2cap_le_flowctl_init(chan, tx_credits); + + /* L2CAP implementations shall support a minimum MPS of 64 
octets */ + if (chan->mps < L2CAP_ECRED_MIN_MPS) { + chan->mps = L2CAP_ECRED_MIN_MPS; + chan->rx_credits = (chan->imtu / chan->mps) + 1; + } +} + +void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) +{ + BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn, + __le16_to_cpu(chan->psm), chan->dcid); + + conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; + + chan->conn = conn; + + switch (chan->chan_type) { + case L2CAP_CHAN_CONN_ORIENTED: + /* Alloc CID for connection-oriented socket */ + chan->scid = l2cap_alloc_cid(conn); + if (conn->hcon->type == ACL_LINK) + chan->omtu = L2CAP_DEFAULT_MTU; + break; + + case L2CAP_CHAN_CONN_LESS: + /* Connectionless socket */ + chan->scid = L2CAP_CID_CONN_LESS; + chan->dcid = L2CAP_CID_CONN_LESS; + chan->omtu = L2CAP_DEFAULT_MTU; + break; + + case L2CAP_CHAN_FIXED: + /* Caller will set CID and CID specific MTU values */ + break; + + default: + /* Raw socket can send/recv signalling messages only */ + chan->scid = L2CAP_CID_SIGNALING; + chan->dcid = L2CAP_CID_SIGNALING; + chan->omtu = L2CAP_DEFAULT_MTU; + } + + chan->local_id = L2CAP_BESTEFFORT_ID; + chan->local_stype = L2CAP_SERV_BESTEFFORT; + chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE; + chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME; + chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT; + chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO; + + l2cap_chan_hold(chan); + + /* Only keep a reference for fixed channels if they requested it */ + if (chan->chan_type != L2CAP_CHAN_FIXED || + test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) + hci_conn_hold(conn->hcon); + + list_add(&chan->list, &conn->chan_l); +} + +void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan) +{ + mutex_lock(&conn->chan_lock); + __l2cap_chan_add(conn, chan); + mutex_unlock(&conn->chan_lock); +} + +void l2cap_chan_del(struct l2cap_chan *chan, int err) +{ + struct l2cap_conn *conn = chan->conn; + + __clear_chan_timer(chan); + + BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err, + state_to_string(chan->state)); + + chan->ops->teardown(chan, err); + + if (conn) { + struct amp_mgr *mgr = conn->hcon->amp_mgr; + /* Delete from channel list */ + list_del(&chan->list); + + l2cap_chan_put(chan); + + chan->conn = NULL; + + /* Reference was only held for non-fixed channels or + * fixed channels that explicitly requested it using the + * FLAG_HOLD_HCI_CONN flag. 
+ */ + if (chan->chan_type != L2CAP_CHAN_FIXED || + test_bit(FLAG_HOLD_HCI_CONN, &chan->flags)) + hci_conn_drop(conn->hcon); + + if (mgr && mgr->bredr_chan == chan) + mgr->bredr_chan = NULL; + } + + if (chan->hs_hchan) { + struct hci_chan *hs_hchan = chan->hs_hchan; + + BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan); + amp_disconnect_logical_link(hs_hchan); + } + + if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state)) + return; + + switch (chan->mode) { + case L2CAP_MODE_BASIC: + break; + + case L2CAP_MODE_LE_FLOWCTL: + case L2CAP_MODE_EXT_FLOWCTL: + skb_queue_purge(&chan->tx_q); + break; + + case L2CAP_MODE_ERTM: + __clear_retrans_timer(chan); + __clear_monitor_timer(chan); + __clear_ack_timer(chan); + + skb_queue_purge(&chan->srej_q); + + l2cap_seq_list_free(&chan->srej_list); + l2cap_seq_list_free(&chan->retrans_list); + fallthrough; + + case L2CAP_MODE_STREAMING: + skb_queue_purge(&chan->tx_q); + break; + } +} +EXPORT_SYMBOL_GPL(l2cap_chan_del); + +static void __l2cap_chan_list_id(struct l2cap_conn *conn, u16 id, + l2cap_chan_func_t func, void *data) +{ + struct l2cap_chan *chan, *l; + + list_for_each_entry_safe(chan, l, &conn->chan_l, list) { + if (chan->ident == id) + func(chan, data); + } +} + +static void __l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, + void *data) +{ + struct l2cap_chan *chan; + + list_for_each_entry(chan, &conn->chan_l, list) { + func(chan, data); + } +} + +void l2cap_chan_list(struct l2cap_conn *conn, l2cap_chan_func_t func, + void *data) +{ + if (!conn) + return; + + mutex_lock(&conn->chan_lock); + __l2cap_chan_list(conn, func, data); + mutex_unlock(&conn->chan_lock); +} + +EXPORT_SYMBOL_GPL(l2cap_chan_list); + +static void l2cap_conn_update_id_addr(struct work_struct *work) +{ + struct l2cap_conn *conn = container_of(work, struct l2cap_conn, + id_addr_update_work); + struct hci_conn *hcon = conn->hcon; + struct l2cap_chan *chan; + + mutex_lock(&conn->chan_lock); + + list_for_each_entry(chan, &conn->chan_l, list) { + l2cap_chan_lock(chan); + bacpy(&chan->dst, &hcon->dst); + chan->dst_type = bdaddr_dst_type(hcon); + l2cap_chan_unlock(chan); + } + + mutex_unlock(&conn->chan_lock); +} + +static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_le_conn_rsp rsp; + u16 result; + + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) + result = L2CAP_CR_LE_AUTHORIZATION; + else + result = L2CAP_CR_LE_BAD_PSM; + + l2cap_state_change(chan, BT_DISCONN); + + rsp.dcid = cpu_to_le16(chan->scid); + rsp.mtu = cpu_to_le16(chan->imtu); + rsp.mps = cpu_to_le16(chan->mps); + rsp.credits = cpu_to_le16(chan->rx_credits); + rsp.result = cpu_to_le16(result); + + l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), + &rsp); +} + +static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan) +{ + l2cap_state_change(chan, BT_DISCONN); + + __l2cap_ecred_conn_rsp_defer(chan); +} + +static void l2cap_chan_connect_reject(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_conn_rsp rsp; + u16 result; + + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) + result = L2CAP_CR_SEC_BLOCK; + else + result = L2CAP_CR_BAD_PSM; + + l2cap_state_change(chan, BT_DISCONN); + + rsp.scid = cpu_to_le16(chan->dcid); + rsp.dcid = cpu_to_le16(chan->scid); + rsp.result = cpu_to_le16(result); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + + l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp); +} + +void l2cap_chan_close(struct l2cap_chan *chan, int reason) +{ + 
struct l2cap_conn *conn = chan->conn; + + BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); + + switch (chan->state) { + case BT_LISTEN: + chan->ops->teardown(chan, 0); + break; + + case BT_CONNECTED: + case BT_CONFIG: + if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { + __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); + l2cap_send_disconn_req(chan, reason); + } else + l2cap_chan_del(chan, reason); + break; + + case BT_CONNECT2: + if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) { + if (conn->hcon->type == ACL_LINK) + l2cap_chan_connect_reject(chan); + else if (conn->hcon->type == LE_LINK) { + switch (chan->mode) { + case L2CAP_MODE_LE_FLOWCTL: + l2cap_chan_le_connect_reject(chan); + break; + case L2CAP_MODE_EXT_FLOWCTL: + l2cap_chan_ecred_connect_reject(chan); + return; + } + } + } + + l2cap_chan_del(chan, reason); + break; + + case BT_CONNECT: + case BT_DISCONN: + l2cap_chan_del(chan, reason); + break; + + default: + chan->ops->teardown(chan, 0); + break; + } +} +EXPORT_SYMBOL(l2cap_chan_close); + +static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan) +{ + switch (chan->chan_type) { + case L2CAP_CHAN_RAW: + switch (chan->sec_level) { + case BT_SECURITY_HIGH: + case BT_SECURITY_FIPS: + return HCI_AT_DEDICATED_BONDING_MITM; + case BT_SECURITY_MEDIUM: + return HCI_AT_DEDICATED_BONDING; + default: + return HCI_AT_NO_BONDING; + } + break; + case L2CAP_CHAN_CONN_LESS: + if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) { + if (chan->sec_level == BT_SECURITY_LOW) + chan->sec_level = BT_SECURITY_SDP; + } + if (chan->sec_level == BT_SECURITY_HIGH || + chan->sec_level == BT_SECURITY_FIPS) + return HCI_AT_NO_BONDING_MITM; + else + return HCI_AT_NO_BONDING; + break; + case L2CAP_CHAN_CONN_ORIENTED: + if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) { + if (chan->sec_level == BT_SECURITY_LOW) + chan->sec_level = BT_SECURITY_SDP; + + if (chan->sec_level == BT_SECURITY_HIGH || + chan->sec_level == BT_SECURITY_FIPS) + return HCI_AT_NO_BONDING_MITM; + else + return HCI_AT_NO_BONDING; + } + fallthrough; + + default: + switch (chan->sec_level) { + case BT_SECURITY_HIGH: + case BT_SECURITY_FIPS: + return HCI_AT_GENERAL_BONDING_MITM; + case BT_SECURITY_MEDIUM: + return HCI_AT_GENERAL_BONDING; + default: + return HCI_AT_NO_BONDING; + } + break; + } +} + +/* Service level security */ +int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator) +{ + struct l2cap_conn *conn = chan->conn; + __u8 auth_type; + + if (conn->hcon->type == LE_LINK) + return smp_conn_security(conn->hcon, chan->sec_level); + + auth_type = l2cap_get_auth_type(chan); + + return hci_conn_security(conn->hcon, chan->sec_level, auth_type, + initiator); +} + +static u8 l2cap_get_ident(struct l2cap_conn *conn) +{ + u8 id; + + /* Get next available identificator. + * 1 - 128 are used by kernel. + * 129 - 199 are reserved. + * 200 - 254 are used by utilities like l2ping, etc. 
+ */ + + mutex_lock(&conn->ident_lock); + + if (++conn->tx_ident > 128) + conn->tx_ident = 1; + + id = conn->tx_ident; + + mutex_unlock(&conn->ident_lock); + + return id; +} + +static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, + void *data) +{ + struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data); + u8 flags; + + BT_DBG("code 0x%2.2x", code); + + if (!skb) + return; + + /* Use NO_FLUSH if supported or we have an LE link (which does + * not support auto-flushing packets) */ + if (lmp_no_flush_capable(conn->hcon->hdev) || + conn->hcon->type == LE_LINK) + flags = ACL_START_NO_FLUSH; + else + flags = ACL_START; + + bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON; + skb->priority = HCI_PRIO_MAX; + + hci_send_acl(conn->hchan, skb, flags); +} + +static bool __chan_is_moving(struct l2cap_chan *chan) +{ + return chan->move_state != L2CAP_MOVE_STABLE && + chan->move_state != L2CAP_MOVE_WAIT_PREPARE; +} + +static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct hci_conn *hcon = chan->conn->hcon; + u16 flags; + + BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len, + skb->priority); + + if (chan->hs_hcon && !__chan_is_moving(chan)) { + if (chan->hs_hchan) + hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE); + else + kfree_skb(skb); + + return; + } + + /* Use NO_FLUSH for LE links (where this is the only option) or + * if the BR/EDR link supports it and flushing has not been + * explicitly requested (through FLAG_FLUSHABLE). + */ + if (hcon->type == LE_LINK || + (!test_bit(FLAG_FLUSHABLE, &chan->flags) && + lmp_no_flush_capable(hcon->hdev))) + flags = ACL_START_NO_FLUSH; + else + flags = ACL_START; + + bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); + hci_send_acl(chan->conn->hchan, skb, flags); +} + +static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control) +{ + control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT; + control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT; + + if (enh & L2CAP_CTRL_FRAME_TYPE) { + /* S-Frame */ + control->sframe = 1; + control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT; + control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT; + + control->sar = 0; + control->txseq = 0; + } else { + /* I-Frame */ + control->sframe = 0; + control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT; + control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT; + + control->poll = 0; + control->super = 0; + } +} + +static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control) +{ + control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT; + control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT; + + if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) { + /* S-Frame */ + control->sframe = 1; + control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT; + control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT; + + control->sar = 0; + control->txseq = 0; + } else { + /* I-Frame */ + control->sframe = 0; + control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT; + control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT; + + control->poll = 0; + control->super = 0; + } +} + +static inline void __unpack_control(struct l2cap_chan *chan, + struct sk_buff *skb) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { + __unpack_extended_control(get_unaligned_le32(skb->data), + &bt_cb(skb)->l2cap); 
+ skb_pull(skb, L2CAP_EXT_CTRL_SIZE); + } else { + __unpack_enhanced_control(get_unaligned_le16(skb->data), + &bt_cb(skb)->l2cap); + skb_pull(skb, L2CAP_ENH_CTRL_SIZE); + } +} + +static u32 __pack_extended_control(struct l2cap_ctrl *control) +{ + u32 packed; + + packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT; + packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT; + + if (control->sframe) { + packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT; + packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT; + packed |= L2CAP_EXT_CTRL_FRAME_TYPE; + } else { + packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT; + packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT; + } + + return packed; +} + +static u16 __pack_enhanced_control(struct l2cap_ctrl *control) +{ + u16 packed; + + packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT; + packed |= control->final << L2CAP_CTRL_FINAL_SHIFT; + + if (control->sframe) { + packed |= control->poll << L2CAP_CTRL_POLL_SHIFT; + packed |= control->super << L2CAP_CTRL_SUPER_SHIFT; + packed |= L2CAP_CTRL_FRAME_TYPE; + } else { + packed |= control->sar << L2CAP_CTRL_SAR_SHIFT; + packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT; + } + + return packed; +} + +static inline void __pack_control(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff *skb) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { + put_unaligned_le32(__pack_extended_control(control), + skb->data + L2CAP_HDR_SIZE); + } else { + put_unaligned_le16(__pack_enhanced_control(control), + skb->data + L2CAP_HDR_SIZE); + } +} + +static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan) +{ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + return L2CAP_EXT_HDR_SIZE; + else + return L2CAP_ENH_HDR_SIZE; +} + +static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan, + u32 control) +{ + struct sk_buff *skb; + struct l2cap_hdr *lh; + int hlen = __ertm_hdr_size(chan); + + if (chan->fcs == L2CAP_FCS_CRC16) + hlen += L2CAP_FCS_SIZE; + + skb = bt_skb_alloc(hlen, GFP_KERNEL); + + if (!skb) + return ERR_PTR(-ENOMEM); + + lh = skb_put(skb, L2CAP_HDR_SIZE); + lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE); + lh->cid = cpu_to_le16(chan->dcid); + + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); + else + put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); + + if (chan->fcs == L2CAP_FCS_CRC16) { + u16 fcs = crc16(0, (u8 *)skb->data, skb->len); + put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); + } + + skb->priority = HCI_PRIO_MAX; + return skb; +} + +static void l2cap_send_sframe(struct l2cap_chan *chan, + struct l2cap_ctrl *control) +{ + struct sk_buff *skb; + u32 control_field; + + BT_DBG("chan %p, control %p", chan, control); + + if (!control->sframe) + return; + + if (__chan_is_moving(chan)) + return; + + if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) && + !control->poll) + control->final = 1; + + if (control->super == L2CAP_SUPER_RR) + clear_bit(CONN_RNR_SENT, &chan->conn_state); + else if (control->super == L2CAP_SUPER_RNR) + set_bit(CONN_RNR_SENT, &chan->conn_state); + + if (control->super != L2CAP_SUPER_SREJ) { + chan->last_acked_seq = control->reqseq; + __clear_ack_timer(chan); + } + + BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq, + control->final, control->poll, control->super); + + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + control_field = __pack_extended_control(control); + else + control_field = __pack_enhanced_control(control); + + skb = 
l2cap_create_sframe_pdu(chan, control_field); + if (!IS_ERR(skb)) + l2cap_do_send(chan, skb); +} + +static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll) +{ + struct l2cap_ctrl control; + + BT_DBG("chan %p, poll %d", chan, poll); + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.poll = poll; + + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) + control.super = L2CAP_SUPER_RNR; + else + control.super = L2CAP_SUPER_RR; + + control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &control); +} + +static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan) +{ + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) + return true; + + return !test_bit(CONF_CONNECT_PEND, &chan->conf_state); +} + +static bool __amp_capable(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct hci_dev *hdev; + bool amp_available = false; + + if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) + return false; + + if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP)) + return false; + + read_lock(&hci_dev_list_lock); + list_for_each_entry(hdev, &hci_dev_list, list) { + if (hdev->amp_type != AMP_TYPE_BREDR && + test_bit(HCI_UP, &hdev->flags)) { + amp_available = true; + break; + } + } + read_unlock(&hci_dev_list_lock); + + if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED) + return amp_available; + + return false; +} + +static bool l2cap_check_efs(struct l2cap_chan *chan) +{ + /* Check EFS parameters */ + return true; +} + +void l2cap_send_conn_req(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_conn_req req; + + req.scid = cpu_to_le16(chan->scid); + req.psm = chan->psm; + + chan->ident = l2cap_get_ident(conn); + + set_bit(CONF_CONNECT_PEND, &chan->conf_state); + + l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req); +} + +static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id) +{ + struct l2cap_create_chan_req req; + req.scid = cpu_to_le16(chan->scid); + req.psm = chan->psm; + req.amp_id = amp_id; + + chan->ident = l2cap_get_ident(chan->conn); + + l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ, + sizeof(req), &req); +} + +static void l2cap_move_setup(struct l2cap_chan *chan) +{ + struct sk_buff *skb; + + BT_DBG("chan %p", chan); + + if (chan->mode != L2CAP_MODE_ERTM) + return; + + __clear_retrans_timer(chan); + __clear_monitor_timer(chan); + __clear_ack_timer(chan); + + chan->retry_count = 0; + skb_queue_walk(&chan->tx_q, skb) { + if (bt_cb(skb)->l2cap.retries) + bt_cb(skb)->l2cap.retries = 1; + else + break; + } + + chan->expected_tx_seq = chan->buffer_seq; + + clear_bit(CONN_REJ_ACT, &chan->conn_state); + clear_bit(CONN_SREJ_ACT, &chan->conn_state); + l2cap_seq_list_clear(&chan->retrans_list); + l2cap_seq_list_clear(&chan->srej_list); + skb_queue_purge(&chan->srej_q); + + chan->tx_state = L2CAP_TX_STATE_XMIT; + chan->rx_state = L2CAP_RX_STATE_MOVE; + + set_bit(CONN_REMOTE_BUSY, &chan->conn_state); +} + +static void l2cap_move_done(struct l2cap_chan *chan) +{ + u8 move_role = chan->move_role; + BT_DBG("chan %p", chan); + + chan->move_state = L2CAP_MOVE_STABLE; + chan->move_role = L2CAP_MOVE_ROLE_NONE; + + if (chan->mode != L2CAP_MODE_ERTM) + return; + + switch (move_role) { + case L2CAP_MOVE_ROLE_INITIATOR: + l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL); + chan->rx_state = L2CAP_RX_STATE_WAIT_F; + break; + case L2CAP_MOVE_ROLE_RESPONDER: + chan->rx_state = L2CAP_RX_STATE_WAIT_P; + break; + } +} + +static void l2cap_chan_ready(struct l2cap_chan *chan) +{ + /* 
The channel may have already been flagged as connected in + * case of receiving data before the L2CAP info req/rsp + * procedure is complete. + */ + if (chan->state == BT_CONNECTED) + return; + + /* This clears all conf flags, including CONF_NOT_COMPLETE */ + chan->conf_state = 0; + __clear_chan_timer(chan); + + switch (chan->mode) { + case L2CAP_MODE_LE_FLOWCTL: + case L2CAP_MODE_EXT_FLOWCTL: + if (!chan->tx_credits) + chan->ops->suspend(chan); + break; + } + + chan->state = BT_CONNECTED; + + chan->ops->ready(chan); +} + +static void l2cap_le_connect(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_le_conn_req req; + + if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags)) + return; + + if (!chan->imtu) + chan->imtu = chan->conn->mtu; + + l2cap_le_flowctl_init(chan, 0); + + memset(&req, 0, sizeof(req)); + req.psm = chan->psm; + req.scid = cpu_to_le16(chan->scid); + req.mtu = cpu_to_le16(chan->imtu); + req.mps = cpu_to_le16(chan->mps); + req.credits = cpu_to_le16(chan->rx_credits); + + chan->ident = l2cap_get_ident(conn); + + l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ, + sizeof(req), &req); +} + +struct l2cap_ecred_conn_data { + struct { + struct l2cap_ecred_conn_req req; + __le16 scid[5]; + } __packed pdu; + struct l2cap_chan *chan; + struct pid *pid; + int count; +}; + +static void l2cap_ecred_defer_connect(struct l2cap_chan *chan, void *data) +{ + struct l2cap_ecred_conn_data *conn = data; + struct pid *pid; + + if (chan == conn->chan) + return; + + if (!test_and_clear_bit(FLAG_DEFER_SETUP, &chan->flags)) + return; + + pid = chan->ops->get_peer_pid(chan); + + /* Only add deferred channels with the same PID/PSM */ + if (conn->pid != pid || chan->psm != conn->chan->psm || chan->ident || + chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) + return; + + if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + return; + + l2cap_ecred_init(chan, 0); + + /* Set the same ident so we can match on the rsp */ + chan->ident = conn->chan->ident; + + /* Include all channels deferred */ + conn->pdu.scid[conn->count] = cpu_to_le16(chan->scid); + + conn->count++; +} + +static void l2cap_ecred_connect(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_ecred_conn_data data; + + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) + return; + + if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + return; + + l2cap_ecred_init(chan, 0); + + memset(&data, 0, sizeof(data)); + data.pdu.req.psm = chan->psm; + data.pdu.req.mtu = cpu_to_le16(chan->imtu); + data.pdu.req.mps = cpu_to_le16(chan->mps); + data.pdu.req.credits = cpu_to_le16(chan->rx_credits); + data.pdu.scid[0] = cpu_to_le16(chan->scid); + + chan->ident = l2cap_get_ident(conn); + + data.count = 1; + data.chan = chan; + data.pid = chan->ops->get_peer_pid(chan); + + __l2cap_chan_list(conn, l2cap_ecred_defer_connect, &data); + + l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ, + sizeof(data.pdu.req) + data.count * sizeof(__le16), + &data.pdu); +} + +static void l2cap_le_start(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + + if (!smp_conn_security(conn->hcon, chan->sec_level)) + return; + + if (!chan->psm) { + l2cap_chan_ready(chan); + return; + } + + if (chan->state == BT_CONNECT) { + if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) + l2cap_ecred_connect(chan); + else + l2cap_le_connect(chan); + } +} + +static void l2cap_start_connection(struct l2cap_chan *chan) +{ + if (__amp_capable(chan)) { + BT_DBG("chan %p AMP capable: 
discover AMPs", chan); + a2mp_discover_amp(chan); + } else if (chan->conn->hcon->type == LE_LINK) { + l2cap_le_start(chan); + } else { + l2cap_send_conn_req(chan); + } +} + +static void l2cap_request_info(struct l2cap_conn *conn) +{ + struct l2cap_info_req req; + + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) + return; + + req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; + conn->info_ident = l2cap_get_ident(conn); + + schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); + + l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, + sizeof(req), &req); +} + +static bool l2cap_check_enc_key_size(struct hci_conn *hcon) +{ + /* The minimum encryption key size needs to be enforced by the + * host stack before establishing any L2CAP connections. The + * specification in theory allows a minimum of 1, but to align + * BR/EDR and LE transports, a minimum of 7 is chosen. + * + * This check might also be called for unencrypted connections + * that have no key size requirements. Ensure that the link is + * actually encrypted before enforcing a key size. + */ + int min_key_size = hcon->hdev->min_enc_key_size; + + /* On FIPS security level, key size must be 16 bytes */ + if (hcon->sec_level == BT_SECURITY_FIPS) + min_key_size = 16; + + return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) || + hcon->enc_key_size >= min_key_size); +} + +static void l2cap_do_start(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + + if (conn->hcon->type == LE_LINK) { + l2cap_le_start(chan); + return; + } + + if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) { + l2cap_request_info(conn); + return; + } + + if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) + return; + + if (!l2cap_chan_check_security(chan, true) || + !__l2cap_no_conn_pending(chan)) + return; + + if (l2cap_check_enc_key_size(conn->hcon)) + l2cap_start_connection(chan); + else + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); +} + +static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask) +{ + u32 local_feat_mask = l2cap_feat_mask; + if (!disable_ertm) + local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING; + + switch (mode) { + case L2CAP_MODE_ERTM: + return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask; + case L2CAP_MODE_STREAMING: + return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask; + default: + return 0x00; + } +} + +static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_disconn_req req; + + if (!conn) + return; + + if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) { + __clear_retrans_timer(chan); + __clear_monitor_timer(chan); + __clear_ack_timer(chan); + } + + if (chan->scid == L2CAP_CID_A2MP) { + l2cap_state_change(chan, BT_DISCONN); + return; + } + + req.dcid = cpu_to_le16(chan->dcid); + req.scid = cpu_to_le16(chan->scid); + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ, + sizeof(req), &req); + + l2cap_state_change_and_error(chan, BT_DISCONN, err); +} + +/* ---- L2CAP connections ---- */ +static void l2cap_conn_start(struct l2cap_conn *conn) +{ + struct l2cap_chan *chan, *tmp; + + BT_DBG("conn %p", conn); + + mutex_lock(&conn->chan_lock); + + list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { + l2cap_chan_lock(chan); + + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { + l2cap_chan_ready(chan); + l2cap_chan_unlock(chan); + continue; + } + + if (chan->state == BT_CONNECT) { + if (!l2cap_chan_check_security(chan, true) || + 
!__l2cap_no_conn_pending(chan)) { + l2cap_chan_unlock(chan); + continue; + } + + if (!l2cap_mode_supported(chan->mode, conn->feat_mask) + && test_bit(CONF_STATE2_DEVICE, + &chan->conf_state)) { + l2cap_chan_close(chan, ECONNRESET); + l2cap_chan_unlock(chan); + continue; + } + + if (l2cap_check_enc_key_size(conn->hcon)) + l2cap_start_connection(chan); + else + l2cap_chan_close(chan, ECONNREFUSED); + + } else if (chan->state == BT_CONNECT2) { + struct l2cap_conn_rsp rsp; + char buf[128]; + rsp.scid = cpu_to_le16(chan->dcid); + rsp.dcid = cpu_to_le16(chan->scid); + + if (l2cap_chan_check_security(chan, false)) { + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { + rsp.result = cpu_to_le16(L2CAP_CR_PEND); + rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND); + chan->ops->defer(chan); + + } else { + l2cap_state_change(chan, BT_CONFIG); + rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + } + } else { + rsp.result = cpu_to_le16(L2CAP_CR_PEND); + rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND); + } + + l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, + sizeof(rsp), &rsp); + + if (test_bit(CONF_REQ_SENT, &chan->conf_state) || + rsp.result != L2CAP_CR_SUCCESS) { + l2cap_chan_unlock(chan); + continue; + } + + set_bit(CONF_REQ_SENT, &chan->conf_state); + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); + chan->num_conf_req++; + } + + l2cap_chan_unlock(chan); + } + + mutex_unlock(&conn->chan_lock); +} + +static void l2cap_le_conn_ready(struct l2cap_conn *conn) +{ + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + + BT_DBG("%s conn %p", hdev->name, conn); + + /* For outgoing pairing which doesn't necessarily have an + * associated socket (e.g. mgmt_pair_device). + */ + if (hcon->out) + smp_conn_security(hcon, hcon->pending_sec_level); + + /* For LE peripheral connections, make sure the connection interval + * is in the range of the minimum and maximum interval that has + * been configured for this connection. If not, then trigger + * the connection update procedure. 
+ */
+ if (hcon->role == HCI_ROLE_SLAVE &&
+ (hcon->le_conn_interval < hcon->le_conn_min_interval ||
+ hcon->le_conn_interval > hcon->le_conn_max_interval)) {
+ struct l2cap_conn_param_update_req req;
+
+ req.min = cpu_to_le16(hcon->le_conn_min_interval);
+ req.max = cpu_to_le16(hcon->le_conn_max_interval);
+ req.latency = cpu_to_le16(hcon->le_conn_latency);
+ req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
+
+ l2cap_send_cmd(conn, l2cap_get_ident(conn),
+ L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
+ }
+}
+
+static void l2cap_conn_ready(struct l2cap_conn *conn)
+{
+ struct l2cap_chan *chan;
+ struct hci_conn *hcon = conn->hcon;
+
+ BT_DBG("conn %p", conn);
+
+ if (hcon->type == ACL_LINK)
+ l2cap_request_info(conn);
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+
+ l2cap_chan_lock(chan);
+
+ if (chan->scid == L2CAP_CID_A2MP) {
+ l2cap_chan_unlock(chan);
+ continue;
+ }
+
+ if (hcon->type == LE_LINK) {
+ l2cap_le_start(chan);
+ } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
+ if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
+ l2cap_chan_ready(chan);
+ } else if (chan->state == BT_CONNECT) {
+ l2cap_do_start(chan);
+ }
+
+ l2cap_chan_unlock(chan);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+
+ if (hcon->type == LE_LINK)
+ l2cap_le_conn_ready(conn);
+
+ queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
+}
+
+/* Notify sockets that we cannot guarantee reliability anymore */
+static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
+{
+ struct l2cap_chan *chan;
+
+ BT_DBG("conn %p", conn);
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
+ l2cap_chan_set_err(chan, err);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+}
+
+static void l2cap_info_timeout(struct work_struct *work)
+{
+ struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
+ info_timer.work);
+
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ conn->info_ident = 0;
+
+ l2cap_conn_start(conn);
+}
+
+/*
+ * l2cap_user
+ * External modules can register l2cap_user objects on l2cap_conn. The ->probe
+ * callback is called during registration. The ->remove callback is called
+ * during unregistration.
+ * An l2cap_user object is unregistered either explicitly or when the
+ * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
+ * l2cap->hchan, etc. are valid as long as the remove callback hasn't been
+ * called.
+ * External modules must own a reference to the l2cap_conn object if they intend
+ * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
+ * any time if they don't.
+ */
+
+int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
+{
+ struct hci_dev *hdev = conn->hcon->hdev;
+ int ret;
+
+ /* We need to check whether l2cap_conn is registered. If it is not, we
+ * must not register the l2cap_user. l2cap_conn_del() unregisters
+ * l2cap_conn objects, but doesn't provide its own locking. Instead, it
+ * relies on the parent hci_conn object to be locked. This itself relies
+ * on the hci_dev object to be locked. So we must lock the hci device
+ * here, too.
*/ + + hci_dev_lock(hdev); + + if (!list_empty(&user->list)) { + ret = -EINVAL; + goto out_unlock; + } + + /* conn->hchan is NULL after l2cap_conn_del() was called */ + if (!conn->hchan) { + ret = -ENODEV; + goto out_unlock; + } + + ret = user->probe(conn, user); + if (ret) + goto out_unlock; + + list_add(&user->list, &conn->users); + ret = 0; + +out_unlock: + hci_dev_unlock(hdev); + return ret; +} +EXPORT_SYMBOL(l2cap_register_user); + +void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user) +{ + struct hci_dev *hdev = conn->hcon->hdev; + + hci_dev_lock(hdev); + + if (list_empty(&user->list)) + goto out_unlock; + + list_del_init(&user->list); + user->remove(conn, user); + +out_unlock: + hci_dev_unlock(hdev); +} +EXPORT_SYMBOL(l2cap_unregister_user); + +static void l2cap_unregister_all_users(struct l2cap_conn *conn) +{ + struct l2cap_user *user; + + while (!list_empty(&conn->users)) { + user = list_first_entry(&conn->users, struct l2cap_user, list); + list_del_init(&user->list); + user->remove(conn, user); + } +} + +static void l2cap_conn_del(struct hci_conn *hcon, int err) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + struct l2cap_chan *chan, *l; + + if (!conn) + return; + + BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); + + kfree_skb(conn->rx_skb); + + skb_queue_purge(&conn->pending_rx); + + /* We can not call flush_work(&conn->pending_rx_work) here since we + * might block if we are running on a worker from the same workqueue + * pending_rx_work is waiting on. + */ + if (work_pending(&conn->pending_rx_work)) + cancel_work_sync(&conn->pending_rx_work); + + if (work_pending(&conn->id_addr_update_work)) + cancel_work_sync(&conn->id_addr_update_work); + + l2cap_unregister_all_users(conn); + + /* Force the connection to be immediately dropped */ + hcon->disc_timeout = 0; + + mutex_lock(&conn->chan_lock); + + /* Kill channels */ + list_for_each_entry_safe(chan, l, &conn->chan_l, list) { + l2cap_chan_hold(chan); + l2cap_chan_lock(chan); + + l2cap_chan_del(chan, err); + + chan->ops->close(chan); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + } + + mutex_unlock(&conn->chan_lock); + + hci_chan_del(conn->hchan); + + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) + cancel_delayed_work_sync(&conn->info_timer); + + hcon->l2cap_data = NULL; + conn->hchan = NULL; + l2cap_conn_put(conn); +} + +static void l2cap_conn_free(struct kref *ref) +{ + struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref); + + hci_conn_put(conn->hcon); + kfree(conn); +} + +struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn) +{ + kref_get(&conn->ref); + return conn; +} +EXPORT_SYMBOL(l2cap_conn_get); + +void l2cap_conn_put(struct l2cap_conn *conn) +{ + kref_put(&conn->ref, l2cap_conn_free); +} +EXPORT_SYMBOL(l2cap_conn_put); + +/* ---- Socket interface ---- */ + +/* Find socket with psm and source / destination bdaddr. + * Returns closest match. + */ +static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, + bdaddr_t *src, + bdaddr_t *dst, + u8 link_type) +{ + struct l2cap_chan *c, *tmp, *c1 = NULL; + + read_lock(&chan_list_lock); + + list_for_each_entry_safe(c, tmp, &chan_list, global_l) { + if (state && c->state != state) + continue; + + if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR) + continue; + + if (link_type == LE_LINK && c->src_type == BDADDR_BREDR) + continue; + + if (c->chan_type != L2CAP_CHAN_FIXED && c->psm == psm) { + int src_match, dst_match; + int src_any, dst_any; + + /* Exact match. 
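+ * A channel bound to BDADDR_ANY on either side is not an
+ * exact match; it is only kept as a closest-match candidate
+ * below.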
*/ + src_match = !bacmp(&c->src, src); + dst_match = !bacmp(&c->dst, dst); + if (src_match && dst_match) { + if (!l2cap_chan_hold_unless_zero(c)) + continue; + + read_unlock(&chan_list_lock); + return c; + } + + /* Closest match */ + src_any = !bacmp(&c->src, BDADDR_ANY); + dst_any = !bacmp(&c->dst, BDADDR_ANY); + if ((src_match && dst_any) || (src_any && dst_match) || + (src_any && dst_any)) + c1 = c; + } + } + + if (c1) + c1 = l2cap_chan_hold_unless_zero(c1); + + read_unlock(&chan_list_lock); + + return c1; +} + +static void l2cap_monitor_timeout(struct work_struct *work) +{ + struct l2cap_chan *chan = container_of(work, struct l2cap_chan, + monitor_timer.work); + + BT_DBG("chan %p", chan); + + l2cap_chan_lock(chan); + + if (!chan->conn) { + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return; + } + + l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); +} + +static void l2cap_retrans_timeout(struct work_struct *work) +{ + struct l2cap_chan *chan = container_of(work, struct l2cap_chan, + retrans_timer.work); + + BT_DBG("chan %p", chan); + + l2cap_chan_lock(chan); + + if (!chan->conn) { + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return; + } + + l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO); + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); +} + +static void l2cap_streaming_send(struct l2cap_chan *chan, + struct sk_buff_head *skbs) +{ + struct sk_buff *skb; + struct l2cap_ctrl *control; + + BT_DBG("chan %p, skbs %p", chan, skbs); + + if (__chan_is_moving(chan)) + return; + + skb_queue_splice_tail_init(skbs, &chan->tx_q); + + while (!skb_queue_empty(&chan->tx_q)) { + + skb = skb_dequeue(&chan->tx_q); + + bt_cb(skb)->l2cap.retries = 1; + control = &bt_cb(skb)->l2cap; + + control->reqseq = 0; + control->txseq = chan->next_tx_seq; + + __pack_control(chan, control, skb); + + if (chan->fcs == L2CAP_FCS_CRC16) { + u16 fcs = crc16(0, (u8 *) skb->data, skb->len); + put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); + } + + l2cap_do_send(chan, skb); + + BT_DBG("Sent txseq %u", control->txseq); + + chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); + chan->frames_sent++; + } +} + +static int l2cap_ertm_send(struct l2cap_chan *chan) +{ + struct sk_buff *skb, *tx_skb; + struct l2cap_ctrl *control; + int sent = 0; + + BT_DBG("chan %p", chan); + + if (chan->state != BT_CONNECTED) + return -ENOTCONN; + + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) + return 0; + + if (__chan_is_moving(chan)) + return 0; + + while (chan->tx_send_head && + chan->unacked_frames < chan->remote_tx_win && + chan->tx_state == L2CAP_TX_STATE_XMIT) { + + skb = chan->tx_send_head; + + bt_cb(skb)->l2cap.retries = 1; + control = &bt_cb(skb)->l2cap; + + if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) + control->final = 1; + + control->reqseq = chan->buffer_seq; + chan->last_acked_seq = chan->buffer_seq; + control->txseq = chan->next_tx_seq; + + __pack_control(chan, control, skb); + + if (chan->fcs == L2CAP_FCS_CRC16) { + u16 fcs = crc16(0, (u8 *) skb->data, skb->len); + put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE)); + } + + /* Clone after data has been modified. Data is assumed to be + read-only (for locking purposes) on cloned sk_buffs. 
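+ The original skb stays on the transmit queue for possible
+ retransmission, and the clone shares its payload; this is
+ why the control field and FCS are written above, before
+ cloning.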
+ */ + tx_skb = skb_clone(skb, GFP_KERNEL); + + if (!tx_skb) + break; + + __set_retrans_timer(chan); + + chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq); + chan->unacked_frames++; + chan->frames_sent++; + sent++; + + if (skb_queue_is_last(&chan->tx_q, skb)) + chan->tx_send_head = NULL; + else + chan->tx_send_head = skb_queue_next(&chan->tx_q, skb); + + l2cap_do_send(chan, tx_skb); + BT_DBG("Sent txseq %u", control->txseq); + } + + BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent, + chan->unacked_frames, skb_queue_len(&chan->tx_q)); + + return sent; +} + +static void l2cap_ertm_resend(struct l2cap_chan *chan) +{ + struct l2cap_ctrl control; + struct sk_buff *skb; + struct sk_buff *tx_skb; + u16 seq; + + BT_DBG("chan %p", chan); + + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) + return; + + if (__chan_is_moving(chan)) + return; + + while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) { + seq = l2cap_seq_list_pop(&chan->retrans_list); + + skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq); + if (!skb) { + BT_DBG("Error: Can't retransmit seq %d, frame missing", + seq); + continue; + } + + bt_cb(skb)->l2cap.retries++; + control = bt_cb(skb)->l2cap; + + if (chan->max_tx != 0 && + bt_cb(skb)->l2cap.retries > chan->max_tx) { + BT_DBG("Retry limit exceeded (%d)", chan->max_tx); + l2cap_send_disconn_req(chan, ECONNRESET); + l2cap_seq_list_clear(&chan->retrans_list); + break; + } + + control.reqseq = chan->buffer_seq; + if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state)) + control.final = 1; + else + control.final = 0; + + if (skb_cloned(skb)) { + /* Cloned sk_buffs are read-only, so we need a + * writeable copy + */ + tx_skb = skb_copy(skb, GFP_KERNEL); + } else { + tx_skb = skb_clone(skb, GFP_KERNEL); + } + + if (!tx_skb) { + l2cap_seq_list_clear(&chan->retrans_list); + break; + } + + /* Update skb contents */ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) { + put_unaligned_le32(__pack_extended_control(&control), + tx_skb->data + L2CAP_HDR_SIZE); + } else { + put_unaligned_le16(__pack_enhanced_control(&control), + tx_skb->data + L2CAP_HDR_SIZE); + } + + /* Update FCS */ + if (chan->fcs == L2CAP_FCS_CRC16) { + u16 fcs = crc16(0, (u8 *) tx_skb->data, + tx_skb->len - L2CAP_FCS_SIZE); + put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) - + L2CAP_FCS_SIZE); + } + + l2cap_do_send(chan, tx_skb); + + BT_DBG("Resent txseq %d", control.txseq); + + chan->last_acked_seq = chan->buffer_seq; + } +} + +static void l2cap_retransmit(struct l2cap_chan *chan, + struct l2cap_ctrl *control) +{ + BT_DBG("chan %p, control %p", chan, control); + + l2cap_seq_list_append(&chan->retrans_list, control->reqseq); + l2cap_ertm_resend(chan); +} + +static void l2cap_retransmit_all(struct l2cap_chan *chan, + struct l2cap_ctrl *control) +{ + struct sk_buff *skb; + + BT_DBG("chan %p, control %p", chan, control); + + if (control->poll) + set_bit(CONN_SEND_FBIT, &chan->conn_state); + + l2cap_seq_list_clear(&chan->retrans_list); + + if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) + return; + + if (chan->unacked_frames) { + skb_queue_walk(&chan->tx_q, skb) { + if (bt_cb(skb)->l2cap.txseq == control->reqseq || + skb == chan->tx_send_head) + break; + } + + skb_queue_walk_from(&chan->tx_q, skb) { + if (skb == chan->tx_send_head) + break; + + l2cap_seq_list_append(&chan->retrans_list, + bt_cb(skb)->l2cap.txseq); + } + + l2cap_ertm_resend(chan); + } +} + +static void l2cap_send_ack(struct l2cap_chan *chan) +{ + struct l2cap_ctrl control; + u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq, + 
chan->last_acked_seq); + int threshold; + + BT_DBG("chan %p last_acked_seq %d buffer_seq %d", + chan, chan->last_acked_seq, chan->buffer_seq); + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && + chan->rx_state == L2CAP_RX_STATE_RECV) { + __clear_ack_timer(chan); + control.super = L2CAP_SUPER_RNR; + control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &control); + } else { + if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) { + l2cap_ertm_send(chan); + /* If any i-frames were sent, they included an ack */ + if (chan->buffer_seq == chan->last_acked_seq) + frames_to_ack = 0; + } + + /* Ack now if the window is 3/4ths full. + * Calculate without mul or div + */ + threshold = chan->ack_win; + threshold += threshold << 1; + threshold >>= 2; + + BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack, + threshold); + + if (frames_to_ack >= threshold) { + __clear_ack_timer(chan); + control.super = L2CAP_SUPER_RR; + control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &control); + frames_to_ack = 0; + } + + if (frames_to_ack) + __set_ack_timer(chan); + } +} + +static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, + struct msghdr *msg, int len, + int count, struct sk_buff *skb) +{ + struct l2cap_conn *conn = chan->conn; + struct sk_buff **frag; + int sent = 0; + + if (!copy_from_iter_full(skb_put(skb, count), count, &msg->msg_iter)) + return -EFAULT; + + sent += count; + len -= count; + + /* Continuation fragments (no L2CAP header) */ + frag = &skb_shinfo(skb)->frag_list; + while (len) { + struct sk_buff *tmp; + + count = min_t(unsigned int, conn->mtu, len); + + tmp = chan->ops->alloc_skb(chan, 0, count, + msg->msg_flags & MSG_DONTWAIT); + if (IS_ERR(tmp)) + return PTR_ERR(tmp); + + *frag = tmp; + + if (!copy_from_iter_full(skb_put(*frag, count), count, + &msg->msg_iter)) + return -EFAULT; + + sent += count; + len -= count; + + skb->len += (*frag)->len; + skb->data_len += (*frag)->len; + + frag = &(*frag)->next; + } + + return sent; +} + +static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, + struct msghdr *msg, size_t len) +{ + struct l2cap_conn *conn = chan->conn; + struct sk_buff *skb; + int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE; + struct l2cap_hdr *lh; + + BT_DBG("chan %p psm 0x%2.2x len %zu", chan, + __le16_to_cpu(chan->psm), len); + + count = min_t(unsigned int, (conn->mtu - hlen), len); + + skb = chan->ops->alloc_skb(chan, hlen, count, + msg->msg_flags & MSG_DONTWAIT); + if (IS_ERR(skb)) + return skb; + + /* Create L2CAP header */ + lh = skb_put(skb, L2CAP_HDR_SIZE); + lh->cid = cpu_to_le16(chan->dcid); + lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE); + put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE)); + + err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); + if (unlikely(err < 0)) { + kfree_skb(skb); + return ERR_PTR(err); + } + return skb; +} + +static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, + struct msghdr *msg, size_t len) +{ + struct l2cap_conn *conn = chan->conn; + struct sk_buff *skb; + int err, count; + struct l2cap_hdr *lh; + + BT_DBG("chan %p len %zu", chan, len); + + count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len); + + skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count, + msg->msg_flags & MSG_DONTWAIT); + if (IS_ERR(skb)) + return skb; + + /* Create L2CAP header */ + lh = skb_put(skb, L2CAP_HDR_SIZE); + lh->cid = cpu_to_le16(chan->dcid); + lh->len = cpu_to_le16(len); + + err = 
l2cap_skbuff_fromiovec(chan, msg, len, count, skb); + if (unlikely(err < 0)) { + kfree_skb(skb); + return ERR_PTR(err); + } + return skb; +} + +static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan, + struct msghdr *msg, size_t len, + u16 sdulen) +{ + struct l2cap_conn *conn = chan->conn; + struct sk_buff *skb; + int err, count, hlen; + struct l2cap_hdr *lh; + + BT_DBG("chan %p len %zu", chan, len); + + if (!conn) + return ERR_PTR(-ENOTCONN); + + hlen = __ertm_hdr_size(chan); + + if (sdulen) + hlen += L2CAP_SDULEN_SIZE; + + if (chan->fcs == L2CAP_FCS_CRC16) + hlen += L2CAP_FCS_SIZE; + + count = min_t(unsigned int, (conn->mtu - hlen), len); + + skb = chan->ops->alloc_skb(chan, hlen, count, + msg->msg_flags & MSG_DONTWAIT); + if (IS_ERR(skb)) + return skb; + + /* Create L2CAP header */ + lh = skb_put(skb, L2CAP_HDR_SIZE); + lh->cid = cpu_to_le16(chan->dcid); + lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); + + /* Control header is populated later */ + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE)); + else + put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE)); + + if (sdulen) + put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); + + err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); + if (unlikely(err < 0)) { + kfree_skb(skb); + return ERR_PTR(err); + } + + bt_cb(skb)->l2cap.fcs = chan->fcs; + bt_cb(skb)->l2cap.retries = 0; + return skb; +} + +static int l2cap_segment_sdu(struct l2cap_chan *chan, + struct sk_buff_head *seg_queue, + struct msghdr *msg, size_t len) +{ + struct sk_buff *skb; + u16 sdu_len; + size_t pdu_len; + u8 sar; + + BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); + + /* It is critical that ERTM PDUs fit in a single HCI fragment, + * so fragmented skbs are not used. The HCI layer's handling + * of fragmented skbs is not compatible with ERTM's queueing. + */ + + /* PDU size is derived from the HCI MTU */ + pdu_len = chan->conn->mtu; + + /* Constrain PDU size for BR/EDR connections */ + if (!chan->hs_hcon) + pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD); + + /* Adjust for largest possible L2CAP overhead. 
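+ *
+ * For example (illustrative numbers): with a 672 byte HCI MTU,
+ * CRC16 FCS and the enhanced (2 byte) control field, this
+ * leaves 672 - 2 (FCS) - 6 (basic header plus control) = 664
+ * bytes, further capped by the remote MPS below.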
*/ + if (chan->fcs) + pdu_len -= L2CAP_FCS_SIZE; + + pdu_len -= __ertm_hdr_size(chan); + + /* Remote device may have requested smaller PDUs */ + pdu_len = min_t(size_t, pdu_len, chan->remote_mps); + + if (len <= pdu_len) { + sar = L2CAP_SAR_UNSEGMENTED; + sdu_len = 0; + pdu_len = len; + } else { + sar = L2CAP_SAR_START; + sdu_len = len; + } + + while (len > 0) { + skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len); + + if (IS_ERR(skb)) { + __skb_queue_purge(seg_queue); + return PTR_ERR(skb); + } + + bt_cb(skb)->l2cap.sar = sar; + __skb_queue_tail(seg_queue, skb); + + len -= pdu_len; + if (sdu_len) + sdu_len = 0; + + if (len <= pdu_len) { + sar = L2CAP_SAR_END; + pdu_len = len; + } else { + sar = L2CAP_SAR_CONTINUE; + } + } + + return 0; +} + +static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan, + struct msghdr *msg, + size_t len, u16 sdulen) +{ + struct l2cap_conn *conn = chan->conn; + struct sk_buff *skb; + int err, count, hlen; + struct l2cap_hdr *lh; + + BT_DBG("chan %p len %zu", chan, len); + + if (!conn) + return ERR_PTR(-ENOTCONN); + + hlen = L2CAP_HDR_SIZE; + + if (sdulen) + hlen += L2CAP_SDULEN_SIZE; + + count = min_t(unsigned int, (conn->mtu - hlen), len); + + skb = chan->ops->alloc_skb(chan, hlen, count, + msg->msg_flags & MSG_DONTWAIT); + if (IS_ERR(skb)) + return skb; + + /* Create L2CAP header */ + lh = skb_put(skb, L2CAP_HDR_SIZE); + lh->cid = cpu_to_le16(chan->dcid); + lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE)); + + if (sdulen) + put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE)); + + err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb); + if (unlikely(err < 0)) { + kfree_skb(skb); + return ERR_PTR(err); + } + + return skb; +} + +static int l2cap_segment_le_sdu(struct l2cap_chan *chan, + struct sk_buff_head *seg_queue, + struct msghdr *msg, size_t len) +{ + struct sk_buff *skb; + size_t pdu_len; + u16 sdu_len; + + BT_DBG("chan %p, msg %p, len %zu", chan, msg, len); + + sdu_len = len; + pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE; + + while (len > 0) { + if (len <= pdu_len) + pdu_len = len; + + skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len); + if (IS_ERR(skb)) { + __skb_queue_purge(seg_queue); + return PTR_ERR(skb); + } + + __skb_queue_tail(seg_queue, skb); + + len -= pdu_len; + + if (sdu_len) { + sdu_len = 0; + pdu_len += L2CAP_SDULEN_SIZE; + } + } + + return 0; +} + +static void l2cap_le_flowctl_send(struct l2cap_chan *chan) +{ + int sent = 0; + + BT_DBG("chan %p", chan); + + while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) { + l2cap_do_send(chan, skb_dequeue(&chan->tx_q)); + chan->tx_credits--; + sent++; + } + + BT_DBG("Sent %d credits %u queued %u", sent, chan->tx_credits, + skb_queue_len(&chan->tx_q)); +} + +int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len) +{ + struct sk_buff *skb; + int err; + struct sk_buff_head seg_queue; + + if (!chan->conn) + return -ENOTCONN; + + /* Connectionless channel */ + if (chan->chan_type == L2CAP_CHAN_CONN_LESS) { + skb = l2cap_create_connless_pdu(chan, msg, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + l2cap_do_send(chan, skb); + return len; + } + + switch (chan->mode) { + case L2CAP_MODE_LE_FLOWCTL: + case L2CAP_MODE_EXT_FLOWCTL: + /* Check outgoing MTU */ + if (len > chan->omtu) + return -EMSGSIZE; + + __skb_queue_head_init(&seg_queue); + + err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len); + + if (chan->state != BT_CONNECTED) { + __skb_queue_purge(&seg_queue); + err = -ENOTCONN; + } + + if (err) + return err; + + 
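+ /* Queue the segmented SDU and transmit as many PDUs as the
+ * current credit count allows; l2cap_le_flowctl_send() above
+ * consumes one credit per PDU, and the channel is suspended
+ * below once the credits run out.
+ */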
skb_queue_splice_tail_init(&seg_queue, &chan->tx_q); + + l2cap_le_flowctl_send(chan); + + if (!chan->tx_credits) + chan->ops->suspend(chan); + + err = len; + + break; + + case L2CAP_MODE_BASIC: + /* Check outgoing MTU */ + if (len > chan->omtu) + return -EMSGSIZE; + + /* Create a basic PDU */ + skb = l2cap_create_basic_pdu(chan, msg, len); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + l2cap_do_send(chan, skb); + err = len; + break; + + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + /* Check outgoing MTU */ + if (len > chan->omtu) { + err = -EMSGSIZE; + break; + } + + __skb_queue_head_init(&seg_queue); + + /* Do segmentation before calling in to the state machine, + * since it's possible to block while waiting for memory + * allocation. + */ + err = l2cap_segment_sdu(chan, &seg_queue, msg, len); + + if (err) + break; + + if (chan->mode == L2CAP_MODE_ERTM) + l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST); + else + l2cap_streaming_send(chan, &seg_queue); + + err = len; + + /* If the skbs were not queued for sending, they'll still be in + * seg_queue and need to be purged. + */ + __skb_queue_purge(&seg_queue); + break; + + default: + BT_DBG("bad state %1.1x", chan->mode); + err = -EBADFD; + } + + return err; +} +EXPORT_SYMBOL_GPL(l2cap_chan_send); + +static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq) +{ + struct l2cap_ctrl control; + u16 seq; + + BT_DBG("chan %p, txseq %u", chan, txseq); + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.super = L2CAP_SUPER_SREJ; + + for (seq = chan->expected_tx_seq; seq != txseq; + seq = __next_seq(chan, seq)) { + if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) { + control.reqseq = seq; + l2cap_send_sframe(chan, &control); + l2cap_seq_list_append(&chan->srej_list, seq); + } + } + + chan->expected_tx_seq = __next_seq(chan, txseq); +} + +static void l2cap_send_srej_tail(struct l2cap_chan *chan) +{ + struct l2cap_ctrl control; + + BT_DBG("chan %p", chan); + + if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR) + return; + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.super = L2CAP_SUPER_SREJ; + control.reqseq = chan->srej_list.tail; + l2cap_send_sframe(chan, &control); +} + +static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq) +{ + struct l2cap_ctrl control; + u16 initial_head; + u16 seq; + + BT_DBG("chan %p, txseq %u", chan, txseq); + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.super = L2CAP_SUPER_SREJ; + + /* Capture initial list head to allow only one pass through the list. 
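+ * Each sequence number popped below is appended back to the
+ * list once its SREJ frame is sent, so without this bound the
+ * walk would never terminate.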
*/ + initial_head = chan->srej_list.head; + + do { + seq = l2cap_seq_list_pop(&chan->srej_list); + if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR) + break; + + control.reqseq = seq; + l2cap_send_sframe(chan, &control); + l2cap_seq_list_append(&chan->srej_list, seq); + } while (chan->srej_list.head != initial_head); +} + +static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq) +{ + struct sk_buff *acked_skb; + u16 ackseq; + + BT_DBG("chan %p, reqseq %u", chan, reqseq); + + if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq) + return; + + BT_DBG("expected_ack_seq %u, unacked_frames %u", + chan->expected_ack_seq, chan->unacked_frames); + + for (ackseq = chan->expected_ack_seq; ackseq != reqseq; + ackseq = __next_seq(chan, ackseq)) { + + acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq); + if (acked_skb) { + skb_unlink(acked_skb, &chan->tx_q); + kfree_skb(acked_skb); + chan->unacked_frames--; + } + } + + chan->expected_ack_seq = reqseq; + + if (chan->unacked_frames == 0) + __clear_retrans_timer(chan); + + BT_DBG("unacked_frames %u", chan->unacked_frames); +} + +static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan) +{ + BT_DBG("chan %p", chan); + + chan->expected_tx_seq = chan->buffer_seq; + l2cap_seq_list_clear(&chan->srej_list); + skb_queue_purge(&chan->srej_q); + chan->rx_state = L2CAP_RX_STATE_RECV; +} + +static void l2cap_tx_state_xmit(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff_head *skbs, u8 event) +{ + BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, + event); + + switch (event) { + case L2CAP_EV_DATA_REQUEST: + if (chan->tx_send_head == NULL) + chan->tx_send_head = skb_peek(skbs); + + skb_queue_splice_tail_init(skbs, &chan->tx_q); + l2cap_ertm_send(chan); + break; + case L2CAP_EV_LOCAL_BUSY_DETECTED: + BT_DBG("Enter LOCAL_BUSY"); + set_bit(CONN_LOCAL_BUSY, &chan->conn_state); + + if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { + /* The SREJ_SENT state must be aborted if we are to + * enter the LOCAL_BUSY state. 
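+ * l2cap_abort_rx_srej_sent() above rewinds expected_tx_seq
+ * to buffer_seq, drops the queued out-of-sequence frames and
+ * returns the receiver to the RECV state.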
+ */ + l2cap_abort_rx_srej_sent(chan); + } + + l2cap_send_ack(chan); + + break; + case L2CAP_EV_LOCAL_BUSY_CLEAR: + BT_DBG("Exit LOCAL_BUSY"); + clear_bit(CONN_LOCAL_BUSY, &chan->conn_state); + + if (test_bit(CONN_RNR_SENT, &chan->conn_state)) { + struct l2cap_ctrl local_control; + + memset(&local_control, 0, sizeof(local_control)); + local_control.sframe = 1; + local_control.super = L2CAP_SUPER_RR; + local_control.poll = 1; + local_control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &local_control); + + chan->retry_count = 1; + __set_monitor_timer(chan); + chan->tx_state = L2CAP_TX_STATE_WAIT_F; + } + break; + case L2CAP_EV_RECV_REQSEQ_AND_FBIT: + l2cap_process_reqseq(chan, control->reqseq); + break; + case L2CAP_EV_EXPLICIT_POLL: + l2cap_send_rr_or_rnr(chan, 1); + chan->retry_count = 1; + __set_monitor_timer(chan); + __clear_ack_timer(chan); + chan->tx_state = L2CAP_TX_STATE_WAIT_F; + break; + case L2CAP_EV_RETRANS_TO: + l2cap_send_rr_or_rnr(chan, 1); + chan->retry_count = 1; + __set_monitor_timer(chan); + chan->tx_state = L2CAP_TX_STATE_WAIT_F; + break; + case L2CAP_EV_RECV_FBIT: + /* Nothing to process */ + break; + default: + break; + } +} + +static void l2cap_tx_state_wait_f(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff_head *skbs, u8 event) +{ + BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs, + event); + + switch (event) { + case L2CAP_EV_DATA_REQUEST: + if (chan->tx_send_head == NULL) + chan->tx_send_head = skb_peek(skbs); + /* Queue data, but don't send. */ + skb_queue_splice_tail_init(skbs, &chan->tx_q); + break; + case L2CAP_EV_LOCAL_BUSY_DETECTED: + BT_DBG("Enter LOCAL_BUSY"); + set_bit(CONN_LOCAL_BUSY, &chan->conn_state); + + if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { + /* The SREJ_SENT state must be aborted if we are to + * enter the LOCAL_BUSY state. 
+ */
+ l2cap_abort_rx_srej_sent(chan);
+ }
+
+ l2cap_send_ack(chan);
+
+ break;
+ case L2CAP_EV_LOCAL_BUSY_CLEAR:
+ BT_DBG("Exit LOCAL_BUSY");
+ clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
+
+ if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
+ struct l2cap_ctrl local_control;
+
+ memset(&local_control, 0, sizeof(local_control));
+ local_control.sframe = 1;
+ local_control.super = L2CAP_SUPER_RR;
+ local_control.poll = 1;
+ local_control.reqseq = chan->buffer_seq;
+ l2cap_send_sframe(chan, &local_control);
+
+ chan->retry_count = 1;
+ __set_monitor_timer(chan);
+ chan->tx_state = L2CAP_TX_STATE_WAIT_F;
+ }
+ break;
+ case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
+ l2cap_process_reqseq(chan, control->reqseq);
+ fallthrough;
+
+ case L2CAP_EV_RECV_FBIT:
+ if (control && control->final) {
+ __clear_monitor_timer(chan);
+ if (chan->unacked_frames > 0)
+ __set_retrans_timer(chan);
+ chan->retry_count = 0;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
+ BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
+ }
+ break;
+ case L2CAP_EV_EXPLICIT_POLL:
+ /* Ignore */
+ break;
+ case L2CAP_EV_MONITOR_TO:
+ if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
+ l2cap_send_rr_or_rnr(chan, 1);
+ __set_monitor_timer(chan);
+ chan->retry_count++;
+ } else {
+ l2cap_send_disconn_req(chan, ECONNABORTED);
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
+ struct sk_buff_head *skbs, u8 event)
+{
+ BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
+ chan, control, skbs, event, chan->tx_state);
+
+ switch (chan->tx_state) {
+ case L2CAP_TX_STATE_XMIT:
+ l2cap_tx_state_xmit(chan, control, skbs, event);
+ break;
+ case L2CAP_TX_STATE_WAIT_F:
+ l2cap_tx_state_wait_f(chan, control, skbs, event);
+ break;
+ default:
+ /* Ignore event */
+ break;
+ }
+}
+
+static void l2cap_pass_to_tx(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
+}
+
+static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
+ struct l2cap_ctrl *control)
+{
+ BT_DBG("chan %p, control %p", chan, control);
+ l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
+}
+
+/* Copy frame to all raw sockets on that connection */
+static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+ struct l2cap_chan *chan;
+
+ BT_DBG("conn %p", conn);
+
+ mutex_lock(&conn->chan_lock);
+
+ list_for_each_entry(chan, &conn->chan_l, list) {
+ if (chan->chan_type != L2CAP_CHAN_RAW)
+ continue;
+
+ /* Don't send frame to the channel it came from */
+ if (bt_cb(skb)->l2cap.chan == chan)
+ continue;
+
+ nskb = skb_clone(skb, GFP_KERNEL);
+ if (!nskb)
+ continue;
+ if (chan->ops->recv(chan, nskb))
+ kfree_skb(nskb);
+ }
+
+ mutex_unlock(&conn->chan_lock);
+}
+
+/* ---- L2CAP signalling commands ---- */
+static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
+ u8 ident, u16 dlen, void *data)
+{
+ struct sk_buff *skb, **frag;
+ struct l2cap_cmd_hdr *cmd;
+ struct l2cap_hdr *lh;
+ int len, count;
+
+ BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
+ conn, code, ident, dlen);
+
+ if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
+ return NULL;
+
+ len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
+ count = min_t(unsigned int, conn->mtu, len);
+
+ skb = bt_skb_alloc(count, GFP_KERNEL);
+ if (!skb)
+ return NULL;
+
+ lh = skb_put(skb, L2CAP_HDR_SIZE);
+ lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
+
+ if
(conn->hcon->type == LE_LINK) + lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING); + else + lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING); + + cmd = skb_put(skb, L2CAP_CMD_HDR_SIZE); + cmd->code = code; + cmd->ident = ident; + cmd->len = cpu_to_le16(dlen); + + if (dlen) { + count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE; + skb_put_data(skb, data, count); + data += count; + } + + len -= skb->len; + + /* Continuation fragments (no L2CAP header) */ + frag = &skb_shinfo(skb)->frag_list; + while (len) { + count = min_t(unsigned int, conn->mtu, len); + + *frag = bt_skb_alloc(count, GFP_KERNEL); + if (!*frag) + goto fail; + + skb_put_data(*frag, data, count); + + len -= count; + data += count; + + frag = &(*frag)->next; + } + + return skb; + +fail: + kfree_skb(skb); + return NULL; +} + +static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, + unsigned long *val) +{ + struct l2cap_conf_opt *opt = *ptr; + int len; + + len = L2CAP_CONF_OPT_SIZE + opt->len; + *ptr += len; + + *type = opt->type; + *olen = opt->len; + + switch (opt->len) { + case 1: + *val = *((u8 *) opt->val); + break; + + case 2: + *val = get_unaligned_le16(opt->val); + break; + + case 4: + *val = get_unaligned_le32(opt->val); + break; + + default: + *val = (unsigned long) opt->val; + break; + } + + BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val); + return len; +} + +static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size) +{ + struct l2cap_conf_opt *opt = *ptr; + + BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val); + + if (size < L2CAP_CONF_OPT_SIZE + len) + return; + + opt->type = type; + opt->len = len; + + switch (len) { + case 1: + *((u8 *) opt->val) = val; + break; + + case 2: + put_unaligned_le16(val, opt->val); + break; + + case 4: + put_unaligned_le32(val, opt->val); + break; + + default: + memcpy(opt->val, (void *) val, len); + break; + } + + *ptr += L2CAP_CONF_OPT_SIZE + len; +} + +static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size) +{ + struct l2cap_conf_efs efs; + + switch (chan->mode) { + case L2CAP_MODE_ERTM: + efs.id = chan->local_id; + efs.stype = chan->local_stype; + efs.msdu = cpu_to_le16(chan->local_msdu); + efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); + efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT); + efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO); + break; + + case L2CAP_MODE_STREAMING: + efs.id = 1; + efs.stype = L2CAP_SERV_BESTEFFORT; + efs.msdu = cpu_to_le16(chan->local_msdu); + efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime); + efs.acc_lat = 0; + efs.flush_to = 0; + break; + + default: + return; + } + + l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs), + (unsigned long) &efs, size); +} + +static void l2cap_ack_timeout(struct work_struct *work) +{ + struct l2cap_chan *chan = container_of(work, struct l2cap_chan, + ack_timer.work); + u16 frames_to_ack; + + BT_DBG("chan %p", chan); + + l2cap_chan_lock(chan); + + frames_to_ack = __seq_offset(chan, chan->buffer_seq, + chan->last_acked_seq); + + if (frames_to_ack) + l2cap_send_rr_or_rnr(chan, 0); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); +} + +int l2cap_ertm_init(struct l2cap_chan *chan) +{ + int err; + + chan->next_tx_seq = 0; + chan->expected_tx_seq = 0; + chan->expected_ack_seq = 0; + chan->unacked_frames = 0; + chan->buffer_seq = 0; + chan->frames_sent = 0; + chan->last_acked_seq = 0; + chan->sdu = NULL; + chan->sdu_last_frag = NULL; + chan->sdu_len = 0; + + skb_queue_head_init(&chan->tx_q); + + chan->local_amp_id = AMP_ID_BREDR; + 
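+ /* A freshly initialised channel is not moving between controllers */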
chan->move_id = AMP_ID_BREDR;
+ chan->move_state = L2CAP_MOVE_STABLE;
+ chan->move_role = L2CAP_MOVE_ROLE_NONE;
+
+ if (chan->mode != L2CAP_MODE_ERTM)
+ return 0;
+
+ chan->rx_state = L2CAP_RX_STATE_RECV;
+ chan->tx_state = L2CAP_TX_STATE_XMIT;
+
+ skb_queue_head_init(&chan->srej_q);
+
+ err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
+ if (err < 0)
+ return err;
+
+ err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
+ if (err < 0)
+ l2cap_seq_list_free(&chan->srej_list);
+
+ return err;
+}
+
+static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
+{
+ switch (mode) {
+ case L2CAP_MODE_STREAMING:
+ case L2CAP_MODE_ERTM:
+ if (l2cap_mode_supported(mode, remote_feat_mask))
+ return mode;
+ fallthrough;
+ default:
+ return L2CAP_MODE_BASIC;
+ }
+}
+
+static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
+{
+ return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
+ (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
+}
+
+static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
+{
+ return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
+ (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
+}
+
+static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
+ struct l2cap_conf_rfc *rfc)
+{
+ if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
+ u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
+
+ /* Class 1 devices must have ERTM timeouts
+ * exceeding the Link Supervision Timeout. The
+ * default Link Supervision Timeout for AMP
+ * controllers is 10 seconds.
+ *
+ * Class 1 devices use 0xffffffff for their
+ * best-effort flush timeout, so the clamping logic
+ * will result in a timeout that meets the above
+ * requirement. ERTM timeouts are 16-bit values, so
+ * the maximum timeout is 65.535 seconds.
+ */
+
+ /* Convert timeout to milliseconds and round */
+ ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
+
+ /* This is the recommended formula for class 2 devices
+ * that start ERTM timers when packets are sent to the
+ * controller.
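+ *
+ * For example (illustrative): a 100 ms best-effort flush
+ * timeout becomes 3 * 100 + 500 = 800 ms for the
+ * retransmission and monitor timeouts set below.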
+ */ + ertm_to = 3 * ertm_to + 500; + + if (ertm_to > 0xffff) + ertm_to = 0xffff; + + rfc->retrans_timeout = cpu_to_le16((u16) ertm_to); + rfc->monitor_timeout = rfc->retrans_timeout; + } else { + rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO); + rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO); + } +} + +static inline void l2cap_txwin_setup(struct l2cap_chan *chan) +{ + if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW && + __l2cap_ews_supported(chan->conn)) { + /* use extended control field */ + set_bit(FLAG_EXT_CTRL, &chan->flags); + chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; + } else { + chan->tx_win = min_t(u16, chan->tx_win, + L2CAP_DEFAULT_TX_WINDOW); + chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW; + } + chan->ack_win = chan->tx_win; +} + +static void l2cap_mtu_auto(struct l2cap_chan *chan) +{ + struct hci_conn *conn = chan->conn->hcon; + + chan->imtu = L2CAP_DEFAULT_MIN_MTU; + + /* The 2-DH1 packet has between 2 and 56 information bytes + * (including the 2-byte payload header) + */ + if (!(conn->pkt_type & HCI_2DH1)) + chan->imtu = 54; + + /* The 3-DH1 packet has between 2 and 85 information bytes + * (including the 2-byte payload header) + */ + if (!(conn->pkt_type & HCI_3DH1)) + chan->imtu = 83; + + /* The 2-DH3 packet has between 2 and 369 information bytes + * (including the 2-byte payload header) + */ + if (!(conn->pkt_type & HCI_2DH3)) + chan->imtu = 367; + + /* The 3-DH3 packet has between 2 and 554 information bytes + * (including the 2-byte payload header) + */ + if (!(conn->pkt_type & HCI_3DH3)) + chan->imtu = 552; + + /* The 2-DH5 packet has between 2 and 681 information bytes + * (including the 2-byte payload header) + */ + if (!(conn->pkt_type & HCI_2DH5)) + chan->imtu = 679; + + /* The 3-DH5 packet has between 2 and 1023 information bytes + * (including the 2-byte payload header) + */ + if (!(conn->pkt_type & HCI_3DH5)) + chan->imtu = 1021; +} + +static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) +{ + struct l2cap_conf_req *req = data; + struct l2cap_conf_rfc rfc = { .mode = chan->mode }; + void *ptr = req->data; + void *endptr = data + data_size; + u16 size; + + BT_DBG("chan %p", chan); + + if (chan->num_conf_req || chan->num_conf_rsp) + goto done; + + switch (chan->mode) { + case L2CAP_MODE_STREAMING: + case L2CAP_MODE_ERTM: + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) + break; + + if (__l2cap_efs_supported(chan->conn)) + set_bit(FLAG_EFS_ENABLE, &chan->flags); + + fallthrough; + default: + chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); + break; + } + +done: + if (chan->imtu != L2CAP_DEFAULT_MTU) { + if (!chan->imtu) + l2cap_mtu_auto(chan); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, + endptr - ptr); + } + + switch (chan->mode) { + case L2CAP_MODE_BASIC: + if (disable_ertm) + break; + + if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) && + !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING)) + break; + + rfc.mode = L2CAP_MODE_BASIC; + rfc.txwin_size = 0; + rfc.max_transmit = 0; + rfc.retrans_timeout = 0; + rfc.monitor_timeout = 0; + rfc.max_pdu_size = 0; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), + (unsigned long) &rfc, endptr - ptr); + break; + + case L2CAP_MODE_ERTM: + rfc.mode = L2CAP_MODE_ERTM; + rfc.max_transmit = chan->max_tx; + + __l2cap_set_ertm_timeouts(chan, &rfc); + + size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - + L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - + L2CAP_FCS_SIZE); + rfc.max_pdu_size = cpu_to_le16(size); + + 
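+ /* l2cap_txwin_setup() above may enable the extended control
+ * field; the RFC option can only carry the legacy window, so
+ * a larger window is advertised via the EWS option below.
+ */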
l2cap_txwin_setup(chan); + + rfc.txwin_size = min_t(u16, chan->tx_win, + L2CAP_DEFAULT_TX_WINDOW); + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), + (unsigned long) &rfc, endptr - ptr); + + if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) + l2cap_add_opt_efs(&ptr, chan, endptr - ptr); + + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, + chan->tx_win, endptr - ptr); + + if (chan->conn->feat_mask & L2CAP_FEAT_FCS) + if (chan->fcs == L2CAP_FCS_NONE || + test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { + chan->fcs = L2CAP_FCS_NONE; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, + chan->fcs, endptr - ptr); + } + break; + + case L2CAP_MODE_STREAMING: + l2cap_txwin_setup(chan); + rfc.mode = L2CAP_MODE_STREAMING; + rfc.txwin_size = 0; + rfc.max_transmit = 0; + rfc.retrans_timeout = 0; + rfc.monitor_timeout = 0; + + size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu - + L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE - + L2CAP_FCS_SIZE); + rfc.max_pdu_size = cpu_to_le16(size); + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), + (unsigned long) &rfc, endptr - ptr); + + if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) + l2cap_add_opt_efs(&ptr, chan, endptr - ptr); + + if (chan->conn->feat_mask & L2CAP_FEAT_FCS) + if (chan->fcs == L2CAP_FCS_NONE || + test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) { + chan->fcs = L2CAP_FCS_NONE; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, + chan->fcs, endptr - ptr); + } + break; + } + + req->dcid = cpu_to_le16(chan->dcid); + req->flags = cpu_to_le16(0); + + return ptr - data; +} + +static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size) +{ + struct l2cap_conf_rsp *rsp = data; + void *ptr = rsp->data; + void *endptr = data + data_size; + void *req = chan->conf_req; + int len = chan->conf_len; + int type, hint, olen; + unsigned long val; + struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + struct l2cap_conf_efs efs; + u8 remote_efs = 0; + u16 mtu = L2CAP_DEFAULT_MTU; + u16 result = L2CAP_CONF_SUCCESS; + u16 size; + + BT_DBG("chan %p", chan); + + while (len >= L2CAP_CONF_OPT_SIZE) { + len -= l2cap_get_conf_opt(&req, &type, &olen, &val); + if (len < 0) + break; + + hint = type & L2CAP_CONF_HINT; + type &= L2CAP_CONF_MASK; + + switch (type) { + case L2CAP_CONF_MTU: + if (olen != 2) + break; + mtu = val; + break; + + case L2CAP_CONF_FLUSH_TO: + if (olen != 2) + break; + chan->flush_to = val; + break; + + case L2CAP_CONF_QOS: + break; + + case L2CAP_CONF_RFC: + if (olen != sizeof(rfc)) + break; + memcpy(&rfc, (void *) val, olen); + break; + + case L2CAP_CONF_FCS: + if (olen != 1) + break; + if (val == L2CAP_FCS_NONE) + set_bit(CONF_RECV_NO_FCS, &chan->conf_state); + break; + + case L2CAP_CONF_EFS: + if (olen != sizeof(efs)) + break; + remote_efs = 1; + memcpy(&efs, (void *) val, olen); + break; + + case L2CAP_CONF_EWS: + if (olen != 2) + break; + if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP)) + return -ECONNREFUSED; + set_bit(FLAG_EXT_CTRL, &chan->flags); + set_bit(CONF_EWS_RECV, &chan->conf_state); + chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW; + chan->remote_tx_win = val; + break; + + default: + if (hint) + break; + result = L2CAP_CONF_UNKNOWN; + l2cap_add_conf_opt(&ptr, (u8)type, sizeof(u8), type, endptr - ptr); + break; + } + } + + if (chan->num_conf_rsp || chan->num_conf_req > 1) + goto done; + + switch (chan->mode) { + case L2CAP_MODE_STREAMING: + case L2CAP_MODE_ERTM: + if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) { + chan->mode = l2cap_select_mode(rfc.mode, + 
chan->conn->feat_mask); + break; + } + + if (remote_efs) { + if (__l2cap_efs_supported(chan->conn)) + set_bit(FLAG_EFS_ENABLE, &chan->flags); + else + return -ECONNREFUSED; + } + + if (chan->mode != rfc.mode) + return -ECONNREFUSED; + + break; + } + +done: + if (chan->mode != rfc.mode) { + result = L2CAP_CONF_UNACCEPT; + rfc.mode = chan->mode; + + if (chan->num_conf_rsp == 1) + return -ECONNREFUSED; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), + (unsigned long) &rfc, endptr - ptr); + } + + if (result == L2CAP_CONF_SUCCESS) { + /* Configure output options and let the other side know + * which ones we don't like. */ + + if (mtu < L2CAP_DEFAULT_MIN_MTU) + result = L2CAP_CONF_UNACCEPT; + else { + chan->omtu = mtu; + set_bit(CONF_MTU_DONE, &chan->conf_state); + } + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr); + + if (remote_efs) { + if (chan->local_stype != L2CAP_SERV_NOTRAFIC && + efs.stype != L2CAP_SERV_NOTRAFIC && + efs.stype != chan->local_stype) { + + result = L2CAP_CONF_UNACCEPT; + + if (chan->num_conf_req >= 1) + return -ECONNREFUSED; + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, + sizeof(efs), + (unsigned long) &efs, endptr - ptr); + } else { + /* Send PENDING Conf Rsp */ + result = L2CAP_CONF_PENDING; + set_bit(CONF_LOC_CONF_PEND, &chan->conf_state); + } + } + + switch (rfc.mode) { + case L2CAP_MODE_BASIC: + chan->fcs = L2CAP_FCS_NONE; + set_bit(CONF_MODE_DONE, &chan->conf_state); + break; + + case L2CAP_MODE_ERTM: + if (!test_bit(CONF_EWS_RECV, &chan->conf_state)) + chan->remote_tx_win = rfc.txwin_size; + else + rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW; + + chan->remote_max_tx = rfc.max_transmit; + + size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), + chan->conn->mtu - L2CAP_EXT_HDR_SIZE - + L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); + rfc.max_pdu_size = cpu_to_le16(size); + chan->remote_mps = size; + + __l2cap_set_ertm_timeouts(chan, &rfc); + + set_bit(CONF_MODE_DONE, &chan->conf_state); + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, + sizeof(rfc), (unsigned long) &rfc, endptr - ptr); + + if (remote_efs && + test_bit(FLAG_EFS_ENABLE, &chan->flags)) { + chan->remote_id = efs.id; + chan->remote_stype = efs.stype; + chan->remote_msdu = le16_to_cpu(efs.msdu); + chan->remote_flush_to = + le32_to_cpu(efs.flush_to); + chan->remote_acc_lat = + le32_to_cpu(efs.acc_lat); + chan->remote_sdu_itime = + le32_to_cpu(efs.sdu_itime); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, + sizeof(efs), + (unsigned long) &efs, endptr - ptr); + } + break; + + case L2CAP_MODE_STREAMING: + size = min_t(u16, le16_to_cpu(rfc.max_pdu_size), + chan->conn->mtu - L2CAP_EXT_HDR_SIZE - + L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE); + rfc.max_pdu_size = cpu_to_le16(size); + chan->remote_mps = size; + + set_bit(CONF_MODE_DONE, &chan->conf_state); + + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), + (unsigned long) &rfc, endptr - ptr); + + break; + + default: + result = L2CAP_CONF_UNACCEPT; + + memset(&rfc, 0, sizeof(rfc)); + rfc.mode = chan->mode; + } + + if (result == L2CAP_CONF_SUCCESS) + set_bit(CONF_OUTPUT_DONE, &chan->conf_state); + } + rsp->scid = cpu_to_le16(chan->dcid); + rsp->result = cpu_to_le16(result); + rsp->flags = cpu_to_le16(0); + + return ptr - data; +} + +static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, + void *data, size_t size, u16 *result) +{ + struct l2cap_conf_req *req = data; + void *ptr = req->data; + void *endptr = data + size; + int type, olen; + unsigned long val; + struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC }; + struct 
l2cap_conf_efs efs; + + BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data); + + while (len >= L2CAP_CONF_OPT_SIZE) { + len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val); + if (len < 0) + break; + + switch (type) { + case L2CAP_CONF_MTU: + if (olen != 2) + break; + if (val < L2CAP_DEFAULT_MIN_MTU) { + *result = L2CAP_CONF_UNACCEPT; + chan->imtu = L2CAP_DEFAULT_MIN_MTU; + } else + chan->imtu = val; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, + endptr - ptr); + break; + + case L2CAP_CONF_FLUSH_TO: + if (olen != 2) + break; + chan->flush_to = val; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, + chan->flush_to, endptr - ptr); + break; + + case L2CAP_CONF_RFC: + if (olen != sizeof(rfc)) + break; + memcpy(&rfc, (void *)val, olen); + if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) && + rfc.mode != chan->mode) + return -ECONNREFUSED; + chan->fcs = 0; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc), + (unsigned long) &rfc, endptr - ptr); + break; + + case L2CAP_CONF_EWS: + if (olen != 2) + break; + chan->ack_win = min_t(u16, val, chan->ack_win); + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2, + chan->tx_win, endptr - ptr); + break; + + case L2CAP_CONF_EFS: + if (olen != sizeof(efs)) + break; + memcpy(&efs, (void *)val, olen); + if (chan->local_stype != L2CAP_SERV_NOTRAFIC && + efs.stype != L2CAP_SERV_NOTRAFIC && + efs.stype != chan->local_stype) + return -ECONNREFUSED; + l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs), + (unsigned long) &efs, endptr - ptr); + break; + + case L2CAP_CONF_FCS: + if (olen != 1) + break; + if (*result == L2CAP_CONF_PENDING) + if (val == L2CAP_FCS_NONE) + set_bit(CONF_RECV_NO_FCS, + &chan->conf_state); + break; + } + } + + if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode) + return -ECONNREFUSED; + + chan->mode = rfc.mode; + + if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) { + switch (rfc.mode) { + case L2CAP_MODE_ERTM: + chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout); + chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout); + chan->mps = le16_to_cpu(rfc.max_pdu_size); + if (!test_bit(FLAG_EXT_CTRL, &chan->flags)) + chan->ack_win = min_t(u16, chan->ack_win, + rfc.txwin_size); + + if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) { + chan->local_msdu = le16_to_cpu(efs.msdu); + chan->local_sdu_itime = + le32_to_cpu(efs.sdu_itime); + chan->local_acc_lat = le32_to_cpu(efs.acc_lat); + chan->local_flush_to = + le32_to_cpu(efs.flush_to); + } + break; + + case L2CAP_MODE_STREAMING: + chan->mps = le16_to_cpu(rfc.max_pdu_size); + } + } + + req->dcid = cpu_to_le16(chan->dcid); + req->flags = cpu_to_le16(0); + + return ptr - data; +} + +static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, + u16 result, u16 flags) +{ + struct l2cap_conf_rsp *rsp = data; + void *ptr = rsp->data; + + BT_DBG("chan %p", chan); + + rsp->scid = cpu_to_le16(chan->dcid); + rsp->result = cpu_to_le16(result); + rsp->flags = cpu_to_le16(flags); + + return ptr - data; +} + +void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan) +{ + struct l2cap_le_conn_rsp rsp; + struct l2cap_conn *conn = chan->conn; + + BT_DBG("chan %p", chan); + + rsp.dcid = cpu_to_le16(chan->scid); + rsp.mtu = cpu_to_le16(chan->imtu); + rsp.mps = cpu_to_le16(chan->mps); + rsp.credits = cpu_to_le16(chan->rx_credits); + rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); + + l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), + &rsp); +} + +static void l2cap_ecred_list_defer(struct l2cap_chan *chan, void *data) +{ + int *result = 
data; + + if (*result || test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + return; + + switch (chan->state) { + case BT_CONNECT2: + /* If channel still pending accept add to result */ + (*result)++; + return; + case BT_CONNECTED: + return; + default: + /* If not connected or pending accept it has been refused */ + *result = -ECONNREFUSED; + return; + } +} + +struct l2cap_ecred_rsp_data { + struct { + struct l2cap_ecred_conn_rsp rsp; + __le16 scid[L2CAP_ECRED_MAX_CID]; + } __packed pdu; + int count; +}; + +static void l2cap_ecred_rsp_defer(struct l2cap_chan *chan, void *data) +{ + struct l2cap_ecred_rsp_data *rsp = data; + + if (test_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags)) + return; + + /* Reset ident so only one response is sent */ + chan->ident = 0; + + /* Include all channels pending with the same ident */ + if (!rsp->pdu.rsp.result) + rsp->pdu.rsp.dcid[rsp->count++] = cpu_to_le16(chan->scid); + else + l2cap_chan_del(chan, ECONNRESET); +} + +void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_ecred_rsp_data data; + u16 id = chan->ident; + int result = 0; + + if (!id) + return; + + BT_DBG("chan %p id %d", chan, id); + + memset(&data, 0, sizeof(data)); + + data.pdu.rsp.mtu = cpu_to_le16(chan->imtu); + data.pdu.rsp.mps = cpu_to_le16(chan->mps); + data.pdu.rsp.credits = cpu_to_le16(chan->rx_credits); + data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS); + + /* Verify that all channels are ready */ + __l2cap_chan_list_id(conn, id, l2cap_ecred_list_defer, &result); + + if (result > 0) + return; + + if (result < 0) + data.pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_AUTHORIZATION); + + /* Build response */ + __l2cap_chan_list_id(conn, id, l2cap_ecred_rsp_defer, &data); + + l2cap_send_cmd(conn, id, L2CAP_ECRED_CONN_RSP, + sizeof(data.pdu.rsp) + (data.count * sizeof(__le16)), + &data.pdu); +} + +void __l2cap_connect_rsp_defer(struct l2cap_chan *chan) +{ + struct l2cap_conn_rsp rsp; + struct l2cap_conn *conn = chan->conn; + u8 buf[128]; + u8 rsp_code; + + rsp.scid = cpu_to_le16(chan->dcid); + rsp.dcid = cpu_to_le16(chan->scid); + rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + + if (chan->hs_hcon) + rsp_code = L2CAP_CREATE_CHAN_RSP; + else + rsp_code = L2CAP_CONN_RSP; + + BT_DBG("chan %p rsp_code %u", chan, rsp_code); + + l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp); + + if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) + return; + + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); + chan->num_conf_req++; +} + +static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len) +{ + int type, olen; + unsigned long val; + /* Use sane default values in case a misbehaving remote device + * did not send an RFC or extended window size option. 
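+ * When an extended window size option is present it takes
+ * precedence over the RFC txwin_size for channels using the
+ * extended control field (see the FLAG_EXT_CTRL test below).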
+ */
+ u16 txwin_ext = chan->ack_win;
+ struct l2cap_conf_rfc rfc = {
+ .mode = chan->mode,
+ .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
+ .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
+ .max_pdu_size = cpu_to_le16(chan->imtu),
+ .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
+ };
+
+ BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
+
+ if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
+ return;
+
+ while (len >= L2CAP_CONF_OPT_SIZE) {
+ len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
+ if (len < 0)
+ break;
+
+ switch (type) {
+ case L2CAP_CONF_RFC:
+ if (olen != sizeof(rfc))
+ break;
+ memcpy(&rfc, (void *)val, olen);
+ break;
+ case L2CAP_CONF_EWS:
+ if (olen != 2)
+ break;
+ txwin_ext = val;
+ break;
+ }
+ }
+
+ switch (rfc.mode) {
+ case L2CAP_MODE_ERTM:
+ chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
+ chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ if (test_bit(FLAG_EXT_CTRL, &chan->flags))
+ chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
+ else
+ chan->ack_win = min_t(u16, chan->ack_win,
+ rfc.txwin_size);
+ break;
+ case L2CAP_MODE_STREAMING:
+ chan->mps = le16_to_cpu(rfc.max_pdu_size);
+ }
+}
+
+static inline int l2cap_command_rej(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+ u8 *data)
+{
+ struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
+
+ if (cmd_len < sizeof(*rej))
+ return -EPROTO;
+
+ if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
+ return 0;
+
+ if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
+ cmd->ident == conn->info_ident) {
+ cancel_delayed_work(&conn->info_timer);
+
+ conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
+ conn->info_ident = 0;
+
+ l2cap_conn_start(conn);
+ }
+
+ return 0;
+}
+
+static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
+ struct l2cap_cmd_hdr *cmd,
+ u8 *data, u8 rsp_code, u8 amp_id)
+{
+ struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
+ struct l2cap_conn_rsp rsp;
+ struct l2cap_chan *chan = NULL, *pchan;
+ int result, status = L2CAP_CS_NO_INFO;
+
+ u16 dcid = 0, scid = __le16_to_cpu(req->scid);
+ __le16 psm = req->psm;
+
+ BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
+
+ /* Check if we have a socket listening on this psm */
+ pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
+ &conn->hcon->dst, ACL_LINK);
+ if (!pchan) {
+ result = L2CAP_CR_BAD_PSM;
+ goto sendresp;
+ }
+
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(pchan);
+
+ /* Check if the ACL is secure enough (if not SDP) */
+ if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
+ !hci_conn_check_link_mode(conn->hcon)) {
+ conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
+ result = L2CAP_CR_SEC_BLOCK;
+ goto response;
+ }
+
+ result = L2CAP_CR_NO_MEM;
+
+ /* Check for valid dynamic CID range (as per Erratum 3253) */
+ if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_DYN_END) {
+ result = L2CAP_CR_INVALID_SCID;
+ goto response;
+ }
+
+ /* Check if we already have a channel with that dcid */
+ if (__l2cap_get_chan_by_dcid(conn, scid)) {
+ result = L2CAP_CR_SCID_IN_USE;
+ goto response;
+ }
+
+ chan = pchan->ops->new_connection(pchan);
+ if (!chan)
+ goto response;
+
+ /* For certain devices (ex: HID mouse), support for authentication,
+ * pairing and bonding is optional. For such devices, in order to
+ * avoid keeping the ACL alive for too long after L2CAP
+ * disconnection, reset the ACL disc_timeout back to
+ * HCI_DISCONN_TIMEOUT during L2CAP connect.
+ */ + conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT; + + bacpy(&chan->src, &conn->hcon->src); + bacpy(&chan->dst, &conn->hcon->dst); + chan->src_type = bdaddr_src_type(conn->hcon); + chan->dst_type = bdaddr_dst_type(conn->hcon); + chan->psm = psm; + chan->dcid = scid; + chan->local_amp_id = amp_id; + + __l2cap_chan_add(conn, chan); + + dcid = chan->scid; + + __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); + + chan->ident = cmd->ident; + + if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) { + if (l2cap_chan_check_security(chan, false)) { + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { + l2cap_state_change(chan, BT_CONNECT2); + result = L2CAP_CR_PEND; + status = L2CAP_CS_AUTHOR_PEND; + chan->ops->defer(chan); + } else { + /* Force pending result for AMP controllers. + * The connection will succeed after the + * physical link is up. + */ + if (amp_id == AMP_ID_BREDR) { + l2cap_state_change(chan, BT_CONFIG); + result = L2CAP_CR_SUCCESS; + } else { + l2cap_state_change(chan, BT_CONNECT2); + result = L2CAP_CR_PEND; + } + status = L2CAP_CS_NO_INFO; + } + } else { + l2cap_state_change(chan, BT_CONNECT2); + result = L2CAP_CR_PEND; + status = L2CAP_CS_AUTHEN_PEND; + } + } else { + l2cap_state_change(chan, BT_CONNECT2); + result = L2CAP_CR_PEND; + status = L2CAP_CS_NO_INFO; + } + +response: + l2cap_chan_unlock(pchan); + mutex_unlock(&conn->chan_lock); + l2cap_chan_put(pchan); + +sendresp: + rsp.scid = cpu_to_le16(scid); + rsp.dcid = cpu_to_le16(dcid); + rsp.result = cpu_to_le16(result); + rsp.status = cpu_to_le16(status); + l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp); + + if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) { + struct l2cap_info_req info; + info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT; + conn->info_ident = l2cap_get_ident(conn); + + schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT); + + l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ, + sizeof(info), &info); + } + + if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) && + result == L2CAP_CR_SUCCESS) { + u8 buf[128]; + set_bit(CONF_REQ_SENT, &chan->conf_state); + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); + chan->num_conf_req++; + } + + return chan; +} + +static int l2cap_connect_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data) +{ + struct hci_dev *hdev = conn->hcon->hdev; + struct hci_conn *hcon = conn->hcon; + + if (cmd_len < sizeof(struct l2cap_conn_req)) + return -EPROTO; + + hci_dev_lock(hdev); + if (hci_dev_test_flag(hdev, HCI_MGMT) && + !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags)) + mgmt_device_connected(hdev, hcon, NULL, 0); + hci_dev_unlock(hdev); + + l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0); + return 0; +} + +static int l2cap_connect_create_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data; + u16 scid, dcid, result, status; + struct l2cap_chan *chan; + u8 req[128]; + int err; + + if (cmd_len < sizeof(*rsp)) + return -EPROTO; + + scid = __le16_to_cpu(rsp->scid); + dcid = __le16_to_cpu(rsp->dcid); + result = __le16_to_cpu(rsp->result); + status = __le16_to_cpu(rsp->status); + + if (result == L2CAP_CR_SUCCESS && (dcid < L2CAP_CID_DYN_START || + dcid > L2CAP_CID_DYN_END)) + return -EPROTO; + + BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", + dcid, scid, result, status); + + 
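+	/* Look the channel up under chan_lock: by scid when the response
+	 * carries one, otherwise by the ident of the pending request.
+	 */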
mutex_lock(&conn->chan_lock); + + if (scid) { + chan = __l2cap_get_chan_by_scid(conn, scid); + if (!chan) { + err = -EBADSLT; + goto unlock; + } + } else { + chan = __l2cap_get_chan_by_ident(conn, cmd->ident); + if (!chan) { + err = -EBADSLT; + goto unlock; + } + } + + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) { + err = -EBADSLT; + goto unlock; + } + + err = 0; + + l2cap_chan_lock(chan); + + switch (result) { + case L2CAP_CR_SUCCESS: + if (__l2cap_get_chan_by_dcid(conn, dcid)) { + err = -EBADSLT; + break; + } + + l2cap_state_change(chan, BT_CONFIG); + chan->ident = 0; + chan->dcid = dcid; + clear_bit(CONF_CONNECT_PEND, &chan->conf_state); + + if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) + break; + + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(chan, req, sizeof(req)), req); + chan->num_conf_req++; + break; + + case L2CAP_CR_PEND: + set_bit(CONF_CONNECT_PEND, &chan->conf_state); + break; + + default: + l2cap_chan_del(chan, ECONNREFUSED); + break; + } + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + +unlock: + mutex_unlock(&conn->chan_lock); + + return err; +} + +static inline void set_default_fcs(struct l2cap_chan *chan) +{ + /* FCS is enabled only in ERTM or streaming mode, if one or both + * sides request it. + */ + if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING) + chan->fcs = L2CAP_FCS_NONE; + else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) + chan->fcs = L2CAP_FCS_CRC16; +} + +static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data, + u8 ident, u16 flags) +{ + struct l2cap_conn *conn = chan->conn; + + BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident, + flags); + + clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state); + set_bit(CONF_OUTPUT_DONE, &chan->conf_state); + + l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP, + l2cap_build_conf_rsp(chan, data, + L2CAP_CONF_SUCCESS, flags), data); +} + +static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident, + u16 scid, u16 dcid) +{ + struct l2cap_cmd_rej_cid rej; + + rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID); + rej.scid = __cpu_to_le16(scid); + rej.dcid = __cpu_to_le16(dcid); + + l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); +} + +static inline int l2cap_config_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_conf_req *req = (struct l2cap_conf_req *) data; + u16 dcid, flags; + u8 rsp[64]; + struct l2cap_chan *chan; + int len, err = 0; + + if (cmd_len < sizeof(*req)) + return -EPROTO; + + dcid = __le16_to_cpu(req->dcid); + flags = __le16_to_cpu(req->flags); + + BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags); + + chan = l2cap_get_chan_by_scid(conn, dcid); + if (!chan) { + cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0); + return 0; + } + + if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 && + chan->state != BT_CONNECTED) { + cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, + chan->dcid); + goto unlock; + } + + /* Reject if config buffer is too small. */ + len = cmd_len - sizeof(*req); + if (chan->conf_len + len > sizeof(chan->conf_req)) { + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, + l2cap_build_conf_rsp(chan, rsp, + L2CAP_CONF_REJECT, flags), rsp); + goto unlock; + } + + /* Store config. */ + memcpy(chan->conf_req + chan->conf_len, req->data, len); + chan->conf_len += len; + + if (flags & L2CAP_CONF_FLAG_CONTINUATION) { + /* Incomplete config. Send empty response. 
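+		 * The response echoes the continuation flag; keep buffering
+		 * until the remote sends its final fragment.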
*/ + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, + l2cap_build_conf_rsp(chan, rsp, + L2CAP_CONF_SUCCESS, flags), rsp); + goto unlock; + } + + /* Complete config. */ + len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp)); + if (len < 0) { + l2cap_send_disconn_req(chan, ECONNRESET); + goto unlock; + } + + chan->ident = cmd->ident; + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp); + if (chan->num_conf_rsp < L2CAP_CONF_MAX_CONF_RSP) + chan->num_conf_rsp++; + + /* Reset config buffer. */ + chan->conf_len = 0; + + if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) + goto unlock; + + if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { + set_default_fcs(chan); + + if (chan->mode == L2CAP_MODE_ERTM || + chan->mode == L2CAP_MODE_STREAMING) + err = l2cap_ertm_init(chan); + + if (err < 0) + l2cap_send_disconn_req(chan, -err); + else + l2cap_chan_ready(chan); + + goto unlock; + } + + if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) { + u8 buf[64]; + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ, + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); + chan->num_conf_req++; + } + + /* Got Conf Rsp PENDING from remote side and assume we sent + Conf Rsp PENDING in the code above */ + if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) && + test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { + + /* check compatibility */ + + /* Send rsp for BR/EDR channel */ + if (!chan->hs_hcon) + l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags); + else + chan->ident = cmd->ident; + } + +unlock: + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return err; +} + +static inline int l2cap_config_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data; + u16 scid, flags, result; + struct l2cap_chan *chan; + int len = cmd_len - sizeof(*rsp); + int err = 0; + + if (cmd_len < sizeof(*rsp)) + return -EPROTO; + + scid = __le16_to_cpu(rsp->scid); + flags = __le16_to_cpu(rsp->flags); + result = __le16_to_cpu(rsp->result); + + BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags, + result, len); + + chan = l2cap_get_chan_by_scid(conn, scid); + if (!chan) + return 0; + + switch (result) { + case L2CAP_CONF_SUCCESS: + l2cap_conf_rfc_get(chan, rsp->data, len); + clear_bit(CONF_REM_CONF_PEND, &chan->conf_state); + break; + + case L2CAP_CONF_PENDING: + set_bit(CONF_REM_CONF_PEND, &chan->conf_state); + + if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) { + char buf[64]; + + len = l2cap_parse_conf_rsp(chan, rsp->data, len, + buf, sizeof(buf), &result); + if (len < 0) { + l2cap_send_disconn_req(chan, ECONNRESET); + goto done; + } + + if (!chan->hs_hcon) { + l2cap_send_efs_conf_rsp(chan, buf, cmd->ident, + 0); + } else { + if (l2cap_check_efs(chan)) { + amp_create_logical_link(chan); + chan->ident = cmd->ident; + } + } + } + goto done; + + case L2CAP_CONF_UNKNOWN: + case L2CAP_CONF_UNACCEPT: + if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) { + char req[64]; + + if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) { + l2cap_send_disconn_req(chan, ECONNRESET); + goto done; + } + + /* throw out any old stored conf requests */ + result = L2CAP_CONF_SUCCESS; + len = l2cap_parse_conf_rsp(chan, rsp->data, len, + req, sizeof(req), &result); + if (len < 0) { + l2cap_send_disconn_req(chan, ECONNRESET); + goto done; + } + + l2cap_send_cmd(conn, l2cap_get_ident(conn), + L2CAP_CONF_REQ, len, req); + chan->num_conf_req++; + if (result != L2CAP_CONF_SUCCESS) + goto done; + break; + } + fallthrough; 
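+		/* Retry budget exhausted; give up and tear the channel
+		 * down below.
+		 */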
+ + default: + l2cap_chan_set_err(chan, ECONNRESET); + + __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT); + l2cap_send_disconn_req(chan, ECONNRESET); + goto done; + } + + if (flags & L2CAP_CONF_FLAG_CONTINUATION) + goto done; + + set_bit(CONF_INPUT_DONE, &chan->conf_state); + + if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) { + set_default_fcs(chan); + + if (chan->mode == L2CAP_MODE_ERTM || + chan->mode == L2CAP_MODE_STREAMING) + err = l2cap_ertm_init(chan); + + if (err < 0) + l2cap_send_disconn_req(chan, -err); + else + l2cap_chan_ready(chan); + } + +done: + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return err; +} + +static inline int l2cap_disconnect_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data; + struct l2cap_disconn_rsp rsp; + u16 dcid, scid; + struct l2cap_chan *chan; + + if (cmd_len != sizeof(*req)) + return -EPROTO; + + scid = __le16_to_cpu(req->scid); + dcid = __le16_to_cpu(req->dcid); + + BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid); + + chan = l2cap_get_chan_by_scid(conn, dcid); + if (!chan) { + cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid); + return 0; + } + + rsp.dcid = cpu_to_le16(chan->scid); + rsp.scid = cpu_to_le16(chan->dcid); + l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp); + + chan->ops->set_shutdown(chan); + + l2cap_chan_unlock(chan); + mutex_lock(&conn->chan_lock); + l2cap_chan_lock(chan); + l2cap_chan_del(chan, ECONNRESET); + mutex_unlock(&conn->chan_lock); + + chan->ops->close(chan); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + return 0; +} + +static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data; + u16 dcid, scid; + struct l2cap_chan *chan; + + if (cmd_len != sizeof(*rsp)) + return -EPROTO; + + scid = __le16_to_cpu(rsp->scid); + dcid = __le16_to_cpu(rsp->dcid); + + BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid); + + chan = l2cap_get_chan_by_scid(conn, scid); + if (!chan) { + return 0; + } + + if (chan->state != BT_DISCONN) { + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + return 0; + } + + l2cap_chan_unlock(chan); + mutex_lock(&conn->chan_lock); + l2cap_chan_lock(chan); + l2cap_chan_del(chan, 0); + mutex_unlock(&conn->chan_lock); + + chan->ops->close(chan); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + return 0; +} + +static inline int l2cap_information_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_info_req *req = (struct l2cap_info_req *) data; + u16 type; + + if (cmd_len != sizeof(*req)) + return -EPROTO; + + type = __le16_to_cpu(req->type); + + BT_DBG("type 0x%4.4x", type); + + if (type == L2CAP_IT_FEAT_MASK) { + u8 buf[8]; + u32 feat_mask = l2cap_feat_mask; + struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; + rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK); + rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); + if (!disable_ertm) + feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING + | L2CAP_FEAT_FCS; + if (conn->local_fixed_chan & L2CAP_FC_A2MP) + feat_mask |= L2CAP_FEAT_EXT_FLOW + | L2CAP_FEAT_EXT_WINDOW; + + put_unaligned_le32(feat_mask, rsp->data); + l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), + buf); + } else if (type == L2CAP_IT_FIXED_CHAN) { + u8 buf[12]; + struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf; + + rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); + 
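+		/* 12-octet payload: type (2) + result (2) + an 8-byte fixed
+		 * channel bitmap, of which only the first octet is used.
+		 */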
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS); + rsp->data[0] = conn->local_fixed_chan; + memset(rsp->data + 1, 0, 7); + l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf), + buf); + } else { + struct l2cap_info_rsp rsp; + rsp.type = cpu_to_le16(type); + rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP); + l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp), + &rsp); + } + + return 0; +} + +static inline int l2cap_information_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data; + u16 type, result; + + if (cmd_len < sizeof(*rsp)) + return -EPROTO; + + type = __le16_to_cpu(rsp->type); + result = __le16_to_cpu(rsp->result); + + BT_DBG("type 0x%4.4x result 0x%2.2x", type, result); + + /* L2CAP Info req/rsp are unbound to channels, add extra checks */ + if (cmd->ident != conn->info_ident || + conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) + return 0; + + cancel_delayed_work(&conn->info_timer); + + if (result != L2CAP_IR_SUCCESS) { + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; + conn->info_ident = 0; + + l2cap_conn_start(conn); + + return 0; + } + + switch (type) { + case L2CAP_IT_FEAT_MASK: + conn->feat_mask = get_unaligned_le32(rsp->data); + + if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) { + struct l2cap_info_req req; + req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN); + + conn->info_ident = l2cap_get_ident(conn); + + l2cap_send_cmd(conn, conn->info_ident, + L2CAP_INFO_REQ, sizeof(req), &req); + } else { + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; + conn->info_ident = 0; + + l2cap_conn_start(conn); + } + break; + + case L2CAP_IT_FIXED_CHAN: + conn->remote_fixed_chan = rsp->data[0]; + conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE; + conn->info_ident = 0; + + l2cap_conn_start(conn); + break; + } + + return 0; +} + +static int l2cap_create_channel_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, + u16 cmd_len, void *data) +{ + struct l2cap_create_chan_req *req = data; + struct l2cap_create_chan_rsp rsp; + struct l2cap_chan *chan; + struct hci_dev *hdev; + u16 psm, scid; + + if (cmd_len != sizeof(*req)) + return -EPROTO; + + if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) + return -EINVAL; + + psm = le16_to_cpu(req->psm); + scid = le16_to_cpu(req->scid); + + BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id); + + /* For controller id 0 make BR/EDR connection */ + if (req->amp_id == AMP_ID_BREDR) { + l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP, + req->amp_id); + return 0; + } + + /* Validate AMP controller id */ + hdev = hci_dev_get(req->amp_id); + if (!hdev) + goto error; + + if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) { + hci_dev_put(hdev); + goto error; + } + + chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP, + req->amp_id); + if (chan) { + struct amp_mgr *mgr = conn->hcon->amp_mgr; + struct hci_conn *hs_hcon; + + hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK, + &conn->hcon->dst); + if (!hs_hcon) { + hci_dev_put(hdev); + cmd_reject_invalid_cid(conn, cmd->ident, chan->scid, + chan->dcid); + return 0; + } + + BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon); + + mgr->bredr_chan = chan; + chan->hs_hcon = hs_hcon; + chan->fcs = L2CAP_FCS_NONE; + conn->mtu = hdev->block_mtu; + } + + hci_dev_put(hdev); + + return 0; + +error: + rsp.dcid = 0; + rsp.scid = cpu_to_le16(scid); + rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + + l2cap_send_cmd(conn, 
cmd->ident, L2CAP_CREATE_CHAN_RSP, + sizeof(rsp), &rsp); + + return 0; +} + +static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id) +{ + struct l2cap_move_chan_req req; + u8 ident; + + BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id); + + ident = l2cap_get_ident(chan->conn); + chan->ident = ident; + + req.icid = cpu_to_le16(chan->scid); + req.dest_amp_id = dest_amp_id; + + l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), + &req); + + __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT); +} + +static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result) +{ + struct l2cap_move_chan_rsp rsp; + + BT_DBG("chan %p, result 0x%4.4x", chan, result); + + rsp.icid = cpu_to_le16(chan->dcid); + rsp.result = cpu_to_le16(result); + + l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP, + sizeof(rsp), &rsp); +} + +static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result) +{ + struct l2cap_move_chan_cfm cfm; + + BT_DBG("chan %p, result 0x%4.4x", chan, result); + + chan->ident = l2cap_get_ident(chan->conn); + + cfm.icid = cpu_to_le16(chan->scid); + cfm.result = cpu_to_le16(result); + + l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM, + sizeof(cfm), &cfm); + + __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT); +} + +static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid) +{ + struct l2cap_move_chan_cfm cfm; + + BT_DBG("conn %p, icid 0x%4.4x", conn, icid); + + cfm.icid = cpu_to_le16(icid); + cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED); + + l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM, + sizeof(cfm), &cfm); +} + +static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident, + u16 icid) +{ + struct l2cap_move_chan_cfm_rsp rsp; + + BT_DBG("icid 0x%4.4x", icid); + + rsp.icid = cpu_to_le16(icid); + l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp); +} + +static void __release_logical_link(struct l2cap_chan *chan) +{ + chan->hs_hchan = NULL; + chan->hs_hcon = NULL; + + /* Placeholder - release the logical link */ +} + +static void l2cap_logical_fail(struct l2cap_chan *chan) +{ + /* Logical link setup failed */ + if (chan->state != BT_CONNECTED) { + /* Create channel failure, disconnect */ + l2cap_send_disconn_req(chan, ECONNRESET); + return; + } + + switch (chan->move_role) { + case L2CAP_MOVE_ROLE_RESPONDER: + l2cap_move_done(chan); + l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP); + break; + case L2CAP_MOVE_ROLE_INITIATOR: + if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP || + chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) { + /* Remote has only sent pending or + * success responses, clean up + */ + l2cap_move_done(chan); + } + + /* Other amp move states imply that the move + * has already aborted + */ + l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); + break; + } +} + +static void l2cap_logical_finish_create(struct l2cap_chan *chan, + struct hci_chan *hchan) +{ + struct l2cap_conf_rsp rsp; + + chan->hs_hchan = hchan; + chan->hs_hcon->l2cap_data = chan->conn; + + l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0); + + if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) { + int err; + + set_default_fcs(chan); + + err = l2cap_ertm_init(chan); + if (err < 0) + l2cap_send_disconn_req(chan, -err); + else + l2cap_chan_ready(chan); + } +} + +static void l2cap_logical_finish_move(struct l2cap_chan *chan, + struct hci_chan *hchan) +{ + chan->hs_hcon = hchan->conn; + chan->hs_hcon->l2cap_data = chan->conn; + + BT_DBG("move_state %d", chan->move_state); + 
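+	/* The logical link is up; what happens next depends on how much of
+	 * the move response/confirm exchange has already taken place.
+	 */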
+ switch (chan->move_state) { + case L2CAP_MOVE_WAIT_LOGICAL_COMP: + /* Move confirm will be sent after a success + * response is received + */ + chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; + break; + case L2CAP_MOVE_WAIT_LOGICAL_CFM: + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; + } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) { + chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP; + l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); + } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) { + chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; + l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS); + } + break; + default: + /* Move was not in expected state, free the channel */ + __release_logical_link(chan); + + chan->move_state = L2CAP_MOVE_STABLE; + } +} + +/* Call with chan locked */ +void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan, + u8 status) +{ + BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status); + + if (status) { + l2cap_logical_fail(chan); + __release_logical_link(chan); + return; + } + + if (chan->state != BT_CONNECTED) { + /* Ignore logical link if channel is on BR/EDR */ + if (chan->local_amp_id != AMP_ID_BREDR) + l2cap_logical_finish_create(chan, hchan); + } else { + l2cap_logical_finish_move(chan, hchan); + } +} + +void l2cap_move_start(struct l2cap_chan *chan) +{ + BT_DBG("chan %p", chan); + + if (chan->local_amp_id == AMP_ID_BREDR) { + if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED) + return; + chan->move_role = L2CAP_MOVE_ROLE_INITIATOR; + chan->move_state = L2CAP_MOVE_WAIT_PREPARE; + /* Placeholder - start physical link setup */ + } else { + chan->move_role = L2CAP_MOVE_ROLE_INITIATOR; + chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; + chan->move_id = 0; + l2cap_move_setup(chan); + l2cap_send_move_chan_req(chan, 0); + } +} + +static void l2cap_do_create(struct l2cap_chan *chan, int result, + u8 local_amp_id, u8 remote_amp_id) +{ + BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state), + local_amp_id, remote_amp_id); + + chan->fcs = L2CAP_FCS_NONE; + + /* Outgoing channel on AMP */ + if (chan->state == BT_CONNECT) { + if (result == L2CAP_CR_SUCCESS) { + chan->local_amp_id = local_amp_id; + l2cap_send_create_chan_req(chan, remote_amp_id); + } else { + /* Revert to BR/EDR connect */ + l2cap_send_conn_req(chan); + } + + return; + } + + /* Incoming channel on AMP */ + if (__l2cap_no_conn_pending(chan)) { + struct l2cap_conn_rsp rsp; + char buf[128]; + rsp.scid = cpu_to_le16(chan->dcid); + rsp.dcid = cpu_to_le16(chan->scid); + + if (result == L2CAP_CR_SUCCESS) { + /* Send successful response */ + rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + } else { + /* Send negative response */ + rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM); + rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO); + } + + l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP, + sizeof(rsp), &rsp); + + if (result == L2CAP_CR_SUCCESS) { + l2cap_state_change(chan, BT_CONFIG); + set_bit(CONF_REQ_SENT, &chan->conf_state); + l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn), + L2CAP_CONF_REQ, + l2cap_build_conf_req(chan, buf, sizeof(buf)), buf); + chan->num_conf_req++; + } + } +} + +static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id, + u8 remote_amp_id) +{ + l2cap_move_setup(chan); + chan->move_id = local_amp_id; + chan->move_state = L2CAP_MOVE_WAIT_RSP; + + l2cap_send_move_chan_req(chan, remote_amp_id); +} + +static void 
l2cap_do_move_respond(struct l2cap_chan *chan, int result) +{ + struct hci_chan *hchan = NULL; + + /* Placeholder - get hci_chan for logical link */ + + if (hchan) { + if (hchan->state == BT_CONNECTED) { + /* Logical link is ready to go */ + chan->hs_hcon = hchan->conn; + chan->hs_hcon->l2cap_data = chan->conn; + chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; + l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS); + + l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS); + } else { + /* Wait for logical link to be ready */ + chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; + } + } else { + /* Logical link not available */ + l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED); + } +} + +static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result) +{ + if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) { + u8 rsp_result; + if (result == -EINVAL) + rsp_result = L2CAP_MR_BAD_ID; + else + rsp_result = L2CAP_MR_NOT_ALLOWED; + + l2cap_send_move_chan_rsp(chan, rsp_result); + } + + chan->move_role = L2CAP_MOVE_ROLE_NONE; + chan->move_state = L2CAP_MOVE_STABLE; + + /* Restart data transmission */ + l2cap_ertm_send(chan); +} + +/* Invoke with locked chan */ +void __l2cap_physical_cfm(struct l2cap_chan *chan, int result) +{ + u8 local_amp_id = chan->local_amp_id; + u8 remote_amp_id = chan->remote_amp_id; + + BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d", + chan, result, local_amp_id, remote_amp_id); + + if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) + return; + + if (chan->state != BT_CONNECTED) { + l2cap_do_create(chan, result, local_amp_id, remote_amp_id); + } else if (result != L2CAP_MR_SUCCESS) { + l2cap_do_move_cancel(chan, result); + } else { + switch (chan->move_role) { + case L2CAP_MOVE_ROLE_INITIATOR: + l2cap_do_move_initiate(chan, local_amp_id, + remote_amp_id); + break; + case L2CAP_MOVE_ROLE_RESPONDER: + l2cap_do_move_respond(chan, result); + break; + default: + l2cap_do_move_cancel(chan, result); + break; + } + } +} + +static inline int l2cap_move_channel_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, + u16 cmd_len, void *data) +{ + struct l2cap_move_chan_req *req = data; + struct l2cap_move_chan_rsp rsp; + struct l2cap_chan *chan; + u16 icid = 0; + u16 result = L2CAP_MR_NOT_ALLOWED; + + if (cmd_len != sizeof(*req)) + return -EPROTO; + + icid = le16_to_cpu(req->icid); + + BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id); + + if (!(conn->local_fixed_chan & L2CAP_FC_A2MP)) + return -EINVAL; + + chan = l2cap_get_chan_by_dcid(conn, icid); + if (!chan) { + rsp.icid = cpu_to_le16(icid); + rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED); + l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP, + sizeof(rsp), &rsp); + return 0; + } + + chan->ident = cmd->ident; + + if (chan->scid < L2CAP_CID_DYN_START || + chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY || + (chan->mode != L2CAP_MODE_ERTM && + chan->mode != L2CAP_MODE_STREAMING)) { + result = L2CAP_MR_NOT_ALLOWED; + goto send_move_response; + } + + if (chan->local_amp_id == req->dest_amp_id) { + result = L2CAP_MR_SAME_ID; + goto send_move_response; + } + + if (req->dest_amp_id != AMP_ID_BREDR) { + struct hci_dev *hdev; + hdev = hci_dev_get(req->dest_amp_id); + if (!hdev || hdev->dev_type != HCI_AMP || + !test_bit(HCI_UP, &hdev->flags)) { + if (hdev) + hci_dev_put(hdev); + + result = L2CAP_MR_BAD_ID; + goto send_move_response; + } + hci_dev_put(hdev); + } + + /* Detect a move collision. Only send a collision response + * if this side has "lost", otherwise proceed with the move. 
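+	 * Both sides compare the same pair of addresses, so exactly one
+	 * of them backs off.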
+ * The winner has the larger bd_addr. + */ + if ((__chan_is_moving(chan) || + chan->move_role != L2CAP_MOVE_ROLE_NONE) && + bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) { + result = L2CAP_MR_COLLISION; + goto send_move_response; + } + + chan->move_role = L2CAP_MOVE_ROLE_RESPONDER; + l2cap_move_setup(chan); + chan->move_id = req->dest_amp_id; + + if (req->dest_amp_id == AMP_ID_BREDR) { + /* Moving to BR/EDR */ + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; + result = L2CAP_MR_PEND; + } else { + chan->move_state = L2CAP_MOVE_WAIT_CONFIRM; + result = L2CAP_MR_SUCCESS; + } + } else { + chan->move_state = L2CAP_MOVE_WAIT_PREPARE; + /* Placeholder - uncomment when amp functions are available */ + /*amp_accept_physical(chan, req->dest_amp_id);*/ + result = L2CAP_MR_PEND; + } + +send_move_response: + l2cap_send_move_chan_rsp(chan, result); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + return 0; +} + +static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result) +{ + struct l2cap_chan *chan; + struct hci_chan *hchan = NULL; + + chan = l2cap_get_chan_by_scid(conn, icid); + if (!chan) { + l2cap_send_move_chan_cfm_icid(conn, icid); + return; + } + + __clear_chan_timer(chan); + if (result == L2CAP_MR_PEND) + __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT); + + switch (chan->move_state) { + case L2CAP_MOVE_WAIT_LOGICAL_COMP: + /* Move confirm will be sent when logical link + * is complete. + */ + chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; + break; + case L2CAP_MOVE_WAIT_RSP_SUCCESS: + if (result == L2CAP_MR_PEND) { + break; + } else if (test_bit(CONN_LOCAL_BUSY, + &chan->conn_state)) { + chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY; + } else { + /* Logical link is up or moving to BR/EDR, + * proceed with move + */ + chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP; + l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); + } + break; + case L2CAP_MOVE_WAIT_RSP: + /* Moving to AMP */ + if (result == L2CAP_MR_SUCCESS) { + /* Remote is ready, send confirm immediately + * after logical link is ready + */ + chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM; + } else { + /* Both logical link and move success + * are required to confirm + */ + chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP; + } + + /* Placeholder - get hci_chan for logical link */ + if (!hchan) { + /* Logical link not available */ + l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); + break; + } + + /* If the logical link is not yet connected, do not + * send confirmation. + */ + if (hchan->state != BT_CONNECTED) + break; + + /* Logical link is already ready to go */ + + chan->hs_hcon = hchan->conn; + chan->hs_hcon->l2cap_data = chan->conn; + + if (result == L2CAP_MR_SUCCESS) { + /* Can confirm now */ + l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED); + } else { + /* Now only need move success + * to confirm + */ + chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS; + } + + l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS); + break; + default: + /* Any other amp move state means the move failed. 
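+		 * Fall back to the local controller and send an unconfirmed
+		 * move confirm so both sides settle on the original link.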
*/ + chan->move_id = chan->local_amp_id; + l2cap_move_done(chan); + l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); + } + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); +} + +static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid, + u16 result) +{ + struct l2cap_chan *chan; + + chan = l2cap_get_chan_by_ident(conn, ident); + if (!chan) { + /* Could not locate channel, icid is best guess */ + l2cap_send_move_chan_cfm_icid(conn, icid); + return; + } + + __clear_chan_timer(chan); + + if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) { + if (result == L2CAP_MR_COLLISION) { + chan->move_role = L2CAP_MOVE_ROLE_RESPONDER; + } else { + /* Cleanup - cancel move */ + chan->move_id = chan->local_amp_id; + l2cap_move_done(chan); + } + } + + l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); +} + +static int l2cap_move_channel_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, + u16 cmd_len, void *data) +{ + struct l2cap_move_chan_rsp *rsp = data; + u16 icid, result; + + if (cmd_len != sizeof(*rsp)) + return -EPROTO; + + icid = le16_to_cpu(rsp->icid); + result = le16_to_cpu(rsp->result); + + BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); + + if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND) + l2cap_move_continue(conn, icid, result); + else + l2cap_move_fail(conn, cmd->ident, icid, result); + + return 0; +} + +static int l2cap_move_channel_confirm(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, + u16 cmd_len, void *data) +{ + struct l2cap_move_chan_cfm *cfm = data; + struct l2cap_chan *chan; + u16 icid, result; + + if (cmd_len != sizeof(*cfm)) + return -EPROTO; + + icid = le16_to_cpu(cfm->icid); + result = le16_to_cpu(cfm->result); + + BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result); + + chan = l2cap_get_chan_by_dcid(conn, icid); + if (!chan) { + /* Spec requires a response even if the icid was not found */ + l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); + return 0; + } + + if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) { + if (result == L2CAP_MC_CONFIRMED) { + chan->local_amp_id = chan->move_id; + if (chan->local_amp_id == AMP_ID_BREDR) + __release_logical_link(chan); + } else { + chan->move_id = chan->local_amp_id; + } + + l2cap_move_done(chan); + } + + l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + return 0; +} + +static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, + u16 cmd_len, void *data) +{ + struct l2cap_move_chan_cfm_rsp *rsp = data; + struct l2cap_chan *chan; + u16 icid; + + if (cmd_len != sizeof(*rsp)) + return -EPROTO; + + icid = le16_to_cpu(rsp->icid); + + BT_DBG("icid 0x%4.4x", icid); + + chan = l2cap_get_chan_by_scid(conn, icid); + if (!chan) + return 0; + + __clear_chan_timer(chan); + + if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) { + chan->local_amp_id = chan->move_id; + + if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan) + __release_logical_link(chan); + + l2cap_move_done(chan); + } + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + return 0; +} + +static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, + u16 cmd_len, u8 *data) +{ + struct hci_conn *hcon = conn->hcon; + struct l2cap_conn_param_update_req *req; + struct l2cap_conn_param_update_rsp rsp; + u16 min, max, latency, to_multiplier; + int err; + + if (hcon->role != HCI_ROLE_MASTER) + return -EINVAL; + + if (cmd_len != 
sizeof(struct l2cap_conn_param_update_req)) + return -EPROTO; + + req = (struct l2cap_conn_param_update_req *) data; + min = __le16_to_cpu(req->min); + max = __le16_to_cpu(req->max); + latency = __le16_to_cpu(req->latency); + to_multiplier = __le16_to_cpu(req->to_multiplier); + + BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x", + min, max, latency, to_multiplier); + + memset(&rsp, 0, sizeof(rsp)); + + err = hci_check_conn_params(min, max, latency, to_multiplier); + if (err) + rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED); + else + rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED); + + l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP, + sizeof(rsp), &rsp); + + if (!err) { + u8 store_hint; + + store_hint = hci_le_conn_update(hcon, min, max, latency, + to_multiplier); + mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type, + store_hint, min, max, latency, + to_multiplier); + + } + + return 0; +} + +static int l2cap_le_connect_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data; + struct hci_conn *hcon = conn->hcon; + u16 dcid, mtu, mps, credits, result; + struct l2cap_chan *chan; + int err, sec_level; + + if (cmd_len < sizeof(*rsp)) + return -EPROTO; + + dcid = __le16_to_cpu(rsp->dcid); + mtu = __le16_to_cpu(rsp->mtu); + mps = __le16_to_cpu(rsp->mps); + credits = __le16_to_cpu(rsp->credits); + result = __le16_to_cpu(rsp->result); + + if (result == L2CAP_CR_LE_SUCCESS && (mtu < 23 || mps < 23 || + dcid < L2CAP_CID_DYN_START || + dcid > L2CAP_CID_LE_DYN_END)) + return -EPROTO; + + BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x", + dcid, mtu, mps, credits, result); + + mutex_lock(&conn->chan_lock); + + chan = __l2cap_get_chan_by_ident(conn, cmd->ident); + if (!chan) { + err = -EBADSLT; + goto unlock; + } + + err = 0; + + l2cap_chan_lock(chan); + + switch (result) { + case L2CAP_CR_LE_SUCCESS: + if (__l2cap_get_chan_by_dcid(conn, dcid)) { + err = -EBADSLT; + break; + } + + chan->ident = 0; + chan->dcid = dcid; + chan->omtu = mtu; + chan->remote_mps = mps; + chan->tx_credits = credits; + l2cap_chan_ready(chan); + break; + + case L2CAP_CR_LE_AUTHENTICATION: + case L2CAP_CR_LE_ENCRYPTION: + /* If we already have MITM protection we can't do + * anything. 
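+		 * Anything above BT_SECURITY_MEDIUM already implies an
+		 * authenticated (MITM protected) key, so there is no
+		 * stronger level left to retry with.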
+ */ + if (hcon->sec_level > BT_SECURITY_MEDIUM) { + l2cap_chan_del(chan, ECONNREFUSED); + break; + } + + sec_level = hcon->sec_level + 1; + if (chan->sec_level < sec_level) + chan->sec_level = sec_level; + + /* We'll need to send a new Connect Request */ + clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags); + + smp_conn_security(hcon, chan->sec_level); + break; + + default: + l2cap_chan_del(chan, ECONNREFUSED); + break; + } + + l2cap_chan_unlock(chan); + +unlock: + mutex_unlock(&conn->chan_lock); + + return err; +} + +static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + int err = 0; + + switch (cmd->code) { + case L2CAP_COMMAND_REJ: + l2cap_command_rej(conn, cmd, cmd_len, data); + break; + + case L2CAP_CONN_REQ: + err = l2cap_connect_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_CONN_RSP: + case L2CAP_CREATE_CHAN_RSP: + l2cap_connect_create_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_CONF_REQ: + err = l2cap_config_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_CONF_RSP: + l2cap_config_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_DISCONN_REQ: + err = l2cap_disconnect_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_DISCONN_RSP: + l2cap_disconnect_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_ECHO_REQ: + l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data); + break; + + case L2CAP_ECHO_RSP: + break; + + case L2CAP_INFO_REQ: + err = l2cap_information_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_INFO_RSP: + l2cap_information_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_CREATE_CHAN_REQ: + err = l2cap_create_channel_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_MOVE_CHAN_REQ: + err = l2cap_move_channel_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_MOVE_CHAN_RSP: + l2cap_move_channel_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_MOVE_CHAN_CFM: + err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data); + break; + + case L2CAP_MOVE_CHAN_CFM_RSP: + l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data); + break; + + default: + BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code); + err = -EINVAL; + break; + } + + return err; +} + +static int l2cap_le_connect_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data; + struct l2cap_le_conn_rsp rsp; + struct l2cap_chan *chan, *pchan; + u16 dcid, scid, credits, mtu, mps; + __le16 psm; + u8 result; + + if (cmd_len != sizeof(*req)) + return -EPROTO; + + scid = __le16_to_cpu(req->scid); + mtu = __le16_to_cpu(req->mtu); + mps = __le16_to_cpu(req->mps); + psm = req->psm; + dcid = 0; + credits = 0; + + if (mtu < 23 || mps < 23) + return -EPROTO; + + BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm), + scid, mtu, mps); + + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A + * page 1059: + * + * Valid range: 0x0001-0x00ff + * + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges + */ + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { + result = L2CAP_CR_LE_BAD_PSM; + chan = NULL; + goto response; + } + + /* Check if we have socket listening on psm */ + pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, + &conn->hcon->dst, LE_LINK); + if (!pchan) { + result = L2CAP_CR_LE_BAD_PSM; + chan = NULL; + goto response; + } + + mutex_lock(&conn->chan_lock); + l2cap_chan_lock(pchan); + + if (!smp_sufficient_security(conn->hcon, 
pchan->sec_level, + SMP_ALLOW_STK)) { + result = L2CAP_CR_LE_AUTHENTICATION; + chan = NULL; + goto response_unlock; + } + + /* Check for valid dynamic CID range */ + if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { + result = L2CAP_CR_LE_INVALID_SCID; + chan = NULL; + goto response_unlock; + } + + /* Check if we already have channel with that dcid */ + if (__l2cap_get_chan_by_dcid(conn, scid)) { + result = L2CAP_CR_LE_SCID_IN_USE; + chan = NULL; + goto response_unlock; + } + + chan = pchan->ops->new_connection(pchan); + if (!chan) { + result = L2CAP_CR_LE_NO_MEM; + goto response_unlock; + } + + bacpy(&chan->src, &conn->hcon->src); + bacpy(&chan->dst, &conn->hcon->dst); + chan->src_type = bdaddr_src_type(conn->hcon); + chan->dst_type = bdaddr_dst_type(conn->hcon); + chan->psm = psm; + chan->dcid = scid; + chan->omtu = mtu; + chan->remote_mps = mps; + + __l2cap_chan_add(conn, chan); + + l2cap_le_flowctl_init(chan, __le16_to_cpu(req->credits)); + + dcid = chan->scid; + credits = chan->rx_credits; + + __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); + + chan->ident = cmd->ident; + + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { + l2cap_state_change(chan, BT_CONNECT2); + /* The following result value is actually not defined + * for LE CoC but we use it to let the function know + * that it should bail out after doing its cleanup + * instead of sending a response. + */ + result = L2CAP_CR_PEND; + chan->ops->defer(chan); + } else { + l2cap_chan_ready(chan); + result = L2CAP_CR_LE_SUCCESS; + } + +response_unlock: + l2cap_chan_unlock(pchan); + mutex_unlock(&conn->chan_lock); + l2cap_chan_put(pchan); + + if (result == L2CAP_CR_PEND) + return 0; + +response: + if (chan) { + rsp.mtu = cpu_to_le16(chan->imtu); + rsp.mps = cpu_to_le16(chan->mps); + } else { + rsp.mtu = 0; + rsp.mps = 0; + } + + rsp.dcid = cpu_to_le16(dcid); + rsp.credits = cpu_to_le16(credits); + rsp.result = cpu_to_le16(result); + + l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp); + + return 0; +} + +static inline int l2cap_le_credits(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_le_credits *pkt; + struct l2cap_chan *chan; + u16 cid, credits, max_credits; + + if (cmd_len != sizeof(*pkt)) + return -EPROTO; + + pkt = (struct l2cap_le_credits *) data; + cid = __le16_to_cpu(pkt->cid); + credits = __le16_to_cpu(pkt->credits); + + BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits); + + chan = l2cap_get_chan_by_dcid(conn, cid); + if (!chan) + return -EBADSLT; + + max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits; + if (credits > max_credits) { + BT_ERR("LE credits overflow"); + l2cap_send_disconn_req(chan, ECONNRESET); + + /* Return 0 so that we don't trigger an unnecessary + * command reject packet. 
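+		 * The disconnect request sent above already takes the
+		 * channel down.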
+ */ + goto unlock; + } + + chan->tx_credits += credits; + + /* Resume sending */ + l2cap_le_flowctl_send(chan); + + if (chan->tx_credits) + chan->ops->resume(chan); + +unlock: + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + return 0; +} + +static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_ecred_conn_req *req = (void *) data; + struct { + struct l2cap_ecred_conn_rsp rsp; + __le16 dcid[L2CAP_ECRED_MAX_CID]; + } __packed pdu; + struct l2cap_chan *chan, *pchan; + u16 mtu, mps; + __le16 psm; + u8 result, len = 0; + int i, num_scid; + bool defer = false; + + if (!enable_ecred) + return -EINVAL; + + if (cmd_len < sizeof(*req) || (cmd_len - sizeof(*req)) % sizeof(u16)) { + result = L2CAP_CR_LE_INVALID_PARAMS; + goto response; + } + + cmd_len -= sizeof(*req); + num_scid = cmd_len / sizeof(u16); + + if (num_scid > ARRAY_SIZE(pdu.dcid)) { + result = L2CAP_CR_LE_INVALID_PARAMS; + goto response; + } + + mtu = __le16_to_cpu(req->mtu); + mps = __le16_to_cpu(req->mps); + + if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) { + result = L2CAP_CR_LE_UNACCEPT_PARAMS; + goto response; + } + + psm = req->psm; + + /* BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 3, Part A + * page 1059: + * + * Valid range: 0x0001-0x00ff + * + * Table 4.15: L2CAP_LE_CREDIT_BASED_CONNECTION_REQ SPSM ranges + */ + if (!psm || __le16_to_cpu(psm) > L2CAP_PSM_LE_DYN_END) { + result = L2CAP_CR_LE_BAD_PSM; + goto response; + } + + BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps); + + memset(&pdu, 0, sizeof(pdu)); + + /* Check if we have socket listening on psm */ + pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src, + &conn->hcon->dst, LE_LINK); + if (!pchan) { + result = L2CAP_CR_LE_BAD_PSM; + goto response; + } + + mutex_lock(&conn->chan_lock); + l2cap_chan_lock(pchan); + + if (!smp_sufficient_security(conn->hcon, pchan->sec_level, + SMP_ALLOW_STK)) { + result = L2CAP_CR_LE_AUTHENTICATION; + goto unlock; + } + + result = L2CAP_CR_LE_SUCCESS; + + for (i = 0; i < num_scid; i++) { + u16 scid = __le16_to_cpu(req->scid[i]); + + BT_DBG("scid[%d] 0x%4.4x", i, scid); + + pdu.dcid[i] = 0x0000; + len += sizeof(*pdu.dcid); + + /* Check for valid dynamic CID range */ + if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) { + result = L2CAP_CR_LE_INVALID_SCID; + continue; + } + + /* Check if we already have channel with that dcid */ + if (__l2cap_get_chan_by_dcid(conn, scid)) { + result = L2CAP_CR_LE_SCID_IN_USE; + continue; + } + + chan = pchan->ops->new_connection(pchan); + if (!chan) { + result = L2CAP_CR_LE_NO_MEM; + continue; + } + + bacpy(&chan->src, &conn->hcon->src); + bacpy(&chan->dst, &conn->hcon->dst); + chan->src_type = bdaddr_src_type(conn->hcon); + chan->dst_type = bdaddr_dst_type(conn->hcon); + chan->psm = psm; + chan->dcid = scid; + chan->omtu = mtu; + chan->remote_mps = mps; + + __l2cap_chan_add(conn, chan); + + l2cap_ecred_init(chan, __le16_to_cpu(req->credits)); + + /* Init response */ + if (!pdu.rsp.credits) { + pdu.rsp.mtu = cpu_to_le16(chan->imtu); + pdu.rsp.mps = cpu_to_le16(chan->mps); + pdu.rsp.credits = cpu_to_le16(chan->rx_credits); + } + + pdu.dcid[i] = cpu_to_le16(chan->scid); + + __set_chan_timer(chan, chan->ops->get_sndtimeo(chan)); + + chan->ident = cmd->ident; + chan->mode = L2CAP_MODE_EXT_FLOWCTL; + + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { + l2cap_state_change(chan, BT_CONNECT2); + defer = true; + chan->ops->defer(chan); + } else { + l2cap_chan_ready(chan); + 
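+			/* Channel usable right away; the aggregate response
+			 * covering every requested scid still goes out below.
+			 */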
} + } + +unlock: + l2cap_chan_unlock(pchan); + mutex_unlock(&conn->chan_lock); + l2cap_chan_put(pchan); + +response: + pdu.rsp.result = cpu_to_le16(result); + + if (defer) + return 0; + + l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP, + sizeof(pdu.rsp) + len, &pdu); + + return 0; +} + +static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_ecred_conn_rsp *rsp = (void *) data; + struct hci_conn *hcon = conn->hcon; + u16 mtu, mps, credits, result; + struct l2cap_chan *chan, *tmp; + int err = 0, sec_level; + int i = 0; + + if (cmd_len < sizeof(*rsp)) + return -EPROTO; + + mtu = __le16_to_cpu(rsp->mtu); + mps = __le16_to_cpu(rsp->mps); + credits = __le16_to_cpu(rsp->credits); + result = __le16_to_cpu(rsp->result); + + BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits, + result); + + mutex_lock(&conn->chan_lock); + + cmd_len -= sizeof(*rsp); + + list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { + u16 dcid; + + if (chan->ident != cmd->ident || + chan->mode != L2CAP_MODE_EXT_FLOWCTL || + chan->state == BT_CONNECTED) + continue; + + l2cap_chan_lock(chan); + + /* Check that there is a dcid for each pending channel */ + if (cmd_len < sizeof(dcid)) { + l2cap_chan_del(chan, ECONNREFUSED); + l2cap_chan_unlock(chan); + continue; + } + + dcid = __le16_to_cpu(rsp->dcid[i++]); + cmd_len -= sizeof(u16); + + BT_DBG("dcid[%d] 0x%4.4x", i, dcid); + + /* Check if dcid is already in use */ + if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) { + /* If a device receives a + * L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an + * already-assigned Destination CID, then both the + * original channel and the new channel shall be + * immediately discarded and not used. + */ + l2cap_chan_del(chan, ECONNREFUSED); + l2cap_chan_unlock(chan); + chan = __l2cap_get_chan_by_dcid(conn, dcid); + l2cap_chan_lock(chan); + l2cap_chan_del(chan, ECONNRESET); + l2cap_chan_unlock(chan); + continue; + } + + switch (result) { + case L2CAP_CR_LE_AUTHENTICATION: + case L2CAP_CR_LE_ENCRYPTION: + /* If we already have MITM protection we can't do + * anything. 
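+			 * As in the LE Credit Based case above, anything
+			 * stronger already implies MITM protection.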
+			 */
+			if (hcon->sec_level > BT_SECURITY_MEDIUM) {
+				l2cap_chan_del(chan, ECONNREFUSED);
+				break;
+			}
+
+			sec_level = hcon->sec_level + 1;
+			if (chan->sec_level < sec_level)
+				chan->sec_level = sec_level;
+
+			/* We'll need to send a new Connect Request */
+			clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
+
+			smp_conn_security(hcon, chan->sec_level);
+			break;
+
+		case L2CAP_CR_LE_BAD_PSM:
+			l2cap_chan_del(chan, ECONNREFUSED);
+			break;
+
+		default:
+			/* If dcid was not set it means the channel was refused */
+			if (!dcid) {
+				l2cap_chan_del(chan, ECONNREFUSED);
+				break;
+			}
+
+			chan->ident = 0;
+			chan->dcid = dcid;
+			chan->omtu = mtu;
+			chan->remote_mps = mps;
+			chan->tx_credits = credits;
+			l2cap_chan_ready(chan);
+			break;
+		}
+
+		l2cap_chan_unlock(chan);
+	}
+
+	mutex_unlock(&conn->chan_lock);
+
+	return err;
+}
+
+static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
+					 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
+					 u8 *data)
+{
+	struct l2cap_ecred_reconf_req *req = (void *) data;
+	struct l2cap_ecred_reconf_rsp rsp;
+	u16 mtu, mps, result;
+	struct l2cap_chan *chan;
+	int i, num_scid;
+
+	if (!enable_ecred)
+		return -EINVAL;
+
+	if (cmd_len < sizeof(*req) ||
+	    (cmd_len - sizeof(*req)) % sizeof(u16)) {
+		result = L2CAP_CR_LE_INVALID_PARAMS;
+		goto respond;
+	}
+
+	mtu = __le16_to_cpu(req->mtu);
+	mps = __le16_to_cpu(req->mps);
+
+	BT_DBG("mtu %u mps %u", mtu, mps);
+
+	if (mtu < L2CAP_ECRED_MIN_MTU) {
+		result = L2CAP_RECONF_INVALID_MTU;
+		goto respond;
+	}
+
+	if (mps < L2CAP_ECRED_MIN_MPS) {
+		result = L2CAP_RECONF_INVALID_MPS;
+		goto respond;
+	}
+
+	cmd_len -= sizeof(*req);
+	num_scid = cmd_len / sizeof(u16);
+	result = L2CAP_RECONF_SUCCESS;
+
+	for (i = 0; i < num_scid; i++) {
+		u16 scid;
+
+		scid = __le16_to_cpu(req->scid[i]);
+		if (!scid)
+			return -EPROTO;
+
+		chan = __l2cap_get_chan_by_dcid(conn, scid);
+		if (!chan)
+			continue;
+
+		/* If the MTU value is decreased for any of the included
+		 * channels, then the receiver shall disconnect all
+		 * included channels.
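+		 * Reconfiguration may only keep or grow the MTU, so a
+		 * decrease is answered with L2CAP_RECONF_INVALID_MTU.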
+ */ + if (chan->omtu > mtu) { + BT_ERR("chan %p decreased MTU %u -> %u", chan, + chan->omtu, mtu); + result = L2CAP_RECONF_INVALID_MTU; + } + + chan->omtu = mtu; + chan->remote_mps = mps; + } + +respond: + rsp.result = cpu_to_le16(result); + + l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp), + &rsp); + + return 0; +} + +static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_chan *chan, *tmp; + struct l2cap_ecred_conn_rsp *rsp = (void *) data; + u16 result; + + if (cmd_len < sizeof(*rsp)) + return -EPROTO; + + result = __le16_to_cpu(rsp->result); + + BT_DBG("result 0x%4.4x", rsp->result); + + if (!result) + return 0; + + list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) { + if (chan->ident != cmd->ident) + continue; + + l2cap_chan_del(chan, ECONNRESET); + } + + return 0; +} + +static inline int l2cap_le_command_rej(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data; + struct l2cap_chan *chan; + + if (cmd_len < sizeof(*rej)) + return -EPROTO; + + mutex_lock(&conn->chan_lock); + + chan = __l2cap_get_chan_by_ident(conn, cmd->ident); + if (!chan) + goto done; + + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) + goto done; + + l2cap_chan_lock(chan); + l2cap_chan_del(chan, ECONNREFUSED); + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + +done: + mutex_unlock(&conn->chan_lock); + return 0; +} + +static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn, + struct l2cap_cmd_hdr *cmd, u16 cmd_len, + u8 *data) +{ + int err = 0; + + switch (cmd->code) { + case L2CAP_COMMAND_REJ: + l2cap_le_command_rej(conn, cmd, cmd_len, data); + break; + + case L2CAP_CONN_PARAM_UPDATE_REQ: + err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_CONN_PARAM_UPDATE_RSP: + break; + + case L2CAP_LE_CONN_RSP: + l2cap_le_connect_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_LE_CONN_REQ: + err = l2cap_le_connect_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_LE_CREDITS: + err = l2cap_le_credits(conn, cmd, cmd_len, data); + break; + + case L2CAP_ECRED_CONN_REQ: + err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_ECRED_CONN_RSP: + err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_ECRED_RECONF_REQ: + err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_ECRED_RECONF_RSP: + err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data); + break; + + case L2CAP_DISCONN_REQ: + err = l2cap_disconnect_req(conn, cmd, cmd_len, data); + break; + + case L2CAP_DISCONN_RSP: + l2cap_disconnect_rsp(conn, cmd, cmd_len, data); + break; + + default: + BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code); + err = -EINVAL; + break; + } + + return err; +} + +static inline void l2cap_le_sig_channel(struct l2cap_conn *conn, + struct sk_buff *skb) +{ + struct hci_conn *hcon = conn->hcon; + struct l2cap_cmd_hdr *cmd; + u16 len; + int err; + + if (hcon->type != LE_LINK) + goto drop; + + if (skb->len < L2CAP_CMD_HDR_SIZE) + goto drop; + + cmd = (void *) skb->data; + skb_pull(skb, L2CAP_CMD_HDR_SIZE); + + len = le16_to_cpu(cmd->len); + + BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident); + + if (len != skb->len || !cmd->ident) { + BT_DBG("corrupted command"); + goto drop; + } + + err = l2cap_le_sig_cmd(conn, cmd, len, skb->data); + if (err) { + struct l2cap_cmd_rej_unk rej; + + BT_ERR("Wrong 
link type (%d)", err); + + rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); + l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ, + sizeof(rej), &rej); + } + +drop: + kfree_skb(skb); +} + +static inline void l2cap_sig_send_rej(struct l2cap_conn *conn, u16 ident) +{ + struct l2cap_cmd_rej_unk rej; + + rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD); + l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej); +} + +static inline void l2cap_sig_channel(struct l2cap_conn *conn, + struct sk_buff *skb) +{ + struct hci_conn *hcon = conn->hcon; + struct l2cap_cmd_hdr *cmd; + int err; + + l2cap_raw_recv(conn, skb); + + if (hcon->type != ACL_LINK) + goto drop; + + while (skb->len >= L2CAP_CMD_HDR_SIZE) { + u16 len; + + cmd = (void *) skb->data; + skb_pull(skb, L2CAP_CMD_HDR_SIZE); + + len = le16_to_cpu(cmd->len); + + BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, + cmd->ident); + + if (len > skb->len || !cmd->ident) { + BT_DBG("corrupted command"); + l2cap_sig_send_rej(conn, cmd->ident); + break; + } + + err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data); + if (err) { + BT_ERR("Wrong link type (%d)", err); + l2cap_sig_send_rej(conn, cmd->ident); + } + + skb_pull(skb, len); + } + + if (skb->len > 0) { + BT_DBG("corrupted command"); + l2cap_sig_send_rej(conn, 0); + } + +drop: + kfree_skb(skb); +} + +static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb) +{ + u16 our_fcs, rcv_fcs; + int hdr_size; + + if (test_bit(FLAG_EXT_CTRL, &chan->flags)) + hdr_size = L2CAP_EXT_HDR_SIZE; + else + hdr_size = L2CAP_ENH_HDR_SIZE; + + if (chan->fcs == L2CAP_FCS_CRC16) { + skb_trim(skb, skb->len - L2CAP_FCS_SIZE); + rcv_fcs = get_unaligned_le16(skb->data + skb->len); + our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size); + + if (our_fcs != rcv_fcs) + return -EBADMSG; + } + return 0; +} + +static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan) +{ + struct l2cap_ctrl control; + + BT_DBG("chan %p", chan); + + memset(&control, 0, sizeof(control)); + control.sframe = 1; + control.final = 1; + control.reqseq = chan->buffer_seq; + set_bit(CONN_SEND_FBIT, &chan->conn_state); + + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + control.super = L2CAP_SUPER_RNR; + l2cap_send_sframe(chan, &control); + } + + if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) && + chan->unacked_frames > 0) + __set_retrans_timer(chan); + + /* Send pending iframes */ + l2cap_ertm_send(chan); + + if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) && + test_bit(CONN_SEND_FBIT, &chan->conn_state)) { + /* F-bit wasn't sent in an s-frame or i-frame yet, so + * send it now. 
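+		 * An RR with the F-bit set answers the peer's outstanding
+		 * poll.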
+ */ + control.super = L2CAP_SUPER_RR; + l2cap_send_sframe(chan, &control); + } +} + +static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag, + struct sk_buff **last_frag) +{ + /* skb->len reflects data in skb as well as all fragments + * skb->data_len reflects only data in fragments + */ + if (!skb_has_frag_list(skb)) + skb_shinfo(skb)->frag_list = new_frag; + + new_frag->next = NULL; + + (*last_frag)->next = new_frag; + *last_frag = new_frag; + + skb->len += new_frag->len; + skb->data_len += new_frag->len; + skb->truesize += new_frag->truesize; +} + +static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, + struct l2cap_ctrl *control) +{ + int err = -EINVAL; + + switch (control->sar) { + case L2CAP_SAR_UNSEGMENTED: + if (chan->sdu) + break; + + err = chan->ops->recv(chan, skb); + break; + + case L2CAP_SAR_START: + if (chan->sdu) + break; + + if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE)) + break; + + chan->sdu_len = get_unaligned_le16(skb->data); + skb_pull(skb, L2CAP_SDULEN_SIZE); + + if (chan->sdu_len > chan->imtu) { + err = -EMSGSIZE; + break; + } + + if (skb->len >= chan->sdu_len) + break; + + chan->sdu = skb; + chan->sdu_last_frag = skb; + + skb = NULL; + err = 0; + break; + + case L2CAP_SAR_CONTINUE: + if (!chan->sdu) + break; + + append_skb_frag(chan->sdu, skb, + &chan->sdu_last_frag); + skb = NULL; + + if (chan->sdu->len >= chan->sdu_len) + break; + + err = 0; + break; + + case L2CAP_SAR_END: + if (!chan->sdu) + break; + + append_skb_frag(chan->sdu, skb, + &chan->sdu_last_frag); + skb = NULL; + + if (chan->sdu->len != chan->sdu_len) + break; + + err = chan->ops->recv(chan, chan->sdu); + + if (!err) { + /* Reassembly complete */ + chan->sdu = NULL; + chan->sdu_last_frag = NULL; + chan->sdu_len = 0; + } + break; + } + + if (err) { + kfree_skb(skb); + kfree_skb(chan->sdu); + chan->sdu = NULL; + chan->sdu_last_frag = NULL; + chan->sdu_len = 0; + } + + return err; +} + +static int l2cap_resegment(struct l2cap_chan *chan) +{ + /* Placeholder */ + return 0; +} + +void l2cap_chan_busy(struct l2cap_chan *chan, int busy) +{ + u8 event; + + if (chan->mode != L2CAP_MODE_ERTM) + return; + + event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR; + l2cap_tx(chan, NULL, NULL, event); +} + +static int l2cap_rx_queued_iframes(struct l2cap_chan *chan) +{ + int err = 0; + /* Pass sequential frames to l2cap_reassemble_sdu() + * until a gap is encountered. 
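+ * For example, with buffer_seq 5 and frames 5, 6 and 8 sitting
+ * in srej_q, frames 5 and 6 are reassembled here while frame 8
+ * stays queued until the missing frame 7 arrives.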
+ */ + + BT_DBG("chan %p", chan); + + while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + struct sk_buff *skb; + BT_DBG("Searching for skb with txseq %d (queue len %d)", + chan->buffer_seq, skb_queue_len(&chan->srej_q)); + + skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq); + + if (!skb) + break; + + skb_unlink(skb, &chan->srej_q); + chan->buffer_seq = __next_seq(chan, chan->buffer_seq); + err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap); + if (err) + break; + } + + if (skb_queue_empty(&chan->srej_q)) { + chan->rx_state = L2CAP_RX_STATE_RECV; + l2cap_send_ack(chan); + } + + return err; +} + +static void l2cap_handle_srej(struct l2cap_chan *chan, + struct l2cap_ctrl *control) +{ + struct sk_buff *skb; + + BT_DBG("chan %p, control %p", chan, control); + + if (control->reqseq == chan->next_tx_seq) { + BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); + l2cap_send_disconn_req(chan, ECONNRESET); + return; + } + + skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); + + if (skb == NULL) { + BT_DBG("Seq %d not available for retransmission", + control->reqseq); + return; + } + + if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) { + BT_DBG("Retry limit exceeded (%d)", chan->max_tx); + l2cap_send_disconn_req(chan, ECONNRESET); + return; + } + + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + if (control->poll) { + l2cap_pass_to_tx(chan, control); + + set_bit(CONN_SEND_FBIT, &chan->conn_state); + l2cap_retransmit(chan, control); + l2cap_ertm_send(chan); + + if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { + set_bit(CONN_SREJ_ACT, &chan->conn_state); + chan->srej_save_reqseq = control->reqseq; + } + } else { + l2cap_pass_to_tx_fbit(chan, control); + + if (control->final) { + if (chan->srej_save_reqseq != control->reqseq || + !test_and_clear_bit(CONN_SREJ_ACT, + &chan->conn_state)) + l2cap_retransmit(chan, control); + } else { + l2cap_retransmit(chan, control); + if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) { + set_bit(CONN_SREJ_ACT, &chan->conn_state); + chan->srej_save_reqseq = control->reqseq; + } + } + } +} + +static void l2cap_handle_rej(struct l2cap_chan *chan, + struct l2cap_ctrl *control) +{ + struct sk_buff *skb; + + BT_DBG("chan %p, control %p", chan, control); + + if (control->reqseq == chan->next_tx_seq) { + BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq); + l2cap_send_disconn_req(chan, ECONNRESET); + return; + } + + skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq); + + if (chan->max_tx && skb && + bt_cb(skb)->l2cap.retries >= chan->max_tx) { + BT_DBG("Retry limit exceeded (%d)", chan->max_tx); + l2cap_send_disconn_req(chan, ECONNRESET); + return; + } + + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + l2cap_pass_to_tx(chan, control); + + if (control->final) { + if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state)) + l2cap_retransmit_all(chan, control); + } else { + l2cap_retransmit_all(chan, control); + l2cap_ertm_send(chan); + if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) + set_bit(CONN_REJ_ACT, &chan->conn_state); + } +} + +static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq) +{ + BT_DBG("chan %p, txseq %d", chan, txseq); + + BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq, + chan->expected_tx_seq); + + if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) { + if (__seq_offset(chan, txseq, chan->last_acked_seq) >= + chan->tx_win) { + /* See notes below regarding "double poll" and + * invalid packets. 
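+ * (In short: when tx_win is at most half the sequence space,
+ * e.g. a tx_win of 32 with tx_win_max 63, such frames can be
+ * silently ignored; with a larger window they are treated as
+ * invalid and trigger a disconnect.)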
+ */ + if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { + BT_DBG("Invalid/Ignore - after SREJ"); + return L2CAP_TXSEQ_INVALID_IGNORE; + } else { + BT_DBG("Invalid - in window after SREJ sent"); + return L2CAP_TXSEQ_INVALID; + } + } + + if (chan->srej_list.head == txseq) { + BT_DBG("Expected SREJ"); + return L2CAP_TXSEQ_EXPECTED_SREJ; + } + + if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) { + BT_DBG("Duplicate SREJ - txseq already stored"); + return L2CAP_TXSEQ_DUPLICATE_SREJ; + } + + if (l2cap_seq_list_contains(&chan->srej_list, txseq)) { + BT_DBG("Unexpected SREJ - not requested"); + return L2CAP_TXSEQ_UNEXPECTED_SREJ; + } + } + + if (chan->expected_tx_seq == txseq) { + if (__seq_offset(chan, txseq, chan->last_acked_seq) >= + chan->tx_win) { + BT_DBG("Invalid - txseq outside tx window"); + return L2CAP_TXSEQ_INVALID; + } else { + BT_DBG("Expected"); + return L2CAP_TXSEQ_EXPECTED; + } + } + + if (__seq_offset(chan, txseq, chan->last_acked_seq) < + __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) { + BT_DBG("Duplicate - expected_tx_seq later than txseq"); + return L2CAP_TXSEQ_DUPLICATE; + } + + if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) { + /* A source of invalid packets is a "double poll" condition, + * where delays cause us to send multiple poll packets. If + * the remote stack receives and processes both polls, + * sequence numbers can wrap around in such a way that a + * resent frame has a sequence number that looks like new data + * with a sequence gap. This would trigger an erroneous SREJ + * request. + * + * Fortunately, this is impossible with a tx window that's + * less than half of the maximum sequence number, which allows + * invalid frames to be safely ignored. + * + * With tx window sizes greater than half of the tx window + * maximum, the frame is invalid and cannot be ignored. This + * causes a disconnect. + */ + + if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) { + BT_DBG("Invalid/Ignore - txseq outside tx window"); + return L2CAP_TXSEQ_INVALID_IGNORE; + } else { + BT_DBG("Invalid - txseq outside tx window"); + return L2CAP_TXSEQ_INVALID; + } + } else { + BT_DBG("Unexpected - txseq indicates missing frames"); + return L2CAP_TXSEQ_UNEXPECTED; + } +} + +static int l2cap_rx_state_recv(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff *skb, u8 event) +{ + struct l2cap_ctrl local_control; + int err = 0; + bool skb_in_use = false; + + BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, + event); + + switch (event) { + case L2CAP_EV_RECV_IFRAME: + switch (l2cap_classify_txseq(chan, control->txseq)) { + case L2CAP_TXSEQ_EXPECTED: + l2cap_pass_to_tx(chan, control); + + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + BT_DBG("Busy, discarding expected seq %d", + control->txseq); + break; + } + + chan->expected_tx_seq = __next_seq(chan, + control->txseq); + + chan->buffer_seq = chan->expected_tx_seq; + skb_in_use = true; + + /* l2cap_reassemble_sdu may free skb, hence invalidate + * control, so make a copy in advance to use it after + * l2cap_reassemble_sdu returns and to avoid the race + * condition, for example: + * + * The current thread calls: + * l2cap_reassemble_sdu + * chan->ops->recv == l2cap_sock_recv_cb + * __sock_queue_rcv_skb + * Another thread calls: + * bt_sock_recvmsg + * skb_recv_datagram + * skb_free_datagram + * Then the current thread tries to access control, but + * it was freed by skb_free_datagram. 
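+ * (This is why local_control is copied from *control before the
+ * call below, and only the copy is dereferenced afterwards.)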
+ */ + local_control = *control; + err = l2cap_reassemble_sdu(chan, skb, control); + if (err) + break; + + if (local_control.final) { + if (!test_and_clear_bit(CONN_REJ_ACT, + &chan->conn_state)) { + local_control.final = 0; + l2cap_retransmit_all(chan, &local_control); + l2cap_ertm_send(chan); + } + } + + if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) + l2cap_send_ack(chan); + break; + case L2CAP_TXSEQ_UNEXPECTED: + l2cap_pass_to_tx(chan, control); + + /* Can't issue SREJ frames in the local busy state. + * Drop this frame, it will be seen as missing + * when local busy is exited. + */ + if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) { + BT_DBG("Busy, discarding unexpected seq %d", + control->txseq); + break; + } + + /* There was a gap in the sequence, so an SREJ + * must be sent for each missing frame. The + * current frame is stored for later use. + */ + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = true; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q)); + + clear_bit(CONN_SREJ_ACT, &chan->conn_state); + l2cap_seq_list_clear(&chan->srej_list); + l2cap_send_srej(chan, control->txseq); + + chan->rx_state = L2CAP_RX_STATE_SREJ_SENT; + break; + case L2CAP_TXSEQ_DUPLICATE: + l2cap_pass_to_tx(chan, control); + break; + case L2CAP_TXSEQ_INVALID_IGNORE: + break; + case L2CAP_TXSEQ_INVALID: + default: + l2cap_send_disconn_req(chan, ECONNRESET); + break; + } + break; + case L2CAP_EV_RECV_RR: + l2cap_pass_to_tx(chan, control); + if (control->final) { + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) && + !__chan_is_moving(chan)) { + control->final = 0; + l2cap_retransmit_all(chan, control); + } + + l2cap_ertm_send(chan); + } else if (control->poll) { + l2cap_send_i_or_rr_or_rnr(chan); + } else { + if (test_and_clear_bit(CONN_REMOTE_BUSY, + &chan->conn_state) && + chan->unacked_frames) + __set_retrans_timer(chan); + + l2cap_ertm_send(chan); + } + break; + case L2CAP_EV_RECV_RNR: + set_bit(CONN_REMOTE_BUSY, &chan->conn_state); + l2cap_pass_to_tx(chan, control); + if (control && control->poll) { + set_bit(CONN_SEND_FBIT, &chan->conn_state); + l2cap_send_rr_or_rnr(chan, 0); + } + __clear_retrans_timer(chan); + l2cap_seq_list_clear(&chan->retrans_list); + break; + case L2CAP_EV_RECV_REJ: + l2cap_handle_rej(chan, control); + break; + case L2CAP_EV_RECV_SREJ: + l2cap_handle_srej(chan, control); + break; + default: + break; + } + + if (skb && !skb_in_use) { + BT_DBG("Freeing %p", skb); + kfree_skb(skb); + } + + return err; +} + +static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff *skb, u8 event) +{ + int err = 0; + u16 txseq = control->txseq; + bool skb_in_use = false; + + BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, + event); + + switch (event) { + case L2CAP_EV_RECV_IFRAME: + switch (l2cap_classify_txseq(chan, txseq)) { + case L2CAP_TXSEQ_EXPECTED: + /* Keep frame for reassembly later */ + l2cap_pass_to_tx(chan, control); + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = true; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q)); + + chan->expected_tx_seq = __next_seq(chan, txseq); + break; + case L2CAP_TXSEQ_EXPECTED_SREJ: + l2cap_seq_list_pop(&chan->srej_list); + + l2cap_pass_to_tx(chan, control); + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = true; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q)); + + err = l2cap_rx_queued_iframes(chan); + if (err) + break; + + break; + 
case L2CAP_TXSEQ_UNEXPECTED: + /* Got a frame that can't be reassembled yet. + * Save it for later, and send SREJs to cover + * the missing frames. + */ + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = true; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q)); + + l2cap_pass_to_tx(chan, control); + l2cap_send_srej(chan, control->txseq); + break; + case L2CAP_TXSEQ_UNEXPECTED_SREJ: + /* This frame was requested with an SREJ, but + * some expected retransmitted frames are + * missing. Request retransmission of missing + * SREJ'd frames. + */ + skb_queue_tail(&chan->srej_q, skb); + skb_in_use = true; + BT_DBG("Queued %p (queue len %d)", skb, + skb_queue_len(&chan->srej_q)); + + l2cap_pass_to_tx(chan, control); + l2cap_send_srej_list(chan, control->txseq); + break; + case L2CAP_TXSEQ_DUPLICATE_SREJ: + /* We've already queued this frame. Drop this copy. */ + l2cap_pass_to_tx(chan, control); + break; + case L2CAP_TXSEQ_DUPLICATE: + /* Expecting a later sequence number, so this frame + * was already received. Ignore it completely. + */ + break; + case L2CAP_TXSEQ_INVALID_IGNORE: + break; + case L2CAP_TXSEQ_INVALID: + default: + l2cap_send_disconn_req(chan, ECONNRESET); + break; + } + break; + case L2CAP_EV_RECV_RR: + l2cap_pass_to_tx(chan, control); + if (control->final) { + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + if (!test_and_clear_bit(CONN_REJ_ACT, + &chan->conn_state)) { + control->final = 0; + l2cap_retransmit_all(chan, control); + } + + l2cap_ertm_send(chan); + } else if (control->poll) { + if (test_and_clear_bit(CONN_REMOTE_BUSY, + &chan->conn_state) && + chan->unacked_frames) { + __set_retrans_timer(chan); + } + + set_bit(CONN_SEND_FBIT, &chan->conn_state); + l2cap_send_srej_tail(chan); + } else { + if (test_and_clear_bit(CONN_REMOTE_BUSY, + &chan->conn_state) && + chan->unacked_frames) + __set_retrans_timer(chan); + + l2cap_send_ack(chan); + } + break; + case L2CAP_EV_RECV_RNR: + set_bit(CONN_REMOTE_BUSY, &chan->conn_state); + l2cap_pass_to_tx(chan, control); + if (control->poll) { + l2cap_send_srej_tail(chan); + } else { + struct l2cap_ctrl rr_control; + memset(&rr_control, 0, sizeof(rr_control)); + rr_control.sframe = 1; + rr_control.super = L2CAP_SUPER_RR; + rr_control.reqseq = chan->buffer_seq; + l2cap_send_sframe(chan, &rr_control); + } + + break; + case L2CAP_EV_RECV_REJ: + l2cap_handle_rej(chan, control); + break; + case L2CAP_EV_RECV_SREJ: + l2cap_handle_srej(chan, control); + break; + } + + if (skb && !skb_in_use) { + BT_DBG("Freeing %p", skb); + kfree_skb(skb); + } + + return err; +} + +static int l2cap_finish_move(struct l2cap_chan *chan) +{ + BT_DBG("chan %p", chan); + + chan->rx_state = L2CAP_RX_STATE_RECV; + + if (chan->hs_hcon) + chan->conn->mtu = chan->hs_hcon->hdev->block_mtu; + else + chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; + + return l2cap_resegment(chan); +} + +static int l2cap_rx_state_wait_p(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff *skb, u8 event) +{ + int err; + + BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb, + event); + + if (!control->poll) + return -EPROTO; + + l2cap_process_reqseq(chan, control->reqseq); + + if (!skb_queue_empty(&chan->tx_q)) + chan->tx_send_head = skb_peek(&chan->tx_q); + else + chan->tx_send_head = NULL; + + /* Rewind next_tx_seq to the point expected + * by the receiver. 
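+ * (control->reqseq names the next I-frame the peer expects, so
+ * transmission restarts from there with the unacked counter
+ * cleared.)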
+ */ + chan->next_tx_seq = control->reqseq; + chan->unacked_frames = 0; + + err = l2cap_finish_move(chan); + if (err) + return err; + + set_bit(CONN_SEND_FBIT, &chan->conn_state); + l2cap_send_i_or_rr_or_rnr(chan); + + if (event == L2CAP_EV_RECV_IFRAME) + return -EPROTO; + + return l2cap_rx_state_recv(chan, control, NULL, event); +} + +static int l2cap_rx_state_wait_f(struct l2cap_chan *chan, + struct l2cap_ctrl *control, + struct sk_buff *skb, u8 event) +{ + int err; + + if (!control->final) + return -EPROTO; + + clear_bit(CONN_REMOTE_BUSY, &chan->conn_state); + + chan->rx_state = L2CAP_RX_STATE_RECV; + l2cap_process_reqseq(chan, control->reqseq); + + if (!skb_queue_empty(&chan->tx_q)) + chan->tx_send_head = skb_peek(&chan->tx_q); + else + chan->tx_send_head = NULL; + + /* Rewind next_tx_seq to the point expected + * by the receiver. + */ + chan->next_tx_seq = control->reqseq; + chan->unacked_frames = 0; + + if (chan->hs_hcon) + chan->conn->mtu = chan->hs_hcon->hdev->block_mtu; + else + chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu; + + err = l2cap_resegment(chan); + + if (!err) + err = l2cap_rx_state_recv(chan, control, skb, event); + + return err; +} + +static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq) +{ + /* Make sure reqseq is for a packet that has been sent but not acked */ + u16 unacked; + + unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq); + return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked; +} + +static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, + struct sk_buff *skb, u8 event) +{ + int err = 0; + + BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan, + control, skb, event, chan->rx_state); + + if (__valid_reqseq(chan, control->reqseq)) { + switch (chan->rx_state) { + case L2CAP_RX_STATE_RECV: + err = l2cap_rx_state_recv(chan, control, skb, event); + break; + case L2CAP_RX_STATE_SREJ_SENT: + err = l2cap_rx_state_srej_sent(chan, control, skb, + event); + break; + case L2CAP_RX_STATE_WAIT_P: + err = l2cap_rx_state_wait_p(chan, control, skb, event); + break; + case L2CAP_RX_STATE_WAIT_F: + err = l2cap_rx_state_wait_f(chan, control, skb, event); + break; + default: + /* shut it down */ + break; + } + } else { + BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d", + control->reqseq, chan->next_tx_seq, + chan->expected_ack_seq); + l2cap_send_disconn_req(chan, ECONNRESET); + } + + return err; +} + +static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control, + struct sk_buff *skb) +{ + /* l2cap_reassemble_sdu may free skb, hence invalidate control, so store + * the txseq field in advance to use it after l2cap_reassemble_sdu + * returns and to avoid the race condition, for example: + * + * The current thread calls: + * l2cap_reassemble_sdu + * chan->ops->recv == l2cap_sock_recv_cb + * __sock_queue_rcv_skb + * Another thread calls: + * bt_sock_recvmsg + * skb_recv_datagram + * skb_free_datagram + * Then the current thread tries to access control, but it was freed by + * skb_free_datagram. 
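+ * (Hence txseq is copied out below before the call and is the
+ * only value used for the sequence bookkeeping afterwards.)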
+ */ + u16 txseq = control->txseq; + + BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb, + chan->rx_state); + + if (l2cap_classify_txseq(chan, txseq) == L2CAP_TXSEQ_EXPECTED) { + l2cap_pass_to_tx(chan, control); + + BT_DBG("buffer_seq %u->%u", chan->buffer_seq, + __next_seq(chan, chan->buffer_seq)); + + chan->buffer_seq = __next_seq(chan, chan->buffer_seq); + + l2cap_reassemble_sdu(chan, skb, control); + } else { + if (chan->sdu) { + kfree_skb(chan->sdu); + chan->sdu = NULL; + } + chan->sdu_last_frag = NULL; + chan->sdu_len = 0; + + if (skb) { + BT_DBG("Freeing %p", skb); + kfree_skb(skb); + } + } + + chan->last_acked_seq = txseq; + chan->expected_tx_seq = __next_seq(chan, txseq); + + return 0; +} + +static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct l2cap_ctrl *control = &bt_cb(skb)->l2cap; + u16 len; + u8 event; + + __unpack_control(chan, skb); + + len = skb->len; + + /* + * We can just drop the corrupted I-frame here. + * Receiver will miss it and start proper recovery + * procedures and ask for retransmission. + */ + if (l2cap_check_fcs(chan, skb)) + goto drop; + + if (!control->sframe && control->sar == L2CAP_SAR_START) + len -= L2CAP_SDULEN_SIZE; + + if (chan->fcs == L2CAP_FCS_CRC16) + len -= L2CAP_FCS_SIZE; + + if (len > chan->mps) { + l2cap_send_disconn_req(chan, ECONNRESET); + goto drop; + } + + if (chan->ops->filter) { + if (chan->ops->filter(chan, skb)) + goto drop; + } + + if (!control->sframe) { + int err; + + BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d", + control->sar, control->reqseq, control->final, + control->txseq); + + /* Validate F-bit - F=0 always valid, F=1 only + * valid in TX WAIT_F + */ + if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F) + goto drop; + + if (chan->mode != L2CAP_MODE_STREAMING) { + event = L2CAP_EV_RECV_IFRAME; + err = l2cap_rx(chan, control, skb, event); + } else { + err = l2cap_stream_rx(chan, control, skb); + } + + if (err) + l2cap_send_disconn_req(chan, ECONNRESET); + } else { + const u8 rx_func_to_event[4] = { + L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ, + L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ + }; + + /* Only I-frames are expected in streaming mode */ + if (chan->mode == L2CAP_MODE_STREAMING) + goto drop; + + BT_DBG("sframe reqseq %d, final %d, poll %d, super %d", + control->reqseq, control->final, control->poll, + control->super); + + if (len != 0) { + BT_ERR("Trailing bytes: %d in sframe", len); + l2cap_send_disconn_req(chan, ECONNRESET); + goto drop; + } + + /* Validate F and P bits */ + if (control->final && (control->poll || + chan->tx_state != L2CAP_TX_STATE_WAIT_F)) + goto drop; + + event = rx_func_to_event[control->super]; + if (l2cap_rx(chan, control, skb, event)) + l2cap_send_disconn_req(chan, ECONNRESET); + } + + return 0; + +drop: + kfree_skb(skb); + return 0; +} + +static void l2cap_chan_le_send_credits(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct l2cap_le_credits pkt; + u16 return_credits; + + return_credits = (chan->imtu / chan->mps) + 1; + + if (chan->rx_credits >= return_credits) + return; + + return_credits -= chan->rx_credits; + + BT_DBG("chan %p returning %u credits to sender", chan, return_credits); + + chan->rx_credits += return_credits; + + pkt.cid = cpu_to_le16(chan->scid); + pkt.credits = cpu_to_le16(return_credits); + + chan->ident = l2cap_get_ident(conn); + + l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt); +} + +static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb) +{ + int 
err;
+
+ BT_DBG("SDU reassemble complete: chan %p skb->len %u", chan, skb->len);
+
+ /* Wait for recv to confirm reception before updating the credits */
+ err = chan->ops->recv(chan, skb);
+
+ /* Update credits whenever an SDU is received */
+ l2cap_chan_le_send_credits(chan);
+
+ return err;
+}
+
+static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ int err;
+
+ if (!chan->rx_credits) {
+ BT_ERR("No credits to receive LE L2CAP data");
+ l2cap_send_disconn_req(chan, ECONNRESET);
+ return -ENOBUFS;
+ }
+
+ if (chan->imtu < skb->len) {
+ BT_ERR("Too big LE L2CAP PDU");
+ return -ENOBUFS;
+ }
+
+ chan->rx_credits--;
+ BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
+
+ /* Update if remote had run out of credits, this should only happen
+ * if the remote is not using the entire MPS.
+ */
+ if (!chan->rx_credits)
+ l2cap_chan_le_send_credits(chan);
+
+ err = 0;
+
+ if (!chan->sdu) {
+ u16 sdu_len;
+
+ sdu_len = get_unaligned_le16(skb->data);
+ skb_pull(skb, L2CAP_SDULEN_SIZE);
+
+ BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
+ sdu_len, skb->len, chan->imtu);
+
+ if (sdu_len > chan->imtu) {
+ BT_ERR("Too big LE L2CAP SDU length received");
+ err = -EMSGSIZE;
+ goto failed;
+ }
+
+ if (skb->len > sdu_len) {
+ BT_ERR("Too much LE L2CAP data received");
+ err = -EINVAL;
+ goto failed;
+ }
+
+ if (skb->len == sdu_len)
+ return l2cap_ecred_recv(chan, skb);
+
+ chan->sdu = skb;
+ chan->sdu_len = sdu_len;
+ chan->sdu_last_frag = skb;
+
+ /* Detect if remote is not able to use the selected MPS */
+ if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
+ u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
+
+ /* Adjust the number of credits */
+ BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
+ chan->mps = mps_len;
+ l2cap_chan_le_send_credits(chan);
+ }
+
+ return 0;
+ }
+
+ BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
+ chan->sdu->len, skb->len, chan->sdu_len);
+
+ if (chan->sdu->len + skb->len > chan->sdu_len) {
+ BT_ERR("Too much LE L2CAP data received");
+ err = -EINVAL;
+ goto failed;
+ }
+
+ append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
+ skb = NULL;
+
+ if (chan->sdu->len == chan->sdu_len) {
+ err = l2cap_ecred_recv(chan, chan->sdu);
+ if (!err) {
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+ }
+ }
+
+failed:
+ if (err) {
+ kfree_skb(skb);
+ kfree_skb(chan->sdu);
+ chan->sdu = NULL;
+ chan->sdu_last_frag = NULL;
+ chan->sdu_len = 0;
+ }
+
+ /* We can't return an error here since we took care of the skb
+ * freeing internally. An error return would cause the caller to
+ * do a double-free of the skb.
+ */
+ return 0;
+}
+
+static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
+ struct sk_buff *skb)
+{
+ struct l2cap_chan *chan;
+
+ chan = l2cap_get_chan_by_scid(conn, cid);
+ if (!chan) {
+ if (cid == L2CAP_CID_A2MP) {
+ chan = a2mp_channel_create(conn, skb);
+ if (!chan) {
+ kfree_skb(skb);
+ return;
+ }
+
+ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
+ } else {
+ BT_DBG("unknown cid 0x%4.4x", cid);
+ /* Drop packet and return */
+ kfree_skb(skb);
+ return;
+ }
+ }
+
+ BT_DBG("chan %p, len %d", chan, skb->len);
+
+ /* If we receive data on a fixed channel before the info req/rsp
+ * procedure is done, simply assume that the channel is supported
+ * and mark it as ready.
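+ * (Fixed channels such as the ATT channel exist without any
+ * configuration exchange, so there is nothing to wait for.)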
+ */
+ if (chan->chan_type == L2CAP_CHAN_FIXED)
+ l2cap_chan_ready(chan);
+
+ if (chan->state != BT_CONNECTED)
+ goto drop;
+
+ switch (chan->mode) {
+ case L2CAP_MODE_LE_FLOWCTL:
+ case L2CAP_MODE_EXT_FLOWCTL:
+ if (l2cap_ecred_data_rcv(chan, skb) < 0)
+ goto drop;
+
+ goto done;
+
+ case L2CAP_MODE_BASIC:
+ /* If the socket recv buffer overflows we drop data here,
+ * which is *bad* because L2CAP has to be reliable.
+ * But we don't have any other choice: L2CAP doesn't
+ * provide a flow control mechanism. */
+
+ if (chan->imtu < skb->len) {
+ BT_ERR("Dropping L2CAP data: receive buffer overflow");
+ goto drop;
+ }
+
+ if (!chan->ops->recv(chan, skb))
+ goto done;
+ break;
+
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ l2cap_data_rcv(chan, skb);
+ goto done;
+
+ default:
+ BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
+ break;
+ }
+
+drop:
+ kfree_skb(skb);
+
+done:
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+}
+
+static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
+ struct sk_buff *skb)
+{
+ struct hci_conn *hcon = conn->hcon;
+ struct l2cap_chan *chan;
+
+ if (hcon->type != ACL_LINK)
+ goto free_skb;
+
+ chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
+ ACL_LINK);
+ if (!chan)
+ goto free_skb;
+
+ BT_DBG("chan %p, len %d", chan, skb->len);
+
+ if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
+ goto drop;
+
+ if (chan->imtu < skb->len)
+ goto drop;
+
+ /* Store remote BD_ADDR and PSM for msg_name */
+ bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
+ bt_cb(skb)->l2cap.psm = psm;
+
+ if (!chan->ops->recv(chan, skb)) {
+ l2cap_chan_put(chan);
+ return;
+ }
+
+drop:
+ l2cap_chan_put(chan);
+free_skb:
+ kfree_skb(skb);
+}
+
+static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
+{
+ struct l2cap_hdr *lh = (void *) skb->data;
+ struct hci_conn *hcon = conn->hcon;
+ u16 cid, len;
+ __le16 psm;
+
+ if (hcon->state != BT_CONNECTED) {
+ BT_DBG("queueing pending rx skb");
+ skb_queue_tail(&conn->pending_rx, skb);
+ return;
+ }
+
+ skb_pull(skb, L2CAP_HDR_SIZE);
+ cid = __le16_to_cpu(lh->cid);
+ len = __le16_to_cpu(lh->len);
+
+ if (len != skb->len) {
+ kfree_skb(skb);
+ return;
+ }
+
+ /* Since we can't actively block incoming LE connections we must
+ * at least ensure that we ignore incoming data from them.
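+ * (The hdev reject_list lookup below drops frames from any such
+ * peer before channel dispatch.)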
+ */ + if (hcon->type == LE_LINK && + hci_bdaddr_list_lookup(&hcon->hdev->reject_list, &hcon->dst, + bdaddr_dst_type(hcon))) { + kfree_skb(skb); + return; + } + + BT_DBG("len %d, cid 0x%4.4x", len, cid); + + switch (cid) { + case L2CAP_CID_SIGNALING: + l2cap_sig_channel(conn, skb); + break; + + case L2CAP_CID_CONN_LESS: + psm = get_unaligned((__le16 *) skb->data); + skb_pull(skb, L2CAP_PSMLEN_SIZE); + l2cap_conless_channel(conn, psm, skb); + break; + + case L2CAP_CID_LE_SIGNALING: + l2cap_le_sig_channel(conn, skb); + break; + + default: + l2cap_data_channel(conn, cid, skb); + break; + } +} + +static void process_pending_rx(struct work_struct *work) +{ + struct l2cap_conn *conn = container_of(work, struct l2cap_conn, + pending_rx_work); + struct sk_buff *skb; + + BT_DBG(""); + + while ((skb = skb_dequeue(&conn->pending_rx))) + l2cap_recv_frame(conn, skb); +} + +static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + struct hci_chan *hchan; + + if (conn) + return conn; + + hchan = hci_chan_create(hcon); + if (!hchan) + return NULL; + + conn = kzalloc(sizeof(*conn), GFP_KERNEL); + if (!conn) { + hci_chan_del(hchan); + return NULL; + } + + kref_init(&conn->ref); + hcon->l2cap_data = conn; + conn->hcon = hci_conn_get(hcon); + conn->hchan = hchan; + + BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan); + + switch (hcon->type) { + case LE_LINK: + if (hcon->hdev->le_mtu) { + conn->mtu = hcon->hdev->le_mtu; + break; + } + fallthrough; + default: + conn->mtu = hcon->hdev->acl_mtu; + break; + } + + conn->feat_mask = 0; + + conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS; + + if (hcon->type == ACL_LINK && + hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED)) + conn->local_fixed_chan |= L2CAP_FC_A2MP; + + if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) && + (bredr_sc_enabled(hcon->hdev) || + hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP))) + conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR; + + mutex_init(&conn->ident_lock); + mutex_init(&conn->chan_lock); + + INIT_LIST_HEAD(&conn->chan_l); + INIT_LIST_HEAD(&conn->users); + + INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout); + + skb_queue_head_init(&conn->pending_rx); + INIT_WORK(&conn->pending_rx_work, process_pending_rx); + INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr); + + conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; + + return conn; +} + +static bool is_valid_psm(u16 psm, u8 dst_type) +{ + if (!psm) + return false; + + if (bdaddr_type_is_le(dst_type)) + return (psm <= 0x00ff); + + /* PSM must be odd and lsb of upper byte must be 0 */ + return ((psm & 0x0101) == 0x0001); +} + +struct l2cap_chan_data { + struct l2cap_chan *chan; + struct pid *pid; + int count; +}; + +static void l2cap_chan_by_pid(struct l2cap_chan *chan, void *data) +{ + struct l2cap_chan_data *d = data; + struct pid *pid; + + if (chan == d->chan) + return; + + if (!test_bit(FLAG_DEFER_SETUP, &chan->flags)) + return; + + pid = chan->ops->get_peer_pid(chan); + + /* Only count deferred channels with the same PID/PSM */ + if (d->pid != pid || chan->psm != d->chan->psm || chan->ident || + chan->mode != L2CAP_MODE_EXT_FLOWCTL || chan->state != BT_CONNECT) + return; + + d->count++; +} + +int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, + bdaddr_t *dst, u8 dst_type) +{ + struct l2cap_conn *conn; + struct hci_conn *hcon; + struct hci_dev *hdev; + int err; + + BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src, + dst, dst_type, __le16_to_cpu(psm), 
chan->mode);
+
+ hdev = hci_get_route(dst, &chan->src, chan->src_type);
+ if (!hdev)
+ return -EHOSTUNREACH;
+
+ hci_dev_lock(hdev);
+
+ if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
+ chan->chan_type != L2CAP_CHAN_RAW) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
+ err = -EINVAL;
+ goto done;
+ }
+
+ switch (chan->mode) {
+ case L2CAP_MODE_BASIC:
+ break;
+ case L2CAP_MODE_LE_FLOWCTL:
+ break;
+ case L2CAP_MODE_EXT_FLOWCTL:
+ if (!enable_ecred) {
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+ break;
+ case L2CAP_MODE_ERTM:
+ case L2CAP_MODE_STREAMING:
+ if (!disable_ertm)
+ break;
+ fallthrough;
+ default:
+ err = -EOPNOTSUPP;
+ goto done;
+ }
+
+ switch (chan->state) {
+ case BT_CONNECT:
+ case BT_CONNECT2:
+ case BT_CONFIG:
+ /* Already connecting */
+ err = 0;
+ goto done;
+
+ case BT_CONNECTED:
+ /* Already connected */
+ err = -EISCONN;
+ goto done;
+
+ case BT_OPEN:
+ case BT_BOUND:
+ /* Can connect */
+ break;
+
+ default:
+ err = -EBADFD;
+ goto done;
+ }
+
+ /* Set destination address and psm */
+ bacpy(&chan->dst, dst);
+ chan->dst_type = dst_type;
+
+ chan->psm = psm;
+ chan->dcid = cid;
+
+ if (bdaddr_type_is_le(dst_type)) {
+ /* Convert from L2CAP channel address type to HCI address type
+ */
+ if (dst_type == BDADDR_LE_PUBLIC)
+ dst_type = ADDR_LE_DEV_PUBLIC;
+ else
+ dst_type = ADDR_LE_DEV_RANDOM;
+
+ if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
+ hcon = hci_connect_le(hdev, dst, dst_type, false,
+ chan->sec_level,
+ HCI_LE_CONN_TIMEOUT,
+ HCI_ROLE_SLAVE);
+ else
+ hcon = hci_connect_le_scan(hdev, dst, dst_type,
+ chan->sec_level,
+ HCI_LE_CONN_TIMEOUT,
+ CONN_REASON_L2CAP_CHAN);
+
+ } else {
+ u8 auth_type = l2cap_get_auth_type(chan);
+ hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type,
+ CONN_REASON_L2CAP_CHAN);
+ }
+
+ if (IS_ERR(hcon)) {
+ err = PTR_ERR(hcon);
+ goto done;
+ }
+
+ conn = l2cap_conn_add(hcon);
+ if (!conn) {
+ hci_conn_drop(hcon);
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
+ struct l2cap_chan_data data;
+
+ data.chan = chan;
+ data.pid = chan->ops->get_peer_pid(chan);
+ data.count = 1;
+
+ l2cap_chan_list(conn, l2cap_chan_by_pid, &data);
+
+ /* Check if there aren't too many channels being connected */
+ if (data.count > L2CAP_ECRED_CONN_SCID_MAX) {
+ hci_conn_drop(hcon);
+ err = -EPROTO;
+ goto done;
+ }
+ }
+
+ mutex_lock(&conn->chan_lock);
+ l2cap_chan_lock(chan);
+
+ if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
+ hci_conn_drop(hcon);
+ err = -EBUSY;
+ goto chan_unlock;
+ }
+
+ /* Update source addr of the socket */
+ bacpy(&chan->src, &hcon->src);
+ chan->src_type = bdaddr_src_type(hcon);
+
+ __l2cap_chan_add(conn, chan);
+
+ /* l2cap_chan_add takes its own ref so we can drop this one */
+ hci_conn_drop(hcon);
+
+ l2cap_state_change(chan, BT_CONNECT);
+ __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
+
+ /* Release chan->sport so that it can be reused by other
+ * sockets (as it's only used for listening sockets).
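+ * (chan->sport holds the source PSM reservation; a connected
+ * socket no longer needs it, so clearing it under chan_list_lock
+ * lets a new listener bind to the same PSM.)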
+ */ + write_lock(&chan_list_lock); + chan->sport = 0; + write_unlock(&chan_list_lock); + + if (hcon->state == BT_CONNECTED) { + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { + __clear_chan_timer(chan); + if (l2cap_chan_check_security(chan, true)) + l2cap_state_change(chan, BT_CONNECTED); + } else + l2cap_do_start(chan); + } + + err = 0; + +chan_unlock: + l2cap_chan_unlock(chan); + mutex_unlock(&conn->chan_lock); +done: + hci_dev_unlock(hdev); + hci_dev_put(hdev); + return err; +} +EXPORT_SYMBOL_GPL(l2cap_chan_connect); + +static void l2cap_ecred_reconfigure(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct { + struct l2cap_ecred_reconf_req req; + __le16 scid; + } pdu; + + pdu.req.mtu = cpu_to_le16(chan->imtu); + pdu.req.mps = cpu_to_le16(chan->mps); + pdu.scid = cpu_to_le16(chan->scid); + + chan->ident = l2cap_get_ident(conn); + + l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ, + sizeof(pdu), &pdu); +} + +int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu) +{ + if (chan->imtu > mtu) + return -EINVAL; + + BT_DBG("chan %p mtu 0x%4.4x", chan, mtu); + + chan->imtu = mtu; + + l2cap_ecred_reconfigure(chan); + + return 0; +} + +/* ---- L2CAP interface with lower layer (HCI) ---- */ + +int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr) +{ + int exact = 0, lm1 = 0, lm2 = 0; + struct l2cap_chan *c; + + BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); + + /* Find listening sockets and check their link_mode */ + read_lock(&chan_list_lock); + list_for_each_entry(c, &chan_list, global_l) { + if (c->state != BT_LISTEN) + continue; + + if (!bacmp(&c->src, &hdev->bdaddr)) { + lm1 |= HCI_LM_ACCEPT; + if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) + lm1 |= HCI_LM_MASTER; + exact++; + } else if (!bacmp(&c->src, BDADDR_ANY)) { + lm2 |= HCI_LM_ACCEPT; + if (test_bit(FLAG_ROLE_SWITCH, &c->flags)) + lm2 |= HCI_LM_MASTER; + } + } + read_unlock(&chan_list_lock); + + return exact ? lm1 : lm2; +} + +/* Find the next fixed channel in BT_LISTEN state, continue iteration + * from an existing channel in the list or from the beginning of the + * global list (by passing NULL as first parameter). 
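+ *
+ * Typical usage, as in l2cap_connect_cfm() below:
+ *
+ *   pchan = l2cap_global_fixed_chan(NULL, hcon);
+ *   while (pchan) {
+ *           struct l2cap_chan *next;
+ *
+ *           ... use pchan ...
+ *
+ *           next = l2cap_global_fixed_chan(pchan, hcon);
+ *           l2cap_chan_put(pchan);
+ *           pchan = next;
+ *   }
+ *
+ * Each returned channel carries a reference taken with
+ * l2cap_chan_hold_unless_zero(), which the caller must drop.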
+ */ +static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c, + struct hci_conn *hcon) +{ + u8 src_type = bdaddr_src_type(hcon); + + read_lock(&chan_list_lock); + + if (c) + c = list_next_entry(c, global_l); + else + c = list_entry(chan_list.next, typeof(*c), global_l); + + list_for_each_entry_from(c, &chan_list, global_l) { + if (c->chan_type != L2CAP_CHAN_FIXED) + continue; + if (c->state != BT_LISTEN) + continue; + if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY)) + continue; + if (src_type != c->src_type) + continue; + + c = l2cap_chan_hold_unless_zero(c); + read_unlock(&chan_list_lock); + return c; + } + + read_unlock(&chan_list_lock); + + return NULL; +} + +static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status) +{ + struct hci_dev *hdev = hcon->hdev; + struct l2cap_conn *conn; + struct l2cap_chan *pchan; + u8 dst_type; + + if (hcon->type != ACL_LINK && hcon->type != LE_LINK) + return; + + BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status); + + if (status) { + l2cap_conn_del(hcon, bt_to_errno(status)); + return; + } + + conn = l2cap_conn_add(hcon); + if (!conn) + return; + + dst_type = bdaddr_dst_type(hcon); + + /* If device is blocked, do not create channels for it */ + if (hci_bdaddr_list_lookup(&hdev->reject_list, &hcon->dst, dst_type)) + return; + + /* Find fixed channels and notify them of the new connection. We + * use multiple individual lookups, continuing each time where + * we left off, because the list lock would prevent calling the + * potentially sleeping l2cap_chan_lock() function. + */ + pchan = l2cap_global_fixed_chan(NULL, hcon); + while (pchan) { + struct l2cap_chan *chan, *next; + + /* Client fixed channels should override server ones */ + if (__l2cap_get_chan_by_dcid(conn, pchan->scid)) + goto next; + + l2cap_chan_lock(pchan); + chan = pchan->ops->new_connection(pchan); + if (chan) { + bacpy(&chan->src, &hcon->src); + bacpy(&chan->dst, &hcon->dst); + chan->src_type = bdaddr_src_type(hcon); + chan->dst_type = dst_type; + + __l2cap_chan_add(conn, chan); + } + + l2cap_chan_unlock(pchan); +next: + next = l2cap_global_fixed_chan(pchan, hcon); + l2cap_chan_put(pchan); + pchan = next; + } + + l2cap_conn_ready(conn); +} + +int l2cap_disconn_ind(struct hci_conn *hcon) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + + BT_DBG("hcon %p", hcon); + + if (!conn) + return HCI_ERROR_REMOTE_USER_TERM; + return conn->disc_reason; +} + +static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason) +{ + if (hcon->type != ACL_LINK && hcon->type != LE_LINK) + return; + + BT_DBG("hcon %p reason %d", hcon, reason); + + l2cap_conn_del(hcon, bt_to_errno(reason)); +} + +static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt) +{ + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) + return; + + if (encrypt == 0x00) { + if (chan->sec_level == BT_SECURITY_MEDIUM) { + __set_chan_timer(chan, L2CAP_ENC_TIMEOUT); + } else if (chan->sec_level == BT_SECURITY_HIGH || + chan->sec_level == BT_SECURITY_FIPS) + l2cap_chan_close(chan, ECONNREFUSED); + } else { + if (chan->sec_level == BT_SECURITY_MEDIUM) + __clear_chan_timer(chan); + } +} + +static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + struct l2cap_chan *chan; + + if (!conn) + return; + + BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt); + + mutex_lock(&conn->chan_lock); + + list_for_each_entry(chan, &conn->chan_l, list) { + l2cap_chan_lock(chan); + + BT_DBG("chan %p scid 0x%4.4x 
state %s", chan, chan->scid, + state_to_string(chan->state)); + + if (chan->scid == L2CAP_CID_A2MP) { + l2cap_chan_unlock(chan); + continue; + } + + if (!status && encrypt) + chan->sec_level = hcon->sec_level; + + if (!__l2cap_no_conn_pending(chan)) { + l2cap_chan_unlock(chan); + continue; + } + + if (!status && (chan->state == BT_CONNECTED || + chan->state == BT_CONFIG)) { + chan->ops->resume(chan); + l2cap_check_encryption(chan, encrypt); + l2cap_chan_unlock(chan); + continue; + } + + if (chan->state == BT_CONNECT) { + if (!status && l2cap_check_enc_key_size(hcon)) + l2cap_start_connection(chan); + else + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); + } else if (chan->state == BT_CONNECT2 && + !(chan->mode == L2CAP_MODE_EXT_FLOWCTL || + chan->mode == L2CAP_MODE_LE_FLOWCTL)) { + struct l2cap_conn_rsp rsp; + __u16 res, stat; + + if (!status && l2cap_check_enc_key_size(hcon)) { + if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) { + res = L2CAP_CR_PEND; + stat = L2CAP_CS_AUTHOR_PEND; + chan->ops->defer(chan); + } else { + l2cap_state_change(chan, BT_CONFIG); + res = L2CAP_CR_SUCCESS; + stat = L2CAP_CS_NO_INFO; + } + } else { + l2cap_state_change(chan, BT_DISCONN); + __set_chan_timer(chan, L2CAP_DISC_TIMEOUT); + res = L2CAP_CR_SEC_BLOCK; + stat = L2CAP_CS_NO_INFO; + } + + rsp.scid = cpu_to_le16(chan->dcid); + rsp.dcid = cpu_to_le16(chan->scid); + rsp.result = cpu_to_le16(res); + rsp.status = cpu_to_le16(stat); + l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, + sizeof(rsp), &rsp); + + if (!test_bit(CONF_REQ_SENT, &chan->conf_state) && + res == L2CAP_CR_SUCCESS) { + char buf[128]; + set_bit(CONF_REQ_SENT, &chan->conf_state); + l2cap_send_cmd(conn, l2cap_get_ident(conn), + L2CAP_CONF_REQ, + l2cap_build_conf_req(chan, buf, sizeof(buf)), + buf); + chan->num_conf_req++; + } + } + + l2cap_chan_unlock(chan); + } + + mutex_unlock(&conn->chan_lock); +} + +/* Append fragment into frame respecting the maximum len of rx_skb */ +static int l2cap_recv_frag(struct l2cap_conn *conn, struct sk_buff *skb, + u16 len) +{ + if (!conn->rx_skb) { + /* Allocate skb for the complete frame (with header) */ + conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL); + if (!conn->rx_skb) + return -ENOMEM; + /* Init rx_len */ + conn->rx_len = len; + } + + /* Copy as much as the rx_skb can hold */ + len = min_t(u16, len, skb->len); + skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, len), len); + skb_pull(skb, len); + conn->rx_len -= len; + + return len; +} + +static int l2cap_recv_len(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct sk_buff *rx_skb; + int len; + + /* Append just enough to complete the header */ + len = l2cap_recv_frag(conn, skb, L2CAP_LEN_SIZE - conn->rx_skb->len); + + /* If header could not be read just continue */ + if (len < 0 || conn->rx_skb->len < L2CAP_LEN_SIZE) + return len; + + rx_skb = conn->rx_skb; + len = get_unaligned_le16(rx_skb->data); + + /* Check if rx_skb has enough space to received all fragments */ + if (len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE) <= skb_tailroom(rx_skb)) { + /* Update expected len */ + conn->rx_len = len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE); + return L2CAP_LEN_SIZE; + } + + /* Reset conn->rx_skb since it will need to be reallocated in order to + * fit all fragments. 
+ */
+ conn->rx_skb = NULL;
+
+ /* Reallocate rx_skb using the exact expected length */
+ len = l2cap_recv_frag(conn, rx_skb,
+ len + (L2CAP_HDR_SIZE - L2CAP_LEN_SIZE));
+ kfree_skb(rx_skb);
+
+ return len;
+}
+
+static void l2cap_recv_reset(struct l2cap_conn *conn)
+{
+ kfree_skb(conn->rx_skb);
+ conn->rx_skb = NULL;
+ conn->rx_len = 0;
+}
+
+void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
+{
+ struct l2cap_conn *conn = hcon->l2cap_data;
+ int len;
+
+ /* For an AMP controller, do not create an l2cap conn */
+ if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
+ goto drop;
+
+ if (!conn)
+ conn = l2cap_conn_add(hcon);
+
+ if (!conn)
+ goto drop;
+
+ BT_DBG("conn %p len %u flags 0x%x", conn, skb->len, flags);
+
+ switch (flags) {
+ case ACL_START:
+ case ACL_START_NO_FLUSH:
+ case ACL_COMPLETE:
+ if (conn->rx_skb) {
+ BT_ERR("Unexpected start frame (len %d)", skb->len);
+ l2cap_recv_reset(conn);
+ l2cap_conn_unreliable(conn, ECOMM);
+ }
+
+ /* Start fragment may not contain the L2CAP length, so just
+ * copy the initial byte when that happens and use conn->mtu as
+ * expected length.
+ */
+ if (skb->len < L2CAP_LEN_SIZE) {
+ l2cap_recv_frag(conn, skb, conn->mtu);
+ break;
+ }
+
+ len = get_unaligned_le16(skb->data) + L2CAP_HDR_SIZE;
+
+ if (len == skb->len) {
+ /* Complete frame received */
+ l2cap_recv_frame(conn, skb);
+ return;
+ }
+
+ BT_DBG("Start: total len %d, frag len %u", len, skb->len);
+
+ if (skb->len > len) {
+ BT_ERR("Frame is too long (len %u, expected len %d)",
+ skb->len, len);
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
+ /* Append fragment into frame (with header) */
+ if (l2cap_recv_frag(conn, skb, len) < 0)
+ goto drop;
+
+ break;
+
+ case ACL_CONT:
+ BT_DBG("Cont: frag len %u (expecting %u)", skb->len, conn->rx_len);
+
+ if (!conn->rx_skb) {
+ BT_ERR("Unexpected continuation frame (len %d)", skb->len);
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
+ /* Complete the L2CAP length if it has not been read */
+ if (conn->rx_skb->len < L2CAP_LEN_SIZE) {
+ if (l2cap_recv_len(conn, skb) < 0) {
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
+ /* If the header still could not be read, just continue */
+ if (conn->rx_skb->len < L2CAP_LEN_SIZE)
+ break;
+ }
+
+ if (skb->len > conn->rx_len) {
+ BT_ERR("Fragment is too long (len %u, expected %u)",
+ skb->len, conn->rx_len);
+ l2cap_recv_reset(conn);
+ l2cap_conn_unreliable(conn, ECOMM);
+ goto drop;
+ }
+
+ /* Append fragment into frame (with header) */
+ l2cap_recv_frag(conn, skb, skb->len);
+
+ if (!conn->rx_len) {
+ /* Complete frame received. l2cap_recv_frame
+ * takes ownership of the skb so set the global
+ * rx_skb pointer to NULL first.
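+ * (After l2cap_recv_frame() returns the skb may already have
+ * been freed, so conn->rx_skb must not be left pointing at it.)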
+ */ + struct sk_buff *rx_skb = conn->rx_skb; + conn->rx_skb = NULL; + l2cap_recv_frame(conn, rx_skb); + } + break; + } + +drop: + kfree_skb(skb); +} + +static struct hci_cb l2cap_cb = { + .name = "L2CAP", + .connect_cfm = l2cap_connect_cfm, + .disconn_cfm = l2cap_disconn_cfm, + .security_cfm = l2cap_security_cfm, +}; + +static int l2cap_debugfs_show(struct seq_file *f, void *p) +{ + struct l2cap_chan *c; + + read_lock(&chan_list_lock); + + list_for_each_entry(c, &chan_list, global_l) { + seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n", + &c->src, c->src_type, &c->dst, c->dst_type, + c->state, __le16_to_cpu(c->psm), + c->scid, c->dcid, c->imtu, c->omtu, + c->sec_level, c->mode); + } + + read_unlock(&chan_list_lock); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(l2cap_debugfs); + +static struct dentry *l2cap_debugfs; + +int __init l2cap_init(void) +{ + int err; + + err = l2cap_init_sockets(); + if (err < 0) + return err; + + hci_register_cb(&l2cap_cb); + + if (IS_ERR_OR_NULL(bt_debugfs)) + return 0; + + l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs, + NULL, &l2cap_debugfs_fops); + + return 0; +} + +void l2cap_exit(void) +{ + debugfs_remove(l2cap_debugfs); + hci_unregister_cb(&l2cap_cb); + l2cap_cleanup_sockets(); +} + +module_param(disable_ertm, bool, 0644); +MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode"); + +module_param(enable_ecred, bool, 0644); +MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode"); diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c new file mode 100644 index 000000000..947ca580b --- /dev/null +++ b/net/bluetooth/l2cap_sock.c @@ -0,0 +1,1981 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org> + Copyright (C) 2010 Google Inc. + Copyright (C) 2011 ProFUSION Embedded Systems + + Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth L2CAP sockets. 
*/ + +#include <linux/module.h> +#include <linux/export.h> +#include <linux/filter.h> +#include <linux/sched/signal.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> + +#include "smp.h" + +static struct bt_sock_list l2cap_sk_list = { + .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock) +}; + +static const struct proto_ops l2cap_sock_ops; +static void l2cap_sock_init(struct sock *sk, struct sock *parent); +static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, + int proto, gfp_t prio, int kern); +static void l2cap_sock_cleanup_listen(struct sock *parent); + +bool l2cap_is_socket(struct socket *sock) +{ + return sock && sock->ops == &l2cap_sock_ops; +} +EXPORT_SYMBOL(l2cap_is_socket); + +static int l2cap_validate_bredr_psm(u16 psm) +{ + /* PSM must be odd and lsb of upper byte must be 0 */ + if ((psm & 0x0101) != 0x0001) + return -EINVAL; + + /* Restrict usage of well-known PSMs */ + if (psm < L2CAP_PSM_DYN_START && !capable(CAP_NET_BIND_SERVICE)) + return -EACCES; + + return 0; +} + +static int l2cap_validate_le_psm(u16 psm) +{ + /* Valid LE_PSM ranges are defined only until 0x00ff */ + if (psm > L2CAP_PSM_LE_DYN_END) + return -EINVAL; + + /* Restrict fixed, SIG assigned PSM values to CAP_NET_BIND_SERVICE */ + if (psm < L2CAP_PSM_LE_DYN_START && !capable(CAP_NET_BIND_SERVICE)) + return -EACCES; + + return 0; +} + +static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen) +{ + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct sockaddr_l2 la; + int len, err = 0; + + BT_DBG("sk %p", sk); + + if (!addr || alen < offsetofend(struct sockaddr, sa_family) || + addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + memset(&la, 0, sizeof(la)); + len = min_t(unsigned int, sizeof(la), alen); + memcpy(&la, addr, len); + + if (la.l2_cid && la.l2_psm) + return -EINVAL; + + if (!bdaddr_type_is_valid(la.l2_bdaddr_type)) + return -EINVAL; + + if (bdaddr_type_is_le(la.l2_bdaddr_type)) { + /* We only allow ATT user space socket */ + if (la.l2_cid && + la.l2_cid != cpu_to_le16(L2CAP_CID_ATT)) + return -EINVAL; + } + + lock_sock(sk); + + if (sk->sk_state != BT_OPEN) { + err = -EBADFD; + goto done; + } + + if (la.l2_psm) { + __u16 psm = __le16_to_cpu(la.l2_psm); + + if (la.l2_bdaddr_type == BDADDR_BREDR) + err = l2cap_validate_bredr_psm(psm); + else + err = l2cap_validate_le_psm(psm); + + if (err) + goto done; + } + + bacpy(&chan->src, &la.l2_bdaddr); + chan->src_type = la.l2_bdaddr_type; + + if (la.l2_cid) + err = l2cap_add_scid(chan, __le16_to_cpu(la.l2_cid)); + else + err = l2cap_add_psm(chan, &la.l2_bdaddr, la.l2_psm); + + if (err < 0) + goto done; + + switch (chan->chan_type) { + case L2CAP_CHAN_CONN_LESS: + if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_3DSP) + chan->sec_level = BT_SECURITY_SDP; + break; + case L2CAP_CHAN_CONN_ORIENTED: + if (__le16_to_cpu(la.l2_psm) == L2CAP_PSM_SDP || + __le16_to_cpu(la.l2_psm) == L2CAP_PSM_RFCOMM) + chan->sec_level = BT_SECURITY_SDP; + break; + case L2CAP_CHAN_RAW: + chan->sec_level = BT_SECURITY_SDP; + break; + case L2CAP_CHAN_FIXED: + /* Fixed channels default to the L2CAP core not holding a + * hci_conn reference for them. For fixed channels mapping to + * L2CAP sockets we do want to hold a reference so set the + * appropriate flag to request it. + */ + set_bit(FLAG_HOLD_HCI_CONN, &chan->flags); + break; + } + + /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and + * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set. 
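+ * (User space can opt in to ECRED beforehand via the BT_MODE
+ * socket option; that choice is preserved by the check below.)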
+ */
+ if (chan->psm && bdaddr_type_is_le(chan->src_type) &&
+ chan->mode != L2CAP_MODE_EXT_FLOWCTL)
+ chan->mode = L2CAP_MODE_LE_FLOWCTL;
+
+ chan->state = BT_BOUND;
+ sk->sk_state = BT_BOUND;
+
+done:
+ release_sock(sk);
+ return err;
+}
+
+static void l2cap_sock_init_pid(struct sock *sk)
+{
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+ /* Only L2CAP_MODE_EXT_FLOWCTL ever needs to access the PID in order to
+ * group the channels being requested.
+ */
+ if (chan->mode != L2CAP_MODE_EXT_FLOWCTL)
+ return;
+
+ spin_lock(&sk->sk_peer_lock);
+ sk->sk_peer_pid = get_pid(task_tgid(current));
+ spin_unlock(&sk->sk_peer_lock);
+}
+
+static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
+ int alen, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+ struct sockaddr_l2 la;
+ int len, err = 0;
+ bool zapped;
+
+ BT_DBG("sk %p", sk);
+
+ lock_sock(sk);
+ zapped = sock_flag(sk, SOCK_ZAPPED);
+ release_sock(sk);
+
+ if (zapped)
+ return -EINVAL;
+
+ if (!addr || alen < offsetofend(struct sockaddr, sa_family) ||
+ addr->sa_family != AF_BLUETOOTH)
+ return -EINVAL;
+
+ memset(&la, 0, sizeof(la));
+ len = min_t(unsigned int, sizeof(la), alen);
+ memcpy(&la, addr, len);
+
+ if (la.l2_cid && la.l2_psm)
+ return -EINVAL;
+
+ if (!bdaddr_type_is_valid(la.l2_bdaddr_type))
+ return -EINVAL;
+
+ /* Check that the socket wasn't bound to something that
+ * conflicts with the address given to connect(). If chan->src
+ * is BDADDR_ANY it means bind() was never used, in which case
+ * chan->src_type and la.l2_bdaddr_type do not need to match.
+ */
+ if (chan->src_type == BDADDR_BREDR && bacmp(&chan->src, BDADDR_ANY) &&
+ bdaddr_type_is_le(la.l2_bdaddr_type)) {
+ /* Old user space versions will try to incorrectly bind
+ * the ATT socket using BDADDR_BREDR. We need to accept
+ * this and fix up the source address type only when
+ * both the source CID and destination CID indicate
+ * ATT. Anything else is an invalid combination.
+ */
+ if (chan->scid != L2CAP_CID_ATT ||
+ la.l2_cid != cpu_to_le16(L2CAP_CID_ATT))
+ return -EINVAL;
+
+ /* We don't have the hdev available here to make a
+ * better decision on random vs public, but since all
+ * user space versions that exhibit this issue anyway do
+ * not support random local addresses, assuming public
+ * here is good enough.
+ */
+ chan->src_type = BDADDR_LE_PUBLIC;
+ }
+
+ if (chan->src_type != BDADDR_BREDR && la.l2_bdaddr_type == BDADDR_BREDR)
+ return -EINVAL;
+
+ if (bdaddr_type_is_le(la.l2_bdaddr_type)) {
+ /* We only allow ATT user space socket */
+ if (la.l2_cid &&
+ la.l2_cid != cpu_to_le16(L2CAP_CID_ATT))
+ return -EINVAL;
+ }
+
+ /* Use L2CAP_MODE_LE_FLOWCTL (CoC) in case of LE address and
+ * L2CAP_MODE_EXT_FLOWCTL (ECRED) has not been set.
+ */ + if (chan->psm && bdaddr_type_is_le(chan->src_type) && + chan->mode != L2CAP_MODE_EXT_FLOWCTL) + chan->mode = L2CAP_MODE_LE_FLOWCTL; + + l2cap_sock_init_pid(sk); + + err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid), + &la.l2_bdaddr, la.l2_bdaddr_type); + if (err) + return err; + + lock_sock(sk); + + err = bt_sock_wait_state(sk, BT_CONNECTED, + sock_sndtimeo(sk, flags & O_NONBLOCK)); + + release_sock(sk); + + return err; +} + +static int l2cap_sock_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + int err = 0; + + BT_DBG("sk %p backlog %d", sk, backlog); + + lock_sock(sk); + + if (sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + + switch (chan->mode) { + case L2CAP_MODE_BASIC: + case L2CAP_MODE_LE_FLOWCTL: + break; + case L2CAP_MODE_EXT_FLOWCTL: + if (!enable_ecred) { + err = -EOPNOTSUPP; + goto done; + } + break; + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + if (!disable_ertm) + break; + fallthrough; + default: + err = -EOPNOTSUPP; + goto done; + } + + l2cap_sock_init_pid(sk); + + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + + /* Listening channels need to use nested locking in order not to + * cause lockdep warnings when the created child channels end up + * being locked in the same thread as the parent channel. + */ + atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); + + chan->state = BT_LISTEN; + sk->sk_state = BT_LISTEN; + +done: + release_sock(sk); + return err; +} + +static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, + int flags, bool kern) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct sock *sk = sock->sk, *nsk; + long timeo; + int err = 0; + + lock_sock_nested(sk, L2CAP_NESTING_PARENT); + + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + + BT_DBG("sk %p timeo %ld", sk, timeo); + + /* Wait for an incoming connection. (wake-one). 
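+ * add_wait_queue_exclusive() means that of possibly many threads
+ * blocked in accept(), only one is woken per incoming connection.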
*/ + add_wait_queue_exclusive(sk_sleep(sk), &wait); + while (1) { + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + break; + } + + nsk = bt_accept_dequeue(sk, newsock); + if (nsk) + break; + + if (!timeo) { + err = -EAGAIN; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + + lock_sock_nested(sk, L2CAP_NESTING_PARENT); + } + remove_wait_queue(sk_sleep(sk), &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + + BT_DBG("new socket %p", nsk); + +done: + release_sock(sk); + return err; +} + +static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, + int peer) +{ + struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr; + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (peer && sk->sk_state != BT_CONNECTED && + sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2 && + sk->sk_state != BT_CONFIG) + return -ENOTCONN; + + memset(la, 0, sizeof(struct sockaddr_l2)); + addr->sa_family = AF_BLUETOOTH; + + la->l2_psm = chan->psm; + + if (peer) { + bacpy(&la->l2_bdaddr, &chan->dst); + la->l2_cid = cpu_to_le16(chan->dcid); + la->l2_bdaddr_type = chan->dst_type; + } else { + bacpy(&la->l2_bdaddr, &chan->src); + la->l2_cid = cpu_to_le16(chan->scid); + la->l2_bdaddr_type = chan->src_type; + } + + return sizeof(struct sockaddr_l2); +} + +static int l2cap_get_mode(struct l2cap_chan *chan) +{ + switch (chan->mode) { + case L2CAP_MODE_BASIC: + return BT_MODE_BASIC; + case L2CAP_MODE_ERTM: + return BT_MODE_ERTM; + case L2CAP_MODE_STREAMING: + return BT_MODE_STREAMING; + case L2CAP_MODE_LE_FLOWCTL: + return BT_MODE_LE_FLOWCTL; + case L2CAP_MODE_EXT_FLOWCTL: + return BT_MODE_EXT_FLOWCTL; + } + + return -EINVAL; +} + +static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct l2cap_options opts; + struct l2cap_conninfo cinfo; + int len, err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case L2CAP_OPTIONS: + /* LE sockets should use BT_SNDMTU/BT_RCVMTU, but since + * legacy ATT code depends on getsockopt for + * L2CAP_OPTIONS we need to let this pass. 
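+ *
+ * The legacy user-space pattern being preserved is roughly this
+ * (a sketch; struct l2cap_options layout as in the BlueZ headers):
+ *
+ *	struct l2cap_options opts;
+ *	socklen_t optlen = sizeof(opts);
+ *
+ *	if (getsockopt(fd, SOL_L2CAP, L2CAP_OPTIONS, &opts, &optlen) == 0)
+ *		printf("imtu %u omtu %u mode 0x%2.2x\n",
+ *		       opts.imtu, opts.omtu, opts.mode);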
+ */ + if (bdaddr_type_is_le(chan->src_type) && + chan->scid != L2CAP_CID_ATT) { + err = -EINVAL; + break; + } + + /* Only BR/EDR modes are supported here */ + switch (chan->mode) { + case L2CAP_MODE_BASIC: + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + break; + default: + err = -EINVAL; + break; + } + + if (err < 0) + break; + + memset(&opts, 0, sizeof(opts)); + opts.imtu = chan->imtu; + opts.omtu = chan->omtu; + opts.flush_to = chan->flush_to; + opts.mode = chan->mode; + opts.fcs = chan->fcs; + opts.max_tx = chan->max_tx; + opts.txwin_size = chan->tx_win; + + BT_DBG("mode 0x%2.2x", chan->mode); + + len = min_t(unsigned int, len, sizeof(opts)); + if (copy_to_user(optval, (char *) &opts, len)) + err = -EFAULT; + + break; + + case L2CAP_LM: + switch (chan->sec_level) { + case BT_SECURITY_LOW: + opt = L2CAP_LM_AUTH; + break; + case BT_SECURITY_MEDIUM: + opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT; + break; + case BT_SECURITY_HIGH: + opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | + L2CAP_LM_SECURE; + break; + case BT_SECURITY_FIPS: + opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT | + L2CAP_LM_SECURE | L2CAP_LM_FIPS; + break; + default: + opt = 0; + break; + } + + if (test_bit(FLAG_ROLE_SWITCH, &chan->flags)) + opt |= L2CAP_LM_MASTER; + + if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags)) + opt |= L2CAP_LM_RELIABLE; + + if (put_user(opt, (u32 __user *) optval)) + err = -EFAULT; + + break; + + case L2CAP_CONNINFO: + if (sk->sk_state != BT_CONNECTED && + !(sk->sk_state == BT_CONNECT2 && + test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) { + err = -ENOTCONN; + break; + } + + memset(&cinfo, 0, sizeof(cinfo)); + cinfo.hci_handle = chan->conn->hcon->handle; + memcpy(cinfo.dev_class, chan->conn->hcon->dev_class, 3); + + len = min_t(unsigned int, len, sizeof(cinfo)); + if (copy_to_user(optval, (char *) &cinfo, len)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct bt_security sec; + struct bt_power pwr; + u32 phys; + int len, mode, err = 0; + + BT_DBG("sk %p", sk); + + if (level == SOL_L2CAP) + return l2cap_sock_getsockopt_old(sock, optname, optval, optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case BT_SECURITY: + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && + chan->chan_type != L2CAP_CHAN_FIXED && + chan->chan_type != L2CAP_CHAN_RAW) { + err = -EINVAL; + break; + } + + memset(&sec, 0, sizeof(sec)); + if (chan->conn) { + sec.level = chan->conn->hcon->sec_level; + + if (sk->sk_state == BT_CONNECTED) + sec.key_size = chan->conn->hcon->enc_key_size; + } else { + sec.level = chan->sec_level; + } + + len = min_t(unsigned int, len, sizeof(sec)); + if (copy_to_user(optval, (char *) &sec, len)) + err = -EFAULT; + + break; + + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), + (u32 __user *) optval)) + err = -EFAULT; + + break; + + case BT_FLUSHABLE: + if (put_user(test_bit(FLAG_FLUSHABLE, &chan->flags), + (u32 __user *) optval)) + err = -EFAULT; + + break; + + case BT_POWER: + if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM + && sk->sk_type != SOCK_RAW) { + err = -EINVAL; + break; + } + + 
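+ /* For reference, user space reads this option with a plain
+ * getsockopt() call, e.g. (a sketch; in the BlueZ headers
+ * struct bt_power carries a single force_active byte):
+ *
+ *	struct bt_power pwr;
+ *	socklen_t optlen = sizeof(pwr);
+ *
+ *	if (getsockopt(fd, SOL_BLUETOOTH, BT_POWER, &pwr, &optlen) == 0)
+ *		printf("force_active %u\n", pwr.force_active);
+ */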
pwr.force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags); + + len = min_t(unsigned int, len, sizeof(pwr)); + if (copy_to_user(optval, (char *) &pwr, len)) + err = -EFAULT; + + break; + + case BT_CHANNEL_POLICY: + if (put_user(chan->chan_policy, (u32 __user *) optval)) + err = -EFAULT; + break; + + case BT_SNDMTU: + if (!bdaddr_type_is_le(chan->src_type)) { + err = -EINVAL; + break; + } + + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + break; + } + + if (put_user(chan->omtu, (u16 __user *) optval)) + err = -EFAULT; + break; + + case BT_RCVMTU: + if (!bdaddr_type_is_le(chan->src_type)) { + err = -EINVAL; + break; + } + + if (put_user(chan->imtu, (u16 __user *) optval)) + err = -EFAULT; + break; + + case BT_PHY: + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + break; + } + + phys = hci_conn_get_phy(chan->conn->hcon); + + if (put_user(phys, (u32 __user *) optval)) + err = -EFAULT; + break; + + case BT_MODE: + if (!enable_ecred) { + err = -ENOPROTOOPT; + break; + } + + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { + err = -EINVAL; + break; + } + + mode = l2cap_get_mode(chan); + if (mode < 0) { + err = mode; + break; + } + + if (put_user(mode, (u8 __user *) optval)) + err = -EFAULT; + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static bool l2cap_valid_mtu(struct l2cap_chan *chan, u16 mtu) +{ + switch (chan->scid) { + case L2CAP_CID_ATT: + if (mtu < L2CAP_LE_MIN_MTU) + return false; + break; + + default: + if (mtu < L2CAP_DEFAULT_MIN_MTU) + return false; + } + + return true; +} + +static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct l2cap_options opts; + int len, err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + lock_sock(sk); + + switch (optname) { + case L2CAP_OPTIONS: + if (bdaddr_type_is_le(chan->src_type)) { + err = -EINVAL; + break; + } + + if (sk->sk_state == BT_CONNECTED) { + err = -EINVAL; + break; + } + + opts.imtu = chan->imtu; + opts.omtu = chan->omtu; + opts.flush_to = chan->flush_to; + opts.mode = chan->mode; + opts.fcs = chan->fcs; + opts.max_tx = chan->max_tx; + opts.txwin_size = chan->tx_win; + + len = min_t(unsigned int, sizeof(opts), optlen); + if (copy_from_sockptr(&opts, optval, len)) { + err = -EFAULT; + break; + } + + if (opts.txwin_size > L2CAP_DEFAULT_EXT_WINDOW) { + err = -EINVAL; + break; + } + + if (!l2cap_valid_mtu(chan, opts.imtu)) { + err = -EINVAL; + break; + } + + /* Only BR/EDR modes are supported here */ + switch (opts.mode) { + case L2CAP_MODE_BASIC: + clear_bit(CONF_STATE2_DEVICE, &chan->conf_state); + break; + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + if (!disable_ertm) + break; + fallthrough; + default: + err = -EINVAL; + break; + } + + if (err < 0) + break; + + chan->mode = opts.mode; + + BT_DBG("mode 0x%2.2x", chan->mode); + + chan->imtu = opts.imtu; + chan->omtu = opts.omtu; + chan->fcs = opts.fcs; + chan->max_tx = opts.max_tx; + chan->tx_win = opts.txwin_size; + chan->flush_to = opts.flush_to; + break; + + case L2CAP_LM: + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { + err = -EFAULT; + break; + } + + if (opt & L2CAP_LM_FIPS) { + err = -EINVAL; + break; + } + + if (opt & L2CAP_LM_AUTH) + chan->sec_level = BT_SECURITY_LOW; + if (opt & L2CAP_LM_ENCRYPT) + chan->sec_level = BT_SECURITY_MEDIUM; + if (opt & L2CAP_LM_SECURE) + chan->sec_level = BT_SECURITY_HIGH; + + if (opt & L2CAP_LM_MASTER) + 
set_bit(FLAG_ROLE_SWITCH, &chan->flags); + else + clear_bit(FLAG_ROLE_SWITCH, &chan->flags); + + if (opt & L2CAP_LM_RELIABLE) + set_bit(FLAG_FORCE_RELIABLE, &chan->flags); + else + clear_bit(FLAG_FORCE_RELIABLE, &chan->flags); + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int l2cap_set_mode(struct l2cap_chan *chan, u8 mode) +{ + switch (mode) { + case BT_MODE_BASIC: + if (bdaddr_type_is_le(chan->src_type)) + return -EINVAL; + mode = L2CAP_MODE_BASIC; + clear_bit(CONF_STATE2_DEVICE, &chan->conf_state); + break; + case BT_MODE_ERTM: + if (!disable_ertm || bdaddr_type_is_le(chan->src_type)) + return -EINVAL; + mode = L2CAP_MODE_ERTM; + break; + case BT_MODE_STREAMING: + if (!disable_ertm || bdaddr_type_is_le(chan->src_type)) + return -EINVAL; + mode = L2CAP_MODE_STREAMING; + break; + case BT_MODE_LE_FLOWCTL: + if (!bdaddr_type_is_le(chan->src_type)) + return -EINVAL; + mode = L2CAP_MODE_LE_FLOWCTL; + break; + case BT_MODE_EXT_FLOWCTL: + /* TODO: Add support for ECRED PDUs to BR/EDR */ + if (!bdaddr_type_is_le(chan->src_type)) + return -EINVAL; + mode = L2CAP_MODE_EXT_FLOWCTL; + break; + default: + return -EINVAL; + } + + chan->mode = mode; + + return 0; +} + +static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + struct bt_security sec; + struct bt_power pwr; + struct l2cap_conn *conn; + int len, err = 0; + u32 opt; + u16 mtu; + u8 mode; + + BT_DBG("sk %p", sk); + + if (level == SOL_L2CAP) + return l2cap_sock_setsockopt_old(sock, optname, optval, optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + lock_sock(sk); + + switch (optname) { + case BT_SECURITY: + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && + chan->chan_type != L2CAP_CHAN_FIXED && + chan->chan_type != L2CAP_CHAN_RAW) { + err = -EINVAL; + break; + } + + sec.level = BT_SECURITY_LOW; + + len = min_t(unsigned int, sizeof(sec), optlen); + if (copy_from_sockptr(&sec, optval, len)) { + err = -EFAULT; + break; + } + + if (sec.level < BT_SECURITY_LOW || + sec.level > BT_SECURITY_FIPS) { + err = -EINVAL; + break; + } + + chan->sec_level = sec.level; + + if (!chan->conn) + break; + + conn = chan->conn; + + /* change security for LE channels */ + if (chan->scid == L2CAP_CID_ATT) { + if (smp_conn_security(conn->hcon, sec.level)) { + err = -EINVAL; + break; + } + + set_bit(FLAG_PENDING_SECURITY, &chan->flags); + sk->sk_state = BT_CONFIG; + chan->state = BT_CONFIG; + + /* or for ACL link */ + } else if ((sk->sk_state == BT_CONNECT2 && + test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) || + sk->sk_state == BT_CONNECTED) { + if (!l2cap_chan_check_security(chan, true)) + set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); + else + sk->sk_state_change(sk); + } else { + err = -EINVAL; + } + break; + + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { + err = -EFAULT; + break; + } + + if (opt) { + set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + set_bit(FLAG_DEFER_SETUP, &chan->flags); + } else { + clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + clear_bit(FLAG_DEFER_SETUP, &chan->flags); + } + break; + + case BT_FLUSHABLE: + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { + err = -EFAULT; + break; + } + + if (opt > BT_FLUSHABLE_ON) { + err = -EINVAL; + break; + } + + if (opt == BT_FLUSHABLE_OFF) { + conn = 
chan->conn; + /* proceed further only when we have l2cap_conn and + No Flush support in the LM */ + if (!conn || !lmp_no_flush_capable(conn->hcon->hdev)) { + err = -EINVAL; + break; + } + } + + if (opt) + set_bit(FLAG_FLUSHABLE, &chan->flags); + else + clear_bit(FLAG_FLUSHABLE, &chan->flags); + break; + + case BT_POWER: + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED && + chan->chan_type != L2CAP_CHAN_RAW) { + err = -EINVAL; + break; + } + + pwr.force_active = BT_POWER_FORCE_ACTIVE_ON; + + len = min_t(unsigned int, sizeof(pwr), optlen); + if (copy_from_sockptr(&pwr, optval, len)) { + err = -EFAULT; + break; + } + + if (pwr.force_active) + set_bit(FLAG_FORCE_ACTIVE, &chan->flags); + else + clear_bit(FLAG_FORCE_ACTIVE, &chan->flags); + break; + + case BT_CHANNEL_POLICY: + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { + err = -EFAULT; + break; + } + + if (opt > BT_CHANNEL_POLICY_AMP_PREFERRED) { + err = -EINVAL; + break; + } + + if (chan->mode != L2CAP_MODE_ERTM && + chan->mode != L2CAP_MODE_STREAMING) { + err = -EOPNOTSUPP; + break; + } + + chan->chan_policy = (u8) opt; + + if (sk->sk_state == BT_CONNECTED && + chan->move_role == L2CAP_MOVE_ROLE_NONE) + l2cap_move_start(chan); + + break; + + case BT_SNDMTU: + if (!bdaddr_type_is_le(chan->src_type)) { + err = -EINVAL; + break; + } + + /* Setting is not supported as it's the remote side that + * decides this. + */ + err = -EPERM; + break; + + case BT_RCVMTU: + if (!bdaddr_type_is_le(chan->src_type)) { + err = -EINVAL; + break; + } + + if (chan->mode == L2CAP_MODE_LE_FLOWCTL && + sk->sk_state == BT_CONNECTED) { + err = -EISCONN; + break; + } + + if (copy_from_sockptr(&mtu, optval, sizeof(u16))) { + err = -EFAULT; + break; + } + + if (chan->mode == L2CAP_MODE_EXT_FLOWCTL && + sk->sk_state == BT_CONNECTED) + err = l2cap_chan_reconfigure(chan, mtu); + else + chan->imtu = mtu; + + break; + + case BT_MODE: + if (!enable_ecred) { + err = -ENOPROTOOPT; + break; + } + + BT_DBG("sk->sk_state %u", sk->sk_state); + + if (sk->sk_state != BT_BOUND) { + err = -EINVAL; + break; + } + + if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) { + err = -EINVAL; + break; + } + + if (copy_from_sockptr(&mode, optval, sizeof(u8))) { + err = -EFAULT; + break; + } + + BT_DBG("mode %u", mode); + + err = l2cap_set_mode(chan, mode); + if (err) + break; + + BT_DBG("mode 0x%2.2x", chan->mode); + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int l2cap_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + struct sock *sk = sock->sk; + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + int err; + + BT_DBG("sock %p, sk %p", sock, sk); + + err = sock_error(sk); + if (err) + return err; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + if (sk->sk_state != BT_CONNECTED) + return -ENOTCONN; + + lock_sock(sk); + err = bt_sock_wait_ready(sk, msg->msg_flags); + release_sock(sk); + if (err) + return err; + + l2cap_chan_lock(chan); + err = l2cap_chan_send(chan, msg, len); + l2cap_chan_unlock(chan); + + return err; +} + +static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) +{ + struct sock *sk = sock->sk; + struct l2cap_pinfo *pi = l2cap_pi(sk); + int err; + + lock_sock(sk); + + if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP, + &bt_sk(sk)->flags)) { + if (pi->chan->mode == L2CAP_MODE_EXT_FLOWCTL) { + sk->sk_state = BT_CONNECTED; + pi->chan->state = BT_CONNECTED; + __l2cap_ecred_conn_rsp_defer(pi->chan); + } else if 
(bdaddr_type_is_le(pi->chan->src_type)) { + sk->sk_state = BT_CONNECTED; + pi->chan->state = BT_CONNECTED; + __l2cap_le_connect_rsp_defer(pi->chan); + } else { + sk->sk_state = BT_CONFIG; + pi->chan->state = BT_CONFIG; + __l2cap_connect_rsp_defer(pi->chan); + } + + err = 0; + goto done; + } + + release_sock(sk); + + if (sock->type == SOCK_STREAM) + err = bt_sock_stream_recvmsg(sock, msg, len, flags); + else + err = bt_sock_recvmsg(sock, msg, len, flags); + + if (pi->chan->mode != L2CAP_MODE_ERTM) + return err; + + /* Attempt to put pending rx data in the socket buffer */ + + lock_sock(sk); + + if (!test_bit(CONN_LOCAL_BUSY, &pi->chan->conn_state)) + goto done; + + if (pi->rx_busy_skb) { + if (!__sock_queue_rcv_skb(sk, pi->rx_busy_skb)) + pi->rx_busy_skb = NULL; + else + goto done; + } + + /* Restore data flow when half of the receive buffer is + * available. This avoids resending large numbers of + * frames. + */ + if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf >> 1) + l2cap_chan_busy(pi->chan, 0); + +done: + release_sock(sk); + return err; +} + +/* Kill socket (only if zapped and orphan) + * Must be called on unlocked socket, with l2cap channel lock. + */ +static void l2cap_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + return; + + BT_DBG("sk %p state %s", sk, state_to_string(sk->sk_state)); + + /* Kill poor orphan */ + + l2cap_chan_put(l2cap_pi(sk)->chan); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +static int __l2cap_wait_ack(struct sock *sk, struct l2cap_chan *chan) +{ + DECLARE_WAITQUEUE(wait, current); + int err = 0; + int timeo = L2CAP_WAIT_ACK_POLL_PERIOD; + /* Timeout to prevent infinite loop */ + unsigned long timeout = jiffies + L2CAP_WAIT_ACK_TIMEOUT; + + add_wait_queue(sk_sleep(sk), &wait); + set_current_state(TASK_INTERRUPTIBLE); + do { + BT_DBG("Waiting for %d ACKs, timeout %04d ms", + chan->unacked_frames, time_after(jiffies, timeout) ? 
0 : + jiffies_to_msecs(timeout - jiffies)); + + if (!timeo) + timeo = L2CAP_WAIT_ACK_POLL_PERIOD; + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + timeo = schedule_timeout(timeo); + lock_sock(sk); + set_current_state(TASK_INTERRUPTIBLE); + + err = sock_error(sk); + if (err) + break; + + if (time_after(jiffies, timeout)) { + err = -ENOLINK; + break; + } + + } while (chan->unacked_frames > 0 && + chan->state == BT_CONNECTED); + + set_current_state(TASK_RUNNING); + remove_wait_queue(sk_sleep(sk), &wait); + return err; +} + +static int l2cap_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + struct l2cap_chan *chan; + struct l2cap_conn *conn; + int err = 0; + + BT_DBG("sock %p, sk %p, how %d", sock, sk, how); + + /* 'how' parameter is mapped to sk_shutdown as follows: + * SHUT_RD (0) --> RCV_SHUTDOWN (1) + * SHUT_WR (1) --> SEND_SHUTDOWN (2) + * SHUT_RDWR (2) --> SHUTDOWN_MASK (3) + */ + how++; + + if (!sk) + return 0; + + lock_sock(sk); + + if ((sk->sk_shutdown & how) == how) + goto shutdown_already; + + BT_DBG("Handling sock shutdown"); + + /* prevent sk structure from being freed whilst unlocked */ + sock_hold(sk); + + chan = l2cap_pi(sk)->chan; + /* prevent chan structure from being freed whilst unlocked */ + l2cap_chan_hold(chan); + + BT_DBG("chan %p state %s", chan, state_to_string(chan->state)); + + if (chan->mode == L2CAP_MODE_ERTM && + chan->unacked_frames > 0 && + chan->state == BT_CONNECTED) { + err = __l2cap_wait_ack(sk, chan); + + /* After waiting for ACKs, check whether shutdown + * has already been actioned to close the L2CAP + * link such as by l2cap_disconnection_req(). + */ + if ((sk->sk_shutdown & how) == how) + goto shutdown_matched; + } + + /* Try setting the RCV_SHUTDOWN bit, return early if SEND_SHUTDOWN + * is already set + */ + if ((how & RCV_SHUTDOWN) && !(sk->sk_shutdown & RCV_SHUTDOWN)) { + sk->sk_shutdown |= RCV_SHUTDOWN; + if ((sk->sk_shutdown & how) == how) + goto shutdown_matched; + } + + sk->sk_shutdown |= SEND_SHUTDOWN; + release_sock(sk); + + l2cap_chan_lock(chan); + conn = chan->conn; + if (conn) + /* prevent conn structure from being freed */ + l2cap_conn_get(conn); + l2cap_chan_unlock(chan); + + if (conn) + /* mutex lock must be taken before l2cap_chan_lock() */ + mutex_lock(&conn->chan_lock); + + l2cap_chan_lock(chan); + l2cap_chan_close(chan, 0); + l2cap_chan_unlock(chan); + + if (conn) { + mutex_unlock(&conn->chan_lock); + l2cap_conn_put(conn); + } + + lock_sock(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) + err = bt_sock_wait_state(sk, BT_CLOSED, + sk->sk_lingertime); + +shutdown_matched: + l2cap_chan_put(chan); + sock_put(sk); + +shutdown_already: + if (!err && sk->sk_err) + err = -sk->sk_err; + + release_sock(sk); + + BT_DBG("Sock shutdown complete err: %d", err); + + return err; +} + +static int l2cap_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err; + struct l2cap_chan *chan; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + l2cap_sock_cleanup_listen(sk); + bt_sock_unlink(&l2cap_sk_list, sk); + + err = l2cap_sock_shutdown(sock, SHUT_RDWR); + chan = l2cap_pi(sk)->chan; + + l2cap_chan_hold(chan); + l2cap_chan_lock(chan); + + sock_orphan(sk); + l2cap_sock_kill(sk); + + l2cap_chan_unlock(chan); + l2cap_chan_put(chan); + + return err; +} + +static void l2cap_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + BT_DBG("parent %p state %s", parent, + 
state_to_string(parent->sk_state));
+
+ /* Close not yet accepted channels */
+ while ((sk = bt_accept_dequeue(parent, NULL))) {
+ struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+ BT_DBG("child chan %p state %s", chan,
+ state_to_string(chan->state));
+
+ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
+
+ __clear_chan_timer(chan);
+ l2cap_chan_close(chan, ECONNRESET);
+ l2cap_sock_kill(sk);
+
+ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ }
+}
+
+static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk, *parent = chan->data;
+
+ lock_sock(parent);
+
+ /* Check for backlog size */
+ if (sk_acceptq_is_full(parent)) {
+ BT_DBG("backlog full %d", parent->sk_ack_backlog);
+ release_sock(parent);
+ return NULL;
+ }
+
+ sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
+ GFP_ATOMIC, 0);
+ if (!sk) {
+ release_sock(parent);
+ return NULL;
+ }
+
+ bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
+
+ l2cap_sock_init(sk, parent);
+
+ bt_accept_enqueue(parent, sk, false);
+
+ release_sock(parent);
+
+ return l2cap_pi(sk)->chan;
+}
+
+static int l2cap_sock_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb)
+{
+ struct sock *sk = chan->data;
+ int err;
+
+ lock_sock(sk);
+
+ if (l2cap_pi(sk)->rx_busy_skb) {
+ err = -ENOMEM;
+ goto done;
+ }
+
+ if (chan->mode != L2CAP_MODE_ERTM &&
+ chan->mode != L2CAP_MODE_STREAMING) {
+ /* Even if no filter is attached, we could potentially
+ * get errors from security modules, etc.
+ */
+ err = sk_filter(sk, skb);
+ if (err)
+ goto done;
+ }
+
+ err = __sock_queue_rcv_skb(sk, skb);
+
+ /* For ERTM, handle one skb that doesn't fit into the recv
+ * buffer. This is important to do because the data frames
+ * have already been acked, so the skb cannot be discarded.
+ *
+ * Notify the l2cap core that the buffer is full, so the
+ * LOCAL_BUSY state is entered and no more frames are
+ * acked and reassembled until there is buffer space
+ * available.
+ */
+ if (err < 0 && chan->mode == L2CAP_MODE_ERTM) {
+ l2cap_pi(sk)->rx_busy_skb = skb;
+ l2cap_chan_busy(chan, 1);
+ err = 0;
+ }
+
+done:
+ release_sock(sk);
+
+ return err;
+}
+
+static void l2cap_sock_close_cb(struct l2cap_chan *chan)
+{
+ struct sock *sk = chan->data;
+
+ if (!sk)
+ return;
+
+ l2cap_sock_kill(sk);
+}
+
+static void l2cap_sock_teardown_cb(struct l2cap_chan *chan, int err)
+{
+ struct sock *sk = chan->data;
+ struct sock *parent;
+
+ if (!sk)
+ return;
+
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+
+ /* This callback can be called for both server (BT_LISTEN)
+ * sockets and "normal" ones. To avoid lockdep warnings
+ * with child socket locking (through l2cap_sock_cleanup_listen)
+ * we need to keep them in distinct nesting levels. The simplest
+ * way to accomplish this is to inherit the nesting level used
+ * for the channel.
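+ * (Listening parents are set to L2CAP_NESTING_PARENT in
+ * l2cap_sock_listen() above, while channels otherwise default to
+ * L2CAP_NESTING_NORMAL, so lockdep can tell the two apart.)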
+ */ + lock_sock_nested(sk, atomic_read(&chan->nesting)); + + parent = bt_sk(sk)->parent; + + switch (chan->state) { + case BT_OPEN: + case BT_BOUND: + case BT_CLOSED: + break; + case BT_LISTEN: + l2cap_sock_cleanup_listen(sk); + sk->sk_state = BT_CLOSED; + chan->state = BT_CLOSED; + + break; + default: + sk->sk_state = BT_CLOSED; + chan->state = BT_CLOSED; + + sk->sk_err = err; + + if (parent) { + bt_accept_unlink(sk); + parent->sk_data_ready(parent); + } else { + sk->sk_state_change(sk); + } + + break; + } + release_sock(sk); + + /* Only zap after cleanup to avoid use after free race */ + sock_set_flag(sk, SOCK_ZAPPED); + +} + +static void l2cap_sock_state_change_cb(struct l2cap_chan *chan, int state, + int err) +{ + struct sock *sk = chan->data; + + sk->sk_state = state; + + if (err) + sk->sk_err = err; +} + +static struct sk_buff *l2cap_sock_alloc_skb_cb(struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb) +{ + struct sock *sk = chan->data; + struct sk_buff *skb; + int err; + + l2cap_chan_unlock(chan); + skb = bt_skb_send_alloc(sk, hdr_len + len, nb, &err); + l2cap_chan_lock(chan); + + if (!skb) + return ERR_PTR(err); + + /* Channel lock is released before requesting new skb and then + * reacquired thus we need to recheck channel state. + */ + if (chan->state != BT_CONNECTED) { + kfree_skb(skb); + return ERR_PTR(-ENOTCONN); + } + + skb->priority = sk->sk_priority; + + bt_cb(skb)->l2cap.chan = chan; + + return skb; +} + +static void l2cap_sock_ready_cb(struct l2cap_chan *chan) +{ + struct sock *sk = chan->data; + struct sock *parent; + + lock_sock(sk); + + parent = bt_sk(sk)->parent; + + BT_DBG("sk %p, parent %p", sk, parent); + + sk->sk_state = BT_CONNECTED; + sk->sk_state_change(sk); + + if (parent) + parent->sk_data_ready(parent); + + release_sock(sk); +} + +static void l2cap_sock_defer_cb(struct l2cap_chan *chan) +{ + struct sock *parent, *sk = chan->data; + + lock_sock(sk); + + parent = bt_sk(sk)->parent; + if (parent) + parent->sk_data_ready(parent); + + release_sock(sk); +} + +static void l2cap_sock_resume_cb(struct l2cap_chan *chan) +{ + struct sock *sk = chan->data; + + if (test_and_clear_bit(FLAG_PENDING_SECURITY, &chan->flags)) { + sk->sk_state = BT_CONNECTED; + chan->state = BT_CONNECTED; + } + + clear_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); + sk->sk_state_change(sk); +} + +static void l2cap_sock_set_shutdown_cb(struct l2cap_chan *chan) +{ + struct sock *sk = chan->data; + + lock_sock(sk); + sk->sk_shutdown = SHUTDOWN_MASK; + release_sock(sk); +} + +static long l2cap_sock_get_sndtimeo_cb(struct l2cap_chan *chan) +{ + struct sock *sk = chan->data; + + return sk->sk_sndtimeo; +} + +static struct pid *l2cap_sock_get_peer_pid_cb(struct l2cap_chan *chan) +{ + struct sock *sk = chan->data; + + return sk->sk_peer_pid; +} + +static void l2cap_sock_suspend_cb(struct l2cap_chan *chan) +{ + struct sock *sk = chan->data; + + set_bit(BT_SK_SUSPEND, &bt_sk(sk)->flags); + sk->sk_state_change(sk); +} + +static int l2cap_sock_filter(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct sock *sk = chan->data; + + switch (chan->mode) { + case L2CAP_MODE_ERTM: + case L2CAP_MODE_STREAMING: + return sk_filter(sk, skb); + } + + return 0; +} + +static const struct l2cap_ops l2cap_chan_ops = { + .name = "L2CAP Socket Interface", + .new_connection = l2cap_sock_new_connection_cb, + .recv = l2cap_sock_recv_cb, + .close = l2cap_sock_close_cb, + .teardown = l2cap_sock_teardown_cb, + .state_change = l2cap_sock_state_change_cb, + .ready = l2cap_sock_ready_cb, + .defer = 
l2cap_sock_defer_cb, + .resume = l2cap_sock_resume_cb, + .suspend = l2cap_sock_suspend_cb, + .set_shutdown = l2cap_sock_set_shutdown_cb, + .get_sndtimeo = l2cap_sock_get_sndtimeo_cb, + .get_peer_pid = l2cap_sock_get_peer_pid_cb, + .alloc_skb = l2cap_sock_alloc_skb_cb, + .filter = l2cap_sock_filter, +}; + +static void l2cap_sock_destruct(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + if (l2cap_pi(sk)->chan) { + l2cap_pi(sk)->chan->data = NULL; + l2cap_chan_put(l2cap_pi(sk)->chan); + } + + if (l2cap_pi(sk)->rx_busy_skb) { + kfree_skb(l2cap_pi(sk)->rx_busy_skb); + l2cap_pi(sk)->rx_busy_skb = NULL; + } + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + +static void l2cap_skb_msg_name(struct sk_buff *skb, void *msg_name, + int *msg_namelen) +{ + DECLARE_SOCKADDR(struct sockaddr_l2 *, la, msg_name); + + memset(la, 0, sizeof(struct sockaddr_l2)); + la->l2_family = AF_BLUETOOTH; + la->l2_psm = bt_cb(skb)->l2cap.psm; + bacpy(&la->l2_bdaddr, &bt_cb(skb)->l2cap.bdaddr); + + *msg_namelen = sizeof(struct sockaddr_l2); +} + +static void l2cap_sock_init(struct sock *sk, struct sock *parent) +{ + struct l2cap_chan *chan = l2cap_pi(sk)->chan; + + BT_DBG("sk %p", sk); + + if (parent) { + struct l2cap_chan *pchan = l2cap_pi(parent)->chan; + + sk->sk_type = parent->sk_type; + bt_sk(sk)->flags = bt_sk(parent)->flags; + + chan->chan_type = pchan->chan_type; + chan->imtu = pchan->imtu; + chan->omtu = pchan->omtu; + chan->conf_state = pchan->conf_state; + chan->mode = pchan->mode; + chan->fcs = pchan->fcs; + chan->max_tx = pchan->max_tx; + chan->tx_win = pchan->tx_win; + chan->tx_win_max = pchan->tx_win_max; + chan->sec_level = pchan->sec_level; + chan->flags = pchan->flags; + chan->tx_credits = pchan->tx_credits; + chan->rx_credits = pchan->rx_credits; + + if (chan->chan_type == L2CAP_CHAN_FIXED) { + chan->scid = pchan->scid; + chan->dcid = pchan->scid; + } + + security_sk_clone(parent, sk); + } else { + switch (sk->sk_type) { + case SOCK_RAW: + chan->chan_type = L2CAP_CHAN_RAW; + break; + case SOCK_DGRAM: + chan->chan_type = L2CAP_CHAN_CONN_LESS; + bt_sk(sk)->skb_msg_name = l2cap_skb_msg_name; + break; + case SOCK_SEQPACKET: + case SOCK_STREAM: + chan->chan_type = L2CAP_CHAN_CONN_ORIENTED; + break; + } + + chan->imtu = L2CAP_DEFAULT_MTU; + chan->omtu = 0; + if (!disable_ertm && sk->sk_type == SOCK_STREAM) { + chan->mode = L2CAP_MODE_ERTM; + set_bit(CONF_STATE2_DEVICE, &chan->conf_state); + } else { + chan->mode = L2CAP_MODE_BASIC; + } + + l2cap_chan_set_defaults(chan); + } + + /* Default config options */ + chan->flush_to = L2CAP_DEFAULT_FLUSH_TO; + + chan->data = sk; + chan->ops = &l2cap_chan_ops; +} + +static struct proto l2cap_proto = { + .name = "L2CAP", + .owner = THIS_MODULE, + .obj_size = sizeof(struct l2cap_pinfo) +}; + +static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, + int proto, gfp_t prio, int kern) +{ + struct sock *sk; + struct l2cap_chan *chan; + + sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto, kern); + if (!sk) + return NULL; + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&bt_sk(sk)->accept_q); + + sk->sk_destruct = l2cap_sock_destruct; + sk->sk_sndtimeo = L2CAP_CONN_TIMEOUT; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = BT_OPEN; + + chan = l2cap_chan_create(); + if (!chan) { + sk_free(sk); + return NULL; + } + + l2cap_chan_hold(chan); + + l2cap_pi(sk)->chan = chan; + + return sk; +} + +static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct 
sock *sk; + + BT_DBG("sock %p", sock); + + sock->state = SS_UNCONNECTED; + + if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM && + sock->type != SOCK_DGRAM && sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW)) + return -EPERM; + + sock->ops = &l2cap_sock_ops; + + sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); + if (!sk) + return -ENOMEM; + + l2cap_sock_init(sk, NULL); + bt_sock_link(&l2cap_sk_list, sk); + return 0; +} + +static const struct proto_ops l2cap_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = l2cap_sock_release, + .bind = l2cap_sock_bind, + .connect = l2cap_sock_connect, + .listen = l2cap_sock_listen, + .accept = l2cap_sock_accept, + .getname = l2cap_sock_getname, + .sendmsg = l2cap_sock_sendmsg, + .recvmsg = l2cap_sock_recvmsg, + .poll = bt_sock_poll, + .ioctl = bt_sock_ioctl, + .gettstamp = sock_gettstamp, + .mmap = sock_no_mmap, + .socketpair = sock_no_socketpair, + .shutdown = l2cap_sock_shutdown, + .setsockopt = l2cap_sock_setsockopt, + .getsockopt = l2cap_sock_getsockopt +}; + +static const struct net_proto_family l2cap_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = l2cap_sock_create, +}; + +int __init l2cap_init_sockets(void) +{ + int err; + + BUILD_BUG_ON(sizeof(struct sockaddr_l2) > sizeof(struct sockaddr)); + + err = proto_register(&l2cap_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops); + if (err < 0) { + BT_ERR("L2CAP socket registration failed"); + goto error; + } + + err = bt_procfs_init(&init_net, "l2cap", &l2cap_sk_list, + NULL); + if (err < 0) { + BT_ERR("Failed to create L2CAP proc file"); + bt_sock_unregister(BTPROTO_L2CAP); + goto error; + } + + BT_INFO("L2CAP socket layer initialized"); + + return 0; + +error: + proto_unregister(&l2cap_proto); + return err; +} + +void l2cap_cleanup_sockets(void) +{ + bt_procfs_cleanup(&init_net, "l2cap"); + bt_sock_unregister(BTPROTO_L2CAP); + proto_unregister(&l2cap_proto); +} diff --git a/net/bluetooth/leds.c b/net/bluetooth/leds.c new file mode 100644 index 000000000..f46847632 --- /dev/null +++ b/net/bluetooth/leds.c @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright 2015, Heiner Kallweit <hkallweit1@gmail.com> + */ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +#include "leds.h" + +DEFINE_LED_TRIGGER(bt_power_led_trigger); + +struct hci_basic_led_trigger { + struct led_trigger led_trigger; + struct hci_dev *hdev; +}; + +#define to_hci_basic_led_trigger(arg) container_of(arg, \ + struct hci_basic_led_trigger, led_trigger) + +void hci_leds_update_powered(struct hci_dev *hdev, bool enabled) +{ + if (hdev->power_led) + led_trigger_event(hdev->power_led, + enabled ? LED_FULL : LED_OFF); + + if (!enabled) { + struct hci_dev *d; + + read_lock(&hci_dev_list_lock); + + list_for_each_entry(d, &hci_dev_list, list) { + if (test_bit(HCI_UP, &d->flags)) + enabled = true; + } + + read_unlock(&hci_dev_list_lock); + } + + led_trigger_event(bt_power_led_trigger, enabled ? LED_FULL : LED_OFF); +} + +static int power_activate(struct led_classdev *led_cdev) +{ + struct hci_basic_led_trigger *htrig; + bool powered; + + htrig = to_hci_basic_led_trigger(led_cdev->trigger); + powered = test_bit(HCI_UP, &htrig->hdev->flags); + + led_trigger_event(led_cdev->trigger, powered ? 
LED_FULL : LED_OFF); + + return 0; +} + +static struct led_trigger *led_allocate_basic(struct hci_dev *hdev, + int (*activate)(struct led_classdev *led_cdev), + const char *name) +{ + struct hci_basic_led_trigger *htrig; + + htrig = devm_kzalloc(&hdev->dev, sizeof(*htrig), GFP_KERNEL); + if (!htrig) + return NULL; + + htrig->hdev = hdev; + htrig->led_trigger.activate = activate; + htrig->led_trigger.name = devm_kasprintf(&hdev->dev, GFP_KERNEL, + "%s-%s", hdev->name, + name); + if (!htrig->led_trigger.name) + goto err_alloc; + + if (devm_led_trigger_register(&hdev->dev, &htrig->led_trigger)) + goto err_register; + + return &htrig->led_trigger; + +err_register: + devm_kfree(&hdev->dev, (void *)htrig->led_trigger.name); +err_alloc: + devm_kfree(&hdev->dev, htrig); + return NULL; +} + +void hci_leds_init(struct hci_dev *hdev) +{ + /* initialize power_led */ + hdev->power_led = led_allocate_basic(hdev, power_activate, "power"); +} + +void bt_leds_init(void) +{ + led_trigger_register_simple("bluetooth-power", &bt_power_led_trigger); +} + +void bt_leds_cleanup(void) +{ + led_trigger_unregister_simple(bt_power_led_trigger); +} diff --git a/net/bluetooth/leds.h b/net/bluetooth/leds.h new file mode 100644 index 000000000..bb5e09204 --- /dev/null +++ b/net/bluetooth/leds.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2015, Heiner Kallweit <hkallweit1@gmail.com> + */ + +#if IS_ENABLED(CONFIG_BT_LEDS) + +void hci_leds_update_powered(struct hci_dev *hdev, bool enabled); +void hci_leds_init(struct hci_dev *hdev); + +void bt_leds_init(void); +void bt_leds_cleanup(void); + +#else + +static inline void hci_leds_update_powered(struct hci_dev *hdev, + bool enabled) {} +static inline void hci_leds_init(struct hci_dev *hdev) {} + +static inline void bt_leds_init(void) {} +static inline void bt_leds_cleanup(void) {} + +#endif diff --git a/net/bluetooth/lib.c b/net/bluetooth/lib.c new file mode 100644 index 000000000..53a796ac0 --- /dev/null +++ b/net/bluetooth/lib.c @@ -0,0 +1,320 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth kernel library. 
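+ *
+ * These are small helpers shared across the stack. As an illustrative
+ * (not verbatim) caller pattern for the HCI status conversion below:
+ *
+ *	if (status)
+ *		return -bt_to_errno(status);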
*/ + +#define pr_fmt(fmt) "Bluetooth: " fmt + +#include <linux/export.h> + +#include <net/bluetooth/bluetooth.h> + +void baswap(bdaddr_t *dst, const bdaddr_t *src) +{ + const unsigned char *s = (const unsigned char *)src; + unsigned char *d = (unsigned char *)dst; + unsigned int i; + + for (i = 0; i < 6; i++) + d[i] = s[5 - i]; +} +EXPORT_SYMBOL(baswap); + +/* Bluetooth error codes to Unix errno mapping */ +int bt_to_errno(__u16 code) +{ + switch (code) { + case 0: + return 0; + + case 0x01: + return EBADRQC; + + case 0x02: + return ENOTCONN; + + case 0x03: + return EIO; + + case 0x04: + case 0x3c: + return EHOSTDOWN; + + case 0x05: + return EACCES; + + case 0x06: + return EBADE; + + case 0x07: + return ENOMEM; + + case 0x08: + return ETIMEDOUT; + + case 0x09: + return EMLINK; + + case 0x0a: + return EMLINK; + + case 0x0b: + return EALREADY; + + case 0x0c: + return EBUSY; + + case 0x0d: + case 0x0e: + case 0x0f: + return ECONNREFUSED; + + case 0x10: + return ETIMEDOUT; + + case 0x11: + case 0x27: + case 0x29: + case 0x20: + return EOPNOTSUPP; + + case 0x12: + return EINVAL; + + case 0x13: + case 0x14: + case 0x15: + return ECONNRESET; + + case 0x16: + return ECONNABORTED; + + case 0x17: + return ELOOP; + + case 0x18: + return EACCES; + + case 0x1a: + return EPROTONOSUPPORT; + + case 0x1b: + return ECONNREFUSED; + + case 0x19: + case 0x1e: + case 0x23: + case 0x24: + case 0x25: + return EPROTO; + + default: + return ENOSYS; + } +} +EXPORT_SYMBOL(bt_to_errno); + +/* Unix errno to Bluetooth error codes mapping */ +__u8 bt_status(int err) +{ + /* Don't convert if already positive value */ + if (err >= 0) + return err; + + switch (err) { + case -EBADRQC: + return 0x01; + + case -ENOTCONN: + return 0x02; + + case -EIO: + return 0x03; + + case -EHOSTDOWN: + return 0x04; + + case -EACCES: + return 0x05; + + case -EBADE: + return 0x06; + + case -ENOMEM: + return 0x07; + + case -ETIMEDOUT: + return 0x08; + + case -EMLINK: + return 0x09; + + case -EALREADY: + return 0x0b; + + case -EBUSY: + return 0x0c; + + case -ECONNREFUSED: + return 0x0d; + + case -EOPNOTSUPP: + return 0x11; + + case -EINVAL: + return 0x12; + + case -ECONNRESET: + return 0x13; + + case -ECONNABORTED: + return 0x16; + + case -ELOOP: + return 0x17; + + case -EPROTONOSUPPORT: + return 0x1a; + + case -EPROTO: + return 0x19; + + default: + return 0x1f; + } +} +EXPORT_SYMBOL(bt_status); + +void bt_info(const char *format, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + pr_info("%pV", &vaf); + + va_end(args); +} +EXPORT_SYMBOL(bt_info); + +void bt_warn(const char *format, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + pr_warn("%pV", &vaf); + + va_end(args); +} +EXPORT_SYMBOL(bt_warn); + +void bt_err(const char *format, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + pr_err("%pV", &vaf); + + va_end(args); +} +EXPORT_SYMBOL(bt_err); + +#ifdef CONFIG_BT_FEATURE_DEBUG +static bool debug_enable; + +void bt_dbg_set(bool enable) +{ + debug_enable = enable; +} + +bool bt_dbg_get(void) +{ + return debug_enable; +} + +void bt_dbg(const char *format, ...) 
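+/* Runtime-switchable debug output: this stays silent until
+ * bt_dbg_set(true) flips debug_enable (wired up through a management
+ * interface experimental feature). With CONFIG_BT_FEATURE_DEBUG the
+ * BT_DBG() macro is expected to route here rather than to pr_debug().
+ */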
+{ + struct va_format vaf; + va_list args; + + if (likely(!debug_enable)) + return; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + printk(KERN_DEBUG pr_fmt("%pV"), &vaf); + + va_end(args); +} +EXPORT_SYMBOL(bt_dbg); +#endif + +void bt_warn_ratelimited(const char *format, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + pr_warn_ratelimited("%pV", &vaf); + + va_end(args); +} +EXPORT_SYMBOL(bt_warn_ratelimited); + +void bt_err_ratelimited(const char *format, ...) +{ + struct va_format vaf; + va_list args; + + va_start(args, format); + + vaf.fmt = format; + vaf.va = &args; + + pr_err_ratelimited("%pV", &vaf); + + va_end(args); +} +EXPORT_SYMBOL(bt_err_ratelimited); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c new file mode 100644 index 000000000..6d631a2e6 --- /dev/null +++ b/net/bluetooth/mgmt.c @@ -0,0 +1,10571 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + + Copyright (C) 2010 Nokia Corporation + Copyright (C) 2011-2012 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. 
+*/ + +/* Bluetooth HCI Management interface */ + +#include <linux/module.h> +#include <asm/unaligned.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/hci_sock.h> +#include <net/bluetooth/l2cap.h> +#include <net/bluetooth/mgmt.h> + +#include "hci_request.h" +#include "smp.h" +#include "mgmt_util.h" +#include "mgmt_config.h" +#include "msft.h" +#include "eir.h" +#include "aosp.h" + +#define MGMT_VERSION 1 +#define MGMT_REVISION 22 + +static const u16 mgmt_commands[] = { + MGMT_OP_READ_INDEX_LIST, + MGMT_OP_READ_INFO, + MGMT_OP_SET_POWERED, + MGMT_OP_SET_DISCOVERABLE, + MGMT_OP_SET_CONNECTABLE, + MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_OP_SET_BONDABLE, + MGMT_OP_SET_LINK_SECURITY, + MGMT_OP_SET_SSP, + MGMT_OP_SET_HS, + MGMT_OP_SET_LE, + MGMT_OP_SET_DEV_CLASS, + MGMT_OP_SET_LOCAL_NAME, + MGMT_OP_ADD_UUID, + MGMT_OP_REMOVE_UUID, + MGMT_OP_LOAD_LINK_KEYS, + MGMT_OP_LOAD_LONG_TERM_KEYS, + MGMT_OP_DISCONNECT, + MGMT_OP_GET_CONNECTIONS, + MGMT_OP_PIN_CODE_REPLY, + MGMT_OP_PIN_CODE_NEG_REPLY, + MGMT_OP_SET_IO_CAPABILITY, + MGMT_OP_PAIR_DEVICE, + MGMT_OP_CANCEL_PAIR_DEVICE, + MGMT_OP_UNPAIR_DEVICE, + MGMT_OP_USER_CONFIRM_REPLY, + MGMT_OP_USER_CONFIRM_NEG_REPLY, + MGMT_OP_USER_PASSKEY_REPLY, + MGMT_OP_USER_PASSKEY_NEG_REPLY, + MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_OP_REMOVE_REMOTE_OOB_DATA, + MGMT_OP_START_DISCOVERY, + MGMT_OP_STOP_DISCOVERY, + MGMT_OP_CONFIRM_NAME, + MGMT_OP_BLOCK_DEVICE, + MGMT_OP_UNBLOCK_DEVICE, + MGMT_OP_SET_DEVICE_ID, + MGMT_OP_SET_ADVERTISING, + MGMT_OP_SET_BREDR, + MGMT_OP_SET_STATIC_ADDRESS, + MGMT_OP_SET_SCAN_PARAMS, + MGMT_OP_SET_SECURE_CONN, + MGMT_OP_SET_DEBUG_KEYS, + MGMT_OP_SET_PRIVACY, + MGMT_OP_LOAD_IRKS, + MGMT_OP_GET_CONN_INFO, + MGMT_OP_GET_CLOCK_INFO, + MGMT_OP_ADD_DEVICE, + MGMT_OP_REMOVE_DEVICE, + MGMT_OP_LOAD_CONN_PARAM, + MGMT_OP_READ_UNCONF_INDEX_LIST, + MGMT_OP_READ_CONFIG_INFO, + MGMT_OP_SET_EXTERNAL_CONFIG, + MGMT_OP_SET_PUBLIC_ADDRESS, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, + MGMT_OP_READ_EXT_INDEX_LIST, + MGMT_OP_READ_ADV_FEATURES, + MGMT_OP_ADD_ADVERTISING, + MGMT_OP_REMOVE_ADVERTISING, + MGMT_OP_GET_ADV_SIZE_INFO, + MGMT_OP_START_LIMITED_DISCOVERY, + MGMT_OP_READ_EXT_INFO, + MGMT_OP_SET_APPEARANCE, + MGMT_OP_GET_PHY_CONFIGURATION, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_OP_SET_BLOCKED_KEYS, + MGMT_OP_SET_WIDEBAND_SPEECH, + MGMT_OP_READ_CONTROLLER_CAP, + MGMT_OP_READ_EXP_FEATURES_INFO, + MGMT_OP_SET_EXP_FEATURE, + MGMT_OP_READ_DEF_SYSTEM_CONFIG, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_OP_READ_DEF_RUNTIME_CONFIG, + MGMT_OP_SET_DEF_RUNTIME_CONFIG, + MGMT_OP_GET_DEVICE_FLAGS, + MGMT_OP_SET_DEVICE_FLAGS, + MGMT_OP_READ_ADV_MONITOR_FEATURES, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR, + MGMT_OP_REMOVE_ADV_MONITOR, + MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, + MGMT_OP_SET_MESH_RECEIVER, + MGMT_OP_MESH_READ_FEATURES, + MGMT_OP_MESH_SEND, + MGMT_OP_MESH_SEND_CANCEL, +}; + +static const u16 mgmt_events[] = { + MGMT_EV_CONTROLLER_ERROR, + MGMT_EV_INDEX_ADDED, + MGMT_EV_INDEX_REMOVED, + MGMT_EV_NEW_SETTINGS, + MGMT_EV_CLASS_OF_DEV_CHANGED, + MGMT_EV_LOCAL_NAME_CHANGED, + MGMT_EV_NEW_LINK_KEY, + MGMT_EV_NEW_LONG_TERM_KEY, + MGMT_EV_DEVICE_CONNECTED, + MGMT_EV_DEVICE_DISCONNECTED, + MGMT_EV_CONNECT_FAILED, + MGMT_EV_PIN_CODE_REQUEST, + MGMT_EV_USER_CONFIRM_REQUEST, + MGMT_EV_USER_PASSKEY_REQUEST, + MGMT_EV_AUTH_FAILED, + MGMT_EV_DEVICE_FOUND, + MGMT_EV_DISCOVERING, + MGMT_EV_DEVICE_BLOCKED, + 
MGMT_EV_DEVICE_UNBLOCKED, + MGMT_EV_DEVICE_UNPAIRED, + MGMT_EV_PASSKEY_NOTIFY, + MGMT_EV_NEW_IRK, + MGMT_EV_NEW_CSRK, + MGMT_EV_DEVICE_ADDED, + MGMT_EV_DEVICE_REMOVED, + MGMT_EV_NEW_CONN_PARAM, + MGMT_EV_UNCONF_INDEX_ADDED, + MGMT_EV_UNCONF_INDEX_REMOVED, + MGMT_EV_NEW_CONFIG_OPTIONS, + MGMT_EV_EXT_INDEX_ADDED, + MGMT_EV_EXT_INDEX_REMOVED, + MGMT_EV_LOCAL_OOB_DATA_UPDATED, + MGMT_EV_ADVERTISING_ADDED, + MGMT_EV_ADVERTISING_REMOVED, + MGMT_EV_EXT_INFO_CHANGED, + MGMT_EV_PHY_CONFIGURATION_CHANGED, + MGMT_EV_EXP_FEATURE_CHANGED, + MGMT_EV_DEVICE_FLAGS_CHANGED, + MGMT_EV_ADV_MONITOR_ADDED, + MGMT_EV_ADV_MONITOR_REMOVED, + MGMT_EV_CONTROLLER_SUSPEND, + MGMT_EV_CONTROLLER_RESUME, + MGMT_EV_ADV_MONITOR_DEVICE_FOUND, + MGMT_EV_ADV_MONITOR_DEVICE_LOST, +}; + +static const u16 mgmt_untrusted_commands[] = { + MGMT_OP_READ_INDEX_LIST, + MGMT_OP_READ_INFO, + MGMT_OP_READ_UNCONF_INDEX_LIST, + MGMT_OP_READ_CONFIG_INFO, + MGMT_OP_READ_EXT_INDEX_LIST, + MGMT_OP_READ_EXT_INFO, + MGMT_OP_READ_CONTROLLER_CAP, + MGMT_OP_READ_EXP_FEATURES_INFO, + MGMT_OP_READ_DEF_SYSTEM_CONFIG, + MGMT_OP_READ_DEF_RUNTIME_CONFIG, +}; + +static const u16 mgmt_untrusted_events[] = { + MGMT_EV_INDEX_ADDED, + MGMT_EV_INDEX_REMOVED, + MGMT_EV_NEW_SETTINGS, + MGMT_EV_CLASS_OF_DEV_CHANGED, + MGMT_EV_LOCAL_NAME_CHANGED, + MGMT_EV_UNCONF_INDEX_ADDED, + MGMT_EV_UNCONF_INDEX_REMOVED, + MGMT_EV_NEW_CONFIG_OPTIONS, + MGMT_EV_EXT_INDEX_ADDED, + MGMT_EV_EXT_INDEX_REMOVED, + MGMT_EV_EXT_INFO_CHANGED, + MGMT_EV_EXP_FEATURE_CHANGED, +}; + +#define CACHE_TIMEOUT msecs_to_jiffies(2 * 1000) + +#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \ + "\x00\x00\x00\x00\x00\x00\x00\x00" + +/* HCI to MGMT error code conversion table */ +static const u8 mgmt_status_table[] = { + MGMT_STATUS_SUCCESS, + MGMT_STATUS_UNKNOWN_COMMAND, /* Unknown Command */ + MGMT_STATUS_NOT_CONNECTED, /* No Connection */ + MGMT_STATUS_FAILED, /* Hardware Failure */ + MGMT_STATUS_CONNECT_FAILED, /* Page Timeout */ + MGMT_STATUS_AUTH_FAILED, /* Authentication Failed */ + MGMT_STATUS_AUTH_FAILED, /* PIN or Key Missing */ + MGMT_STATUS_NO_RESOURCES, /* Memory Full */ + MGMT_STATUS_TIMEOUT, /* Connection Timeout */ + MGMT_STATUS_NO_RESOURCES, /* Max Number of Connections */ + MGMT_STATUS_NO_RESOURCES, /* Max Number of SCO Connections */ + MGMT_STATUS_ALREADY_CONNECTED, /* ACL Connection Exists */ + MGMT_STATUS_BUSY, /* Command Disallowed */ + MGMT_STATUS_NO_RESOURCES, /* Rejected Limited Resources */ + MGMT_STATUS_REJECTED, /* Rejected Security */ + MGMT_STATUS_REJECTED, /* Rejected Personal */ + MGMT_STATUS_TIMEOUT, /* Host Timeout */ + MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Feature */ + MGMT_STATUS_INVALID_PARAMS, /* Invalid Parameters */ + MGMT_STATUS_DISCONNECTED, /* OE User Ended Connection */ + MGMT_STATUS_NO_RESOURCES, /* OE Low Resources */ + MGMT_STATUS_DISCONNECTED, /* OE Power Off */ + MGMT_STATUS_DISCONNECTED, /* Connection Terminated */ + MGMT_STATUS_BUSY, /* Repeated Attempts */ + MGMT_STATUS_REJECTED, /* Pairing Not Allowed */ + MGMT_STATUS_FAILED, /* Unknown LMP PDU */ + MGMT_STATUS_NOT_SUPPORTED, /* Unsupported Remote Feature */ + MGMT_STATUS_REJECTED, /* SCO Offset Rejected */ + MGMT_STATUS_REJECTED, /* SCO Interval Rejected */ + MGMT_STATUS_REJECTED, /* Air Mode Rejected */ + MGMT_STATUS_INVALID_PARAMS, /* Invalid LMP Parameters */ + MGMT_STATUS_FAILED, /* Unspecified Error */ + MGMT_STATUS_NOT_SUPPORTED, /* Unsupported LMP Parameter Value */ + MGMT_STATUS_FAILED, /* Role Change Not Allowed */ + MGMT_STATUS_TIMEOUT, /* LMP Response Timeout */ + 
MGMT_STATUS_FAILED, /* LMP Error Transaction Collision */ + MGMT_STATUS_FAILED, /* LMP PDU Not Allowed */ + MGMT_STATUS_REJECTED, /* Encryption Mode Not Accepted */ + MGMT_STATUS_FAILED, /* Unit Link Key Used */ + MGMT_STATUS_NOT_SUPPORTED, /* QoS Not Supported */ + MGMT_STATUS_TIMEOUT, /* Instant Passed */ + MGMT_STATUS_NOT_SUPPORTED, /* Pairing Not Supported */ + MGMT_STATUS_FAILED, /* Transaction Collision */ + MGMT_STATUS_FAILED, /* Reserved for future use */ + MGMT_STATUS_INVALID_PARAMS, /* Unacceptable Parameter */ + MGMT_STATUS_REJECTED, /* QoS Rejected */ + MGMT_STATUS_NOT_SUPPORTED, /* Classification Not Supported */ + MGMT_STATUS_REJECTED, /* Insufficient Security */ + MGMT_STATUS_INVALID_PARAMS, /* Parameter Out Of Range */ + MGMT_STATUS_FAILED, /* Reserved for future use */ + MGMT_STATUS_BUSY, /* Role Switch Pending */ + MGMT_STATUS_FAILED, /* Reserved for future use */ + MGMT_STATUS_FAILED, /* Slot Violation */ + MGMT_STATUS_FAILED, /* Role Switch Failed */ + MGMT_STATUS_INVALID_PARAMS, /* EIR Too Large */ + MGMT_STATUS_NOT_SUPPORTED, /* Simple Pairing Not Supported */ + MGMT_STATUS_BUSY, /* Host Busy Pairing */ + MGMT_STATUS_REJECTED, /* Rejected, No Suitable Channel */ + MGMT_STATUS_BUSY, /* Controller Busy */ + MGMT_STATUS_INVALID_PARAMS, /* Unsuitable Connection Interval */ + MGMT_STATUS_TIMEOUT, /* Directed Advertising Timeout */ + MGMT_STATUS_AUTH_FAILED, /* Terminated Due to MIC Failure */ + MGMT_STATUS_CONNECT_FAILED, /* Connection Establishment Failed */ + MGMT_STATUS_CONNECT_FAILED, /* MAC Connection Failed */ +}; + +static u8 mgmt_errno_status(int err) +{ + switch (err) { + case 0: + return MGMT_STATUS_SUCCESS; + case -EPERM: + return MGMT_STATUS_REJECTED; + case -EINVAL: + return MGMT_STATUS_INVALID_PARAMS; + case -EOPNOTSUPP: + return MGMT_STATUS_NOT_SUPPORTED; + case -EBUSY: + return MGMT_STATUS_BUSY; + case -ETIMEDOUT: + return MGMT_STATUS_AUTH_FAILED; + case -ENOMEM: + return MGMT_STATUS_NO_RESOURCES; + case -EISCONN: + return MGMT_STATUS_ALREADY_CONNECTED; + case -ENOTCONN: + return MGMT_STATUS_DISCONNECTED; + } + + return MGMT_STATUS_FAILED; +} + +static u8 mgmt_status(int err) +{ + if (err < 0) + return mgmt_errno_status(err); + + if (err < ARRAY_SIZE(mgmt_status_table)) + return mgmt_status_table[err]; + + return MGMT_STATUS_FAILED; +} + +static int mgmt_index_event(u16 event, struct hci_dev *hdev, void *data, + u16 len, int flag) +{ + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, + flag, NULL); +} + +static int mgmt_limited_event(u16 event, struct hci_dev *hdev, void *data, + u16 len, int flag, struct sock *skip_sk) +{ + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, + flag, skip_sk); +} + +static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len, + struct sock *skip_sk) +{ + return mgmt_send_event(event, hdev, HCI_CHANNEL_CONTROL, data, len, + HCI_SOCK_TRUSTED, skip_sk); +} + +static int mgmt_event_skb(struct sk_buff *skb, struct sock *skip_sk) +{ + return mgmt_send_event_skb(HCI_CHANNEL_CONTROL, skb, HCI_SOCK_TRUSTED, + skip_sk); +} + +static u8 le_addr_type(u8 mgmt_addr_type) +{ + if (mgmt_addr_type == BDADDR_LE_PUBLIC) + return ADDR_LE_DEV_PUBLIC; + else + return ADDR_LE_DEV_RANDOM; +} + +void mgmt_fill_version_info(void *ver) +{ + struct mgmt_rp_read_version *rp = ver; + + rp->version = MGMT_VERSION; + rp->revision = cpu_to_le16(MGMT_REVISION); +} + +static int read_version(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct mgmt_rp_read_version rp; + + 
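+ /* This command is reachable from user space over the HCI control
+ * channel. A hedged sketch of a minimal client (constants and
+ * structure layouts as in the mgmt API headers; worth
+ * double-checking against lib/mgmt.h before relying on it):
+ *
+ *	int fd = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
+ *	struct sockaddr_hci a = {
+ *		.hci_family = AF_BLUETOOTH,
+ *		.hci_dev = HCI_DEV_NONE,
+ *		.hci_channel = HCI_CHANNEL_CONTROL,
+ *	};
+ *	struct mgmt_hdr hdr = {
+ *		.opcode = htobs(MGMT_OP_READ_VERSION),
+ *		.index = htobs(MGMT_INDEX_NONE),
+ *	};
+ *
+ *	bind(fd, (struct sockaddr *)&a, sizeof(a));
+ *	write(fd, &hdr, sizeof(hdr));
+ *	followed by read()ing the MGMT_EV_CMD_COMPLETE event that
+ *	carries struct mgmt_rp_read_version.
+ */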
bt_dev_dbg(hdev, "sock %p", sk); + + mgmt_fill_version_info(&rp); + + return mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_VERSION, 0, + &rp, sizeof(rp)); +} + +static int read_commands(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct mgmt_rp_read_commands *rp; + u16 num_commands, num_events; + size_t rp_size; + int i, err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) { + num_commands = ARRAY_SIZE(mgmt_commands); + num_events = ARRAY_SIZE(mgmt_events); + } else { + num_commands = ARRAY_SIZE(mgmt_untrusted_commands); + num_events = ARRAY_SIZE(mgmt_untrusted_events); + } + + rp_size = sizeof(*rp) + ((num_commands + num_events) * sizeof(u16)); + + rp = kmalloc(rp_size, GFP_KERNEL); + if (!rp) + return -ENOMEM; + + rp->num_commands = cpu_to_le16(num_commands); + rp->num_events = cpu_to_le16(num_events); + + if (hci_sock_test_flag(sk, HCI_SOCK_TRUSTED)) { + __le16 *opcode = rp->opcodes; + + for (i = 0; i < num_commands; i++, opcode++) + put_unaligned_le16(mgmt_commands[i], opcode); + + for (i = 0; i < num_events; i++, opcode++) + put_unaligned_le16(mgmt_events[i], opcode); + } else { + __le16 *opcode = rp->opcodes; + + for (i = 0; i < num_commands; i++, opcode++) + put_unaligned_le16(mgmt_untrusted_commands[i], opcode); + + for (i = 0; i < num_events; i++, opcode++) + put_unaligned_le16(mgmt_untrusted_events[i], opcode); + } + + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_COMMANDS, 0, + rp, rp_size); + kfree(rp); + + return err; +} + +static int read_index_list(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct mgmt_rp_read_index_list *rp; + struct hci_dev *d; + size_t rp_len; + u16 count; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + read_lock(&hci_dev_list_lock); + + count = 0; + list_for_each_entry(d, &hci_dev_list, list) { + if (d->dev_type == HCI_PRIMARY && + !hci_dev_test_flag(d, HCI_UNCONFIGURED)) + count++; + } + + rp_len = sizeof(*rp) + (2 * count); + rp = kmalloc(rp_len, GFP_ATOMIC); + if (!rp) { + read_unlock(&hci_dev_list_lock); + return -ENOMEM; + } + + count = 0; + list_for_each_entry(d, &hci_dev_list, list) { + if (hci_dev_test_flag(d, HCI_SETUP) || + hci_dev_test_flag(d, HCI_CONFIG) || + hci_dev_test_flag(d, HCI_USER_CHANNEL)) + continue; + + /* Devices marked as raw-only are neither configured + * nor unconfigured controllers. 
+ */ + if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) + continue; + + if (d->dev_type == HCI_PRIMARY && + !hci_dev_test_flag(d, HCI_UNCONFIGURED)) { + rp->index[count++] = cpu_to_le16(d->id); + bt_dev_dbg(hdev, "Added hci%u", d->id); + } + } + + rp->num_controllers = cpu_to_le16(count); + rp_len = sizeof(*rp) + (2 * count); + + read_unlock(&hci_dev_list_lock); + + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, MGMT_OP_READ_INDEX_LIST, + 0, rp, rp_len); + + kfree(rp); + + return err; +} + +static int read_unconf_index_list(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_rp_read_unconf_index_list *rp; + struct hci_dev *d; + size_t rp_len; + u16 count; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + read_lock(&hci_dev_list_lock); + + count = 0; + list_for_each_entry(d, &hci_dev_list, list) { + if (d->dev_type == HCI_PRIMARY && + hci_dev_test_flag(d, HCI_UNCONFIGURED)) + count++; + } + + rp_len = sizeof(*rp) + (2 * count); + rp = kmalloc(rp_len, GFP_ATOMIC); + if (!rp) { + read_unlock(&hci_dev_list_lock); + return -ENOMEM; + } + + count = 0; + list_for_each_entry(d, &hci_dev_list, list) { + if (hci_dev_test_flag(d, HCI_SETUP) || + hci_dev_test_flag(d, HCI_CONFIG) || + hci_dev_test_flag(d, HCI_USER_CHANNEL)) + continue; + + /* Devices marked as raw-only are neither configured + * nor unconfigured controllers. + */ + if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) + continue; + + if (d->dev_type == HCI_PRIMARY && + hci_dev_test_flag(d, HCI_UNCONFIGURED)) { + rp->index[count++] = cpu_to_le16(d->id); + bt_dev_dbg(hdev, "Added hci%u", d->id); + } + } + + rp->num_controllers = cpu_to_le16(count); + rp_len = sizeof(*rp) + (2 * count); + + read_unlock(&hci_dev_list_lock); + + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, + MGMT_OP_READ_UNCONF_INDEX_LIST, 0, rp, rp_len); + + kfree(rp); + + return err; +} + +static int read_ext_index_list(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_rp_read_ext_index_list *rp; + struct hci_dev *d; + u16 count; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + read_lock(&hci_dev_list_lock); + + count = 0; + list_for_each_entry(d, &hci_dev_list, list) { + if (d->dev_type == HCI_PRIMARY || d->dev_type == HCI_AMP) + count++; + } + + rp = kmalloc(struct_size(rp, entry, count), GFP_ATOMIC); + if (!rp) { + read_unlock(&hci_dev_list_lock); + return -ENOMEM; + } + + count = 0; + list_for_each_entry(d, &hci_dev_list, list) { + if (hci_dev_test_flag(d, HCI_SETUP) || + hci_dev_test_flag(d, HCI_CONFIG) || + hci_dev_test_flag(d, HCI_USER_CHANNEL)) + continue; + + /* Devices marked as raw-only are neither configured + * nor unconfigured controllers. + */ + if (test_bit(HCI_QUIRK_RAW_DEVICE, &d->quirks)) + continue; + + if (d->dev_type == HCI_PRIMARY) { + if (hci_dev_test_flag(d, HCI_UNCONFIGURED)) + rp->entry[count].type = 0x01; + else + rp->entry[count].type = 0x00; + } else if (d->dev_type == HCI_AMP) { + rp->entry[count].type = 0x02; + } else { + continue; + } + + rp->entry[count].bus = d->bus; + rp->entry[count++].index = cpu_to_le16(d->id); + bt_dev_dbg(hdev, "Added hci%u", d->id); + } + + rp->num_controllers = cpu_to_le16(count); + + read_unlock(&hci_dev_list_lock); + + /* If this command is called at least once, then all the + * default index and unconfigured index events are disabled + * and from now on only extended index events are used. 
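+ * The event flags below are per socket, so other mgmt sockets keep receiving the legacy events.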
+ */ + hci_sock_set_flag(sk, HCI_MGMT_EXT_INDEX_EVENTS); + hci_sock_clear_flag(sk, HCI_MGMT_INDEX_EVENTS); + hci_sock_clear_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS); + + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, + MGMT_OP_READ_EXT_INDEX_LIST, 0, rp, + struct_size(rp, entry, count)); + + kfree(rp); + + return err; +} + +static bool is_configured(struct hci_dev *hdev) +{ + if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && + !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED)) + return false; + + if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || + test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) && + !bacmp(&hdev->public_addr, BDADDR_ANY)) + return false; + + return true; +} + +static __le32 get_missing_options(struct hci_dev *hdev) +{ + u32 options = 0; + + if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) && + !hci_dev_test_flag(hdev, HCI_EXT_CONFIGURED)) + options |= MGMT_OPTION_EXTERNAL_CONFIG; + + if ((test_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks) || + test_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks)) && + !bacmp(&hdev->public_addr, BDADDR_ANY)) + options |= MGMT_OPTION_PUBLIC_ADDRESS; + + return cpu_to_le32(options); +} + +static int new_options(struct hci_dev *hdev, struct sock *skip) +{ + __le32 options = get_missing_options(hdev); + + return mgmt_limited_event(MGMT_EV_NEW_CONFIG_OPTIONS, hdev, &options, + sizeof(options), HCI_MGMT_OPTION_EVENTS, skip); +} + +static int send_options_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) +{ + __le32 options = get_missing_options(hdev); + + return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &options, + sizeof(options)); +} + +static int read_config_info(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_rp_read_config_info rp; + u32 options = 0; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + memset(&rp, 0, sizeof(rp)); + rp.manufacturer = cpu_to_le16(hdev->manufacturer); + + if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks)) + options |= MGMT_OPTION_EXTERNAL_CONFIG; + + if (hdev->set_bdaddr) + options |= MGMT_OPTION_PUBLIC_ADDRESS; + + rp.supported_options = cpu_to_le32(options); + rp.missing_options = get_missing_options(hdev); + + hci_dev_unlock(hdev); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONFIG_INFO, 0, + &rp, sizeof(rp)); +} + +static u32 get_supported_phys(struct hci_dev *hdev) +{ + u32 supported_phys = 0; + + if (lmp_bredr_capable(hdev)) { + supported_phys |= MGMT_PHY_BR_1M_1SLOT; + + if (hdev->features[0][0] & LMP_3SLOT) + supported_phys |= MGMT_PHY_BR_1M_3SLOT; + + if (hdev->features[0][0] & LMP_5SLOT) + supported_phys |= MGMT_PHY_BR_1M_5SLOT; + + if (lmp_edr_2m_capable(hdev)) { + supported_phys |= MGMT_PHY_EDR_2M_1SLOT; + + if (lmp_edr_3slot_capable(hdev)) + supported_phys |= MGMT_PHY_EDR_2M_3SLOT; + + if (lmp_edr_5slot_capable(hdev)) + supported_phys |= MGMT_PHY_EDR_2M_5SLOT; + + if (lmp_edr_3m_capable(hdev)) { + supported_phys |= MGMT_PHY_EDR_3M_1SLOT; + + if (lmp_edr_3slot_capable(hdev)) + supported_phys |= MGMT_PHY_EDR_3M_3SLOT; + + if (lmp_edr_5slot_capable(hdev)) + supported_phys |= MGMT_PHY_EDR_3M_5SLOT; + } + } + } + + if (lmp_le_capable(hdev)) { + supported_phys |= MGMT_PHY_LE_1M_TX; + supported_phys |= MGMT_PHY_LE_1M_RX; + + if (hdev->le_features[1] & HCI_LE_PHY_2M) { + supported_phys |= MGMT_PHY_LE_2M_TX; + supported_phys |= MGMT_PHY_LE_2M_RX; + } + + if (hdev->le_features[1] & HCI_LE_PHY_CODED) { + supported_phys |= MGMT_PHY_LE_CODED_TX; + supported_phys |= MGMT_PHY_LE_CODED_RX; + } + } + + return 
supported_phys; +} + +static u32 get_selected_phys(struct hci_dev *hdev) +{ + u32 selected_phys = 0; + + if (lmp_bredr_capable(hdev)) { + selected_phys |= MGMT_PHY_BR_1M_1SLOT; + + if (hdev->pkt_type & (HCI_DM3 | HCI_DH3)) + selected_phys |= MGMT_PHY_BR_1M_3SLOT; + + if (hdev->pkt_type & (HCI_DM5 | HCI_DH5)) + selected_phys |= MGMT_PHY_BR_1M_5SLOT; + + if (lmp_edr_2m_capable(hdev)) { + if (!(hdev->pkt_type & HCI_2DH1)) + selected_phys |= MGMT_PHY_EDR_2M_1SLOT; + + if (lmp_edr_3slot_capable(hdev) && + !(hdev->pkt_type & HCI_2DH3)) + selected_phys |= MGMT_PHY_EDR_2M_3SLOT; + + if (lmp_edr_5slot_capable(hdev) && + !(hdev->pkt_type & HCI_2DH5)) + selected_phys |= MGMT_PHY_EDR_2M_5SLOT; + + if (lmp_edr_3m_capable(hdev)) { + if (!(hdev->pkt_type & HCI_3DH1)) + selected_phys |= MGMT_PHY_EDR_3M_1SLOT; + + if (lmp_edr_3slot_capable(hdev) && + !(hdev->pkt_type & HCI_3DH3)) + selected_phys |= MGMT_PHY_EDR_3M_3SLOT; + + if (lmp_edr_5slot_capable(hdev) && + !(hdev->pkt_type & HCI_3DH5)) + selected_phys |= MGMT_PHY_EDR_3M_5SLOT; + } + } + } + + if (lmp_le_capable(hdev)) { + if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_1M) + selected_phys |= MGMT_PHY_LE_1M_TX; + + if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_1M) + selected_phys |= MGMT_PHY_LE_1M_RX; + + if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_2M) + selected_phys |= MGMT_PHY_LE_2M_TX; + + if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_2M) + selected_phys |= MGMT_PHY_LE_2M_RX; + + if (hdev->le_tx_def_phys & HCI_LE_SET_PHY_CODED) + selected_phys |= MGMT_PHY_LE_CODED_TX; + + if (hdev->le_rx_def_phys & HCI_LE_SET_PHY_CODED) + selected_phys |= MGMT_PHY_LE_CODED_RX; + } + + return selected_phys; +} + +static u32 get_configurable_phys(struct hci_dev *hdev) +{ + return (get_supported_phys(hdev) & ~MGMT_PHY_BR_1M_1SLOT & + ~MGMT_PHY_LE_1M_TX & ~MGMT_PHY_LE_1M_RX); +} + +static u32 get_supported_settings(struct hci_dev *hdev) +{ + u32 settings = 0; + + settings |= MGMT_SETTING_POWERED; + settings |= MGMT_SETTING_BONDABLE; + settings |= MGMT_SETTING_DEBUG_KEYS; + settings |= MGMT_SETTING_CONNECTABLE; + settings |= MGMT_SETTING_DISCOVERABLE; + + if (lmp_bredr_capable(hdev)) { + if (hdev->hci_ver >= BLUETOOTH_VER_1_2) + settings |= MGMT_SETTING_FAST_CONNECTABLE; + settings |= MGMT_SETTING_BREDR; + settings |= MGMT_SETTING_LINK_SECURITY; + + if (lmp_ssp_capable(hdev)) { + settings |= MGMT_SETTING_SSP; + if (IS_ENABLED(CONFIG_BT_HS)) + settings |= MGMT_SETTING_HS; + } + + if (lmp_sc_capable(hdev)) + settings |= MGMT_SETTING_SECURE_CONN; + + if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, + &hdev->quirks)) + settings |= MGMT_SETTING_WIDEBAND_SPEECH; + } + + if (lmp_le_capable(hdev)) { + settings |= MGMT_SETTING_LE; + settings |= MGMT_SETTING_SECURE_CONN; + settings |= MGMT_SETTING_PRIVACY; + settings |= MGMT_SETTING_STATIC_ADDRESS; + settings |= MGMT_SETTING_ADVERTISING; + } + + if (test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks) || + hdev->set_bdaddr) + settings |= MGMT_SETTING_CONFIGURATION; + + if (cis_central_capable(hdev)) + settings |= MGMT_SETTING_CIS_CENTRAL; + + if (cis_peripheral_capable(hdev)) + settings |= MGMT_SETTING_CIS_PERIPHERAL; + + settings |= MGMT_SETTING_PHY_CONFIGURATION; + + return settings; +} + +static u32 get_current_settings(struct hci_dev *hdev) +{ + u32 settings = 0; + + if (hdev_is_powered(hdev)) + settings |= MGMT_SETTING_POWERED; + + if (hci_dev_test_flag(hdev, HCI_CONNECTABLE)) + settings |= MGMT_SETTING_CONNECTABLE; + + if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) + settings |= MGMT_SETTING_FAST_CONNECTABLE; + + if 
(hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + settings |= MGMT_SETTING_DISCOVERABLE; + + if (hci_dev_test_flag(hdev, HCI_BONDABLE)) + settings |= MGMT_SETTING_BONDABLE; + + if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + settings |= MGMT_SETTING_BREDR; + + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + settings |= MGMT_SETTING_LE; + + if (hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) + settings |= MGMT_SETTING_LINK_SECURITY; + + if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + settings |= MGMT_SETTING_SSP; + + if (hci_dev_test_flag(hdev, HCI_HS_ENABLED)) + settings |= MGMT_SETTING_HS; + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + settings |= MGMT_SETTING_ADVERTISING; + + if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) + settings |= MGMT_SETTING_SECURE_CONN; + + if (hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS)) + settings |= MGMT_SETTING_DEBUG_KEYS; + + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) + settings |= MGMT_SETTING_PRIVACY; + + /* The current setting for static address has two purposes. The + * first is to indicate if the static address will be used and + * the second is to indicate if it is actually set. + * + * This means if the static address is not configured, this flag + * will never be set. If the address is configured, then whether + * the address is actually in use decides if the flag is set or not. + * + * For single mode LE only controllers and dual-mode controllers + * with BR/EDR disabled, the existence of the static address will + * be evaluated. + */ + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) || + !bacmp(&hdev->bdaddr, BDADDR_ANY)) { + if (bacmp(&hdev->static_addr, BDADDR_ANY)) + settings |= MGMT_SETTING_STATIC_ADDRESS; + } + + if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED)) + settings |= MGMT_SETTING_WIDEBAND_SPEECH; + + if (cis_central_capable(hdev)) + settings |= MGMT_SETTING_CIS_CENTRAL; + + if (cis_peripheral_capable(hdev)) + settings |= MGMT_SETTING_CIS_PERIPHERAL; + + return settings; +} + +static struct mgmt_pending_cmd *pending_find(u16 opcode, struct hci_dev *hdev) +{ + return mgmt_pending_find(HCI_CHANNEL_CONTROL, opcode, hdev); +} + +u8 mgmt_get_adv_discov_flags(struct hci_dev *hdev) +{ + struct mgmt_pending_cmd *cmd; + + /* If there's a pending mgmt command the flags will not yet have + * their final values, so check for this first. + */ + cmd = pending_find(MGMT_OP_SET_DISCOVERABLE, hdev); + if (cmd) { + struct mgmt_mode *cp = cmd->param; + if (cp->val == 0x01) + return LE_AD_GENERAL; + else if (cp->val == 0x02) + return LE_AD_LIMITED; + } else { + if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) + return LE_AD_LIMITED; + else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) + return LE_AD_GENERAL; + } + + return 0; +} + +bool mgmt_get_connectable(struct hci_dev *hdev) +{ + struct mgmt_pending_cmd *cmd; + + /* If there's a pending mgmt command the flag will not yet have + * its final value, so check for this first.
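+ * The parameter of the pending command already carries the value that will apply once the command completes.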
+ */ + cmd = pending_find(MGMT_OP_SET_CONNECTABLE, hdev); + if (cmd) { + struct mgmt_mode *cp = cmd->param; + + return cp->val; + } + + return hci_dev_test_flag(hdev, HCI_CONNECTABLE); +} + +static int service_cache_sync(struct hci_dev *hdev, void *data) +{ + hci_update_eir_sync(hdev); + hci_update_class_sync(hdev); + + return 0; +} + +static void service_cache_off(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + service_cache.work); + + if (!hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) + return; + + hci_cmd_sync_queue(hdev, service_cache_sync, NULL, NULL); +} + +static int rpa_expired_sync(struct hci_dev *hdev, void *data) +{ + /* The generation of a new RPA and programming it into the + * controller happens in the hci_req_enable_advertising() + * function. + */ + if (ext_adv_capable(hdev)) + return hci_start_ext_adv_sync(hdev, hdev->cur_adv_instance); + else + return hci_enable_advertising_sync(hdev); +} + +static void rpa_expired(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + rpa_expired.work); + + bt_dev_dbg(hdev, ""); + + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + + if (!hci_dev_test_flag(hdev, HCI_ADVERTISING)) + return; + + hci_cmd_sync_queue(hdev, rpa_expired_sync, NULL, NULL); +} + +static void discov_off(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + discov_off.work); + + bt_dev_dbg(hdev, ""); + + hci_dev_lock(hdev); + + /* When discoverable timeout triggers, then just make sure + * the limited discoverable flag is cleared. Even in the case + * of a timeout triggered from general discoverable, it is + * safe to unconditionally clear the flag. + */ + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hdev->discov_timeout = 0; + + hci_update_discoverable(hdev); + + mgmt_new_settings(hdev); + + hci_dev_unlock(hdev); +} + +static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev); + +static void mesh_send_complete(struct hci_dev *hdev, + struct mgmt_mesh_tx *mesh_tx, bool silent) +{ + u8 handle = mesh_tx->handle; + + if (!silent) + mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle, + sizeof(handle), NULL); + + mgmt_mesh_remove(mesh_tx); +} + +static int mesh_send_done_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_mesh_tx *mesh_tx; + + hci_dev_clear_flag(hdev, HCI_MESH_SENDING); + hci_disable_advertising_sync(hdev); + mesh_tx = mgmt_mesh_next(hdev, NULL); + + if (mesh_tx) + mesh_send_complete(hdev, mesh_tx, false); + + return 0; +} + +static int mesh_send_sync(struct hci_dev *hdev, void *data); +static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err); +static void mesh_next(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL); + + if (!mesh_tx) + return; + + err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx, + mesh_send_start_complete); + + if (err < 0) + mesh_send_complete(hdev, mesh_tx, false); + else + hci_dev_set_flag(hdev, HCI_MESH_SENDING); +} + +static void mesh_send_done(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + mesh_send_done.work); + + if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING)) + return; + + hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next); +} + +static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev) +{ + if (hci_dev_test_flag(hdev, HCI_MGMT)) + return; + + BT_INFO("MGMT ver %d.%d", MGMT_VERSION, 
MGMT_REVISION); + + INIT_DELAYED_WORK(&hdev->discov_off, discov_off); + INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off); + INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired); + INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done); + + /* Non-mgmt controlled devices get this bit set + * implicitly so that pairing works for them. However, + * for mgmt we require user-space to explicitly enable + * it. + */ + hci_dev_clear_flag(hdev, HCI_BONDABLE); + + hci_dev_set_flag(hdev, HCI_MGMT); +} + +static int read_controller_info(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_rp_read_info rp; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + memset(&rp, 0, sizeof(rp)); + + bacpy(&rp.bdaddr, &hdev->bdaddr); + + rp.version = hdev->hci_ver; + rp.manufacturer = cpu_to_le16(hdev->manufacturer); + + rp.supported_settings = cpu_to_le32(get_supported_settings(hdev)); + rp.current_settings = cpu_to_le32(get_current_settings(hdev)); + + memcpy(rp.dev_class, hdev->dev_class, 3); + + memcpy(rp.name, hdev->dev_name, sizeof(hdev->dev_name)); + memcpy(rp.short_name, hdev->short_name, sizeof(hdev->short_name)); + + hci_dev_unlock(hdev); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_INFO, 0, &rp, + sizeof(rp)); +} + +static u16 append_eir_data_to_buf(struct hci_dev *hdev, u8 *eir) +{ + u16 eir_len = 0; + size_t name_len; + + if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + eir_len = eir_append_data(eir, eir_len, EIR_CLASS_OF_DEV, + hdev->dev_class, 3); + + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + eir_len = eir_append_le16(eir, eir_len, EIR_APPEARANCE, + hdev->appearance); + + name_len = strnlen(hdev->dev_name, sizeof(hdev->dev_name)); + eir_len = eir_append_data(eir, eir_len, EIR_NAME_COMPLETE, + hdev->dev_name, name_len); + + name_len = strnlen(hdev->short_name, sizeof(hdev->short_name)); + eir_len = eir_append_data(eir, eir_len, EIR_NAME_SHORT, + hdev->short_name, name_len); + + return eir_len; +} + +static int read_ext_controller_info(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + char buf[512]; + struct mgmt_rp_read_ext_info *rp = (void *)buf; + u16 eir_len; + + bt_dev_dbg(hdev, "sock %p", sk); + + memset(&buf, 0, sizeof(buf)); + + hci_dev_lock(hdev); + + bacpy(&rp->bdaddr, &hdev->bdaddr); + + rp->version = hdev->hci_ver; + rp->manufacturer = cpu_to_le16(hdev->manufacturer); + + rp->supported_settings = cpu_to_le32(get_supported_settings(hdev)); + rp->current_settings = cpu_to_le32(get_current_settings(hdev)); + + eir_len = append_eir_data_to_buf(hdev, rp->eir); + rp->eir_len = cpu_to_le16(eir_len); + + hci_dev_unlock(hdev); + + /* If this command is called at least once, then the events + * for class of device and local name changes are disabled + * and only the new extended controller information event + * is used.
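+ * As with the extended index list, this opt-in is tracked per socket.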
+ */ + hci_sock_set_flag(sk, HCI_MGMT_EXT_INFO_EVENTS); + hci_sock_clear_flag(sk, HCI_MGMT_DEV_CLASS_EVENTS); + hci_sock_clear_flag(sk, HCI_MGMT_LOCAL_NAME_EVENTS); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_EXT_INFO, 0, rp, + sizeof(*rp) + eir_len); +} + +static int ext_info_changed(struct hci_dev *hdev, struct sock *skip) +{ + char buf[512]; + struct mgmt_ev_ext_info_changed *ev = (void *)buf; + u16 eir_len; + + memset(buf, 0, sizeof(buf)); + + eir_len = append_eir_data_to_buf(hdev, ev->eir); + ev->eir_len = cpu_to_le16(eir_len); + + return mgmt_limited_event(MGMT_EV_EXT_INFO_CHANGED, hdev, ev, + sizeof(*ev) + eir_len, + HCI_MGMT_EXT_INFO_EVENTS, skip); +} + +static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev) +{ + __le32 settings = cpu_to_le32(get_current_settings(hdev)); + + return mgmt_cmd_complete(sk, hdev->id, opcode, 0, &settings, + sizeof(settings)); +} + +void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev, u8 instance) +{ + struct mgmt_ev_advertising_added ev; + + ev.instance = instance; + + mgmt_event(MGMT_EV_ADVERTISING_ADDED, hdev, &ev, sizeof(ev), sk); +} + +void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev, + u8 instance) +{ + struct mgmt_ev_advertising_removed ev; + + ev.instance = instance; + + mgmt_event(MGMT_EV_ADVERTISING_REMOVED, hdev, &ev, sizeof(ev), sk); +} + +static void cancel_adv_timeout(struct hci_dev *hdev) +{ + if (hdev->adv_instance_timeout) { + hdev->adv_instance_timeout = 0; + cancel_delayed_work(&hdev->adv_instance_expire); + } +} + +/* This function requires the caller holds hdev->lock */ +static void restart_le_actions(struct hci_dev *hdev) +{ + struct hci_conn_params *p; + + list_for_each_entry(p, &hdev->le_conn_params, list) { + /* Needed for the AUTO_OFF case, where the device might not + * "really" have been powered off. + */ + hci_pend_le_list_del_init(p); + + switch (p->auto_connect) { + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + hci_pend_le_list_add(p, &hdev->pend_le_conns); + break; + case HCI_AUTO_CONN_REPORT: + hci_pend_le_list_add(p, &hdev->pend_le_reports); + break; + default: + break; + } + } +} + +static int new_settings(struct hci_dev *hdev, struct sock *skip) +{ + __le32 ev = cpu_to_le32(get_current_settings(hdev)); + + return mgmt_limited_event(MGMT_EV_NEW_SETTINGS, hdev, &ev, + sizeof(ev), HCI_MGMT_SETTING_EVENTS, skip); +} + +static void mgmt_set_powered_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp; + + /* Make sure cmd still outstanding. */ + if (cmd != pending_find(MGMT_OP_SET_POWERED, hdev)) + return; + + cp = cmd->param; + + bt_dev_dbg(hdev, "err %d", err); + + if (!err) { + if (cp->val) { + hci_dev_lock(hdev); + restart_le_actions(hdev); + hci_update_passive_scan(hdev); + hci_dev_unlock(hdev); + } + + send_settings_rsp(cmd->sk, cmd->opcode, hdev); + + /* Only call new_settings for power on, as power off is deferred + * to hdev->power_off work which does call hci_dev_do_close.
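+ * That close path is then expected to generate the settings update for the power-off transition.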
+ */ + if (cp->val) + new_settings(hdev, cmd->sk); + } else { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, + mgmt_status(err)); + } + + mgmt_pending_remove(cmd); +} + +static int set_powered_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + + BT_DBG("%s", hdev->name); + + return hci_set_powered_sync(hdev, cp->val); +} + +static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_mode *cp = data; + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (pending_find(MGMT_OP_SET_POWERED, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_POWERED, + MGMT_STATUS_BUSY); + goto failed; + } + + if (!!cp->val == hdev_is_powered(hdev)) { + err = send_settings_rsp(sk, MGMT_OP_SET_POWERED, hdev); + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_POWERED, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd, + mgmt_set_powered_complete); + + if (err < 0) + mgmt_pending_remove(cmd); + +failed: + hci_dev_unlock(hdev); + return err; +} + +int mgmt_new_settings(struct hci_dev *hdev) +{ + return new_settings(hdev, NULL); +} + +struct cmd_lookup { + struct sock *sk; + struct hci_dev *hdev; + u8 mgmt_status; +}; + +static void settings_rsp(struct mgmt_pending_cmd *cmd, void *data) +{ + struct cmd_lookup *match = data; + + send_settings_rsp(cmd->sk, cmd->opcode, match->hdev); + + list_del(&cmd->list); + + if (match->sk == NULL) { + match->sk = cmd->sk; + sock_hold(match->sk); + } + + mgmt_pending_free(cmd); +} + +static void cmd_status_rsp(struct mgmt_pending_cmd *cmd, void *data) +{ + u8 *status = data; + + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, *status); + mgmt_pending_remove(cmd); +} + +static void cmd_complete_rsp(struct mgmt_pending_cmd *cmd, void *data) +{ + if (cmd->cmd_complete) { + u8 *status = data; + + cmd->cmd_complete(cmd, *status); + mgmt_pending_remove(cmd); + + return; + } + + cmd_status_rsp(cmd, data); +} + +static int generic_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) +{ + return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + cmd->param, cmd->param_len); +} + +static int addr_cmd_complete(struct mgmt_pending_cmd *cmd, u8 status) +{ + return mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, + cmd->param, sizeof(struct mgmt_addr_info)); +} + +static u8 mgmt_bredr_support(struct hci_dev *hdev) +{ + if (!lmp_bredr_capable(hdev)) + return MGMT_STATUS_NOT_SUPPORTED; + else if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return MGMT_STATUS_REJECTED; + else + return MGMT_STATUS_SUCCESS; +} + +static u8 mgmt_le_support(struct hci_dev *hdev) +{ + if (!lmp_le_capable(hdev)) + return MGMT_STATUS_NOT_SUPPORTED; + else if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return MGMT_STATUS_REJECTED; + else + return MGMT_STATUS_SUCCESS; +} + +static void mgmt_set_discoverable_complete(struct hci_dev *hdev, void *data, + int err) +{ + struct mgmt_pending_cmd *cmd = data; + + bt_dev_dbg(hdev, "err %d", err); + + /* Make sure cmd still outstanding. 
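+ * It may already have been removed if the request was cancelled while the sync command was queued.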
*/ + if (cmd != pending_find(MGMT_OP_SET_DISCOVERABLE, hdev)) + return; + + hci_dev_lock(hdev); + + if (err) { + u8 mgmt_err = mgmt_status(err); + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && + hdev->discov_timeout > 0) { + int to = msecs_to_jiffies(hdev->discov_timeout * 1000); + queue_delayed_work(hdev->req_workqueue, &hdev->discov_off, to); + } + + send_settings_rsp(cmd->sk, MGMT_OP_SET_DISCOVERABLE, hdev); + new_settings(hdev, cmd->sk); + +done: + mgmt_pending_remove(cmd); + hci_dev_unlock(hdev); +} + +static int set_discoverable_sync(struct hci_dev *hdev, void *data) +{ + BT_DBG("%s", hdev->name); + + return hci_update_discoverable_sync(hdev); +} + +static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_discoverable *cp = data; + struct mgmt_pending_cmd *cmd; + u16 timeout; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) && + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_REJECTED); + + if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_INVALID_PARAMS); + + timeout = __le16_to_cpu(cp->timeout); + + /* Disabling discoverable requires that no timeout is set, + * and enabling limited discoverable requires a timeout. + */ + if ((cp->val == 0x00 && timeout > 0) || + (cp->val == 0x02 && timeout == 0)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev) && timeout > 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_NOT_POWERED); + goto failed; + } + + if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || + pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_BUSY); + goto failed; + } + + if (!hci_dev_test_flag(hdev, HCI_CONNECTABLE)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_REJECTED); + goto failed; + } + + if (hdev->advertising_paused) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE, + MGMT_STATUS_BUSY); + goto failed; + } + + if (!hdev_is_powered(hdev)) { + bool changed = false; + + /* Setting limited discoverable when powered off is + * not a valid operation since it requires a timeout + * and so no need to check HCI_LIMITED_DISCOVERABLE. + */ + if (!!cp->val != hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) { + hci_dev_change_flag(hdev, HCI_DISCOVERABLE); + changed = true; + } + + err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); + if (err < 0) + goto failed; + + if (changed) + err = new_settings(hdev, sk); + + goto failed; + } + + /* If the current mode is the same, then just update the timeout + * value with the new value. And if only the timeout gets updated, + * then no need for any HCI transactions. 
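+ * Only the discov_off delayed work needs to be re-armed with the new timeout in that case.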
+ */ + if (!!cp->val == hci_dev_test_flag(hdev, HCI_DISCOVERABLE) && + (cp->val == 0x02) == hci_dev_test_flag(hdev, + HCI_LIMITED_DISCOVERABLE)) { + cancel_delayed_work(&hdev->discov_off); + hdev->discov_timeout = timeout; + + if (cp->val && hdev->discov_timeout > 0) { + int to = msecs_to_jiffies(hdev->discov_timeout * 1000); + queue_delayed_work(hdev->req_workqueue, + &hdev->discov_off, to); + } + + err = send_settings_rsp(sk, MGMT_OP_SET_DISCOVERABLE, hdev); + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_DISCOVERABLE, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + /* Cancel any potential discoverable timeout that might be + * still active and store new timeout value. The arming of + * the timeout happens in the complete handler. + */ + cancel_delayed_work(&hdev->discov_off); + hdev->discov_timeout = timeout; + + if (cp->val) + hci_dev_set_flag(hdev, HCI_DISCOVERABLE); + else + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + + /* Limited discoverable mode */ + if (cp->val == 0x02) + hci_dev_set_flag(hdev, HCI_LIMITED_DISCOVERABLE); + else + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + + err = hci_cmd_sync_queue(hdev, set_discoverable_sync, cmd, + mgmt_set_discoverable_complete); + + if (err < 0) + mgmt_pending_remove(cmd); + +failed: + hci_dev_unlock(hdev); + return err; +} + +static void mgmt_set_connectable_complete(struct hci_dev *hdev, void *data, + int err) +{ + struct mgmt_pending_cmd *cmd = data; + + bt_dev_dbg(hdev, "err %d", err); + + /* Make sure cmd still outstanding. */ + if (cmd != pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) + return; + + hci_dev_lock(hdev); + + if (err) { + u8 mgmt_err = mgmt_status(err); + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + goto done; + } + + send_settings_rsp(cmd->sk, MGMT_OP_SET_CONNECTABLE, hdev); + new_settings(hdev, cmd->sk); + +done: + if (cmd) + mgmt_pending_remove(cmd); + + hci_dev_unlock(hdev); +} + +static int set_connectable_update_settings(struct hci_dev *hdev, + struct sock *sk, u8 val) +{ + bool changed = false; + int err; + + if (!!val != hci_dev_test_flag(hdev, HCI_CONNECTABLE)) + changed = true; + + if (val) { + hci_dev_set_flag(hdev, HCI_CONNECTABLE); + } else { + hci_dev_clear_flag(hdev, HCI_CONNECTABLE); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + } + + err = send_settings_rsp(sk, MGMT_OP_SET_CONNECTABLE, hdev); + if (err < 0) + return err; + + if (changed) { + hci_update_scan(hdev); + hci_update_passive_scan(hdev); + return new_settings(hdev, sk); + } + + return 0; +} + +static int set_connectable_sync(struct hci_dev *hdev, void *data) +{ + BT_DBG("%s", hdev->name); + + return hci_update_connectable_sync(hdev); +} + +static int set_connectable(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_mode *cp = data; + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) && + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, + MGMT_STATUS_REJECTED); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = set_connectable_update_settings(hdev, sk, cp->val); + goto failed; + } + + if (pending_find(MGMT_OP_SET_DISCOVERABLE, hdev) || + pending_find(MGMT_OP_SET_CONNECTABLE, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_CONNECTABLE, + MGMT_STATUS_BUSY); + 
goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_CONNECTABLE, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + if (cp->val) { + hci_dev_set_flag(hdev, HCI_CONNECTABLE); + } else { + if (hdev->discov_timeout > 0) + cancel_delayed_work(&hdev->discov_off); + + hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_CONNECTABLE); + } + + err = hci_cmd_sync_queue(hdev, set_connectable_sync, cmd, + mgmt_set_connectable_complete); + + if (err < 0) + mgmt_pending_remove(cmd); + +failed: + hci_dev_unlock(hdev); + return err; +} + +static int set_bondable(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_mode *cp = data; + bool changed; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BONDABLE, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (cp->val) + changed = !hci_dev_test_and_set_flag(hdev, HCI_BONDABLE); + else + changed = hci_dev_test_and_clear_flag(hdev, HCI_BONDABLE); + + err = send_settings_rsp(sk, MGMT_OP_SET_BONDABLE, hdev); + if (err < 0) + goto unlock; + + if (changed) { + /* In limited privacy mode the change of bondable mode + * may affect the local advertising address. + */ + hci_update_discoverable(hdev); + + err = new_settings(hdev, sk); + } + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int set_link_security(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_mode *cp = data; + struct mgmt_pending_cmd *cmd; + u8 val, status; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + status = mgmt_bredr_support(hdev); + if (status) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, + status); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + bool changed = false; + + if (!!cp->val != hci_dev_test_flag(hdev, HCI_LINK_SECURITY)) { + hci_dev_change_flag(hdev, HCI_LINK_SECURITY); + changed = true; + } + + err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev); + if (err < 0) + goto failed; + + if (changed) + err = new_settings(hdev, sk); + + goto failed; + } + + if (pending_find(MGMT_OP_SET_LINK_SECURITY, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LINK_SECURITY, + MGMT_STATUS_BUSY); + goto failed; + } + + val = !!cp->val; + + if (test_bit(HCI_AUTH, &hdev->flags) == val) { + err = send_settings_rsp(sk, MGMT_OP_SET_LINK_SECURITY, hdev); + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_LINK_SECURITY, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + err = hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, sizeof(val), &val); + if (err < 0) { + mgmt_pending_remove(cmd); + goto failed; + } + +failed: + hci_dev_unlock(hdev); + return err; +} + +static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) +{ + struct cmd_lookup match = { NULL, hdev }; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 enable = cp->val; + bool changed; + + /* Make sure cmd still outstanding. 
*/ + if (cmd != pending_find(MGMT_OP_SET_SSP, hdev)) + return; + + if (err) { + u8 mgmt_err = mgmt_status(err); + + if (enable && hci_dev_test_and_clear_flag(hdev, + HCI_SSP_ENABLED)) { + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); + new_settings(hdev, NULL); + } + + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, cmd_status_rsp, + &mgmt_err); + return; + } + + if (enable) { + changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); + } else { + changed = hci_dev_test_and_clear_flag(hdev, HCI_SSP_ENABLED); + + if (!changed) + changed = hci_dev_test_and_clear_flag(hdev, + HCI_HS_ENABLED); + else + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); + } + + mgmt_pending_foreach(MGMT_OP_SET_SSP, hdev, settings_rsp, &match); + + if (changed) + new_settings(hdev, match.sk); + + if (match.sk) + sock_put(match.sk); + + hci_update_eir_sync(hdev); +} + +static int set_ssp_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + bool changed = false; + int err; + + if (cp->val) + changed = !hci_dev_test_and_set_flag(hdev, HCI_SSP_ENABLED); + + err = hci_write_ssp_mode_sync(hdev, cp->val); + + if (!err && changed) + hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); + + return err; +} + +static int set_ssp(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ + struct mgmt_mode *cp = data; + struct mgmt_pending_cmd *cmd; + u8 status; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + status = mgmt_bredr_support(hdev); + if (status) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, status); + + if (!lmp_ssp_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_NOT_SUPPORTED); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + bool changed; + + if (cp->val) { + changed = !hci_dev_test_and_set_flag(hdev, + HCI_SSP_ENABLED); + } else { + changed = hci_dev_test_and_clear_flag(hdev, + HCI_SSP_ENABLED); + if (!changed) + changed = hci_dev_test_and_clear_flag(hdev, + HCI_HS_ENABLED); + else + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); + } + + err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); + if (err < 0) + goto failed; + + if (changed) + err = new_settings(hdev, sk); + + goto failed; + } + + if (pending_find(MGMT_OP_SET_SSP, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_BUSY); + goto failed; + } + + if (!!cp->val == hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { + err = send_settings_rsp(sk, MGMT_OP_SET_SSP, hdev); + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_SSP, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_ssp_sync, cmd, + set_ssp_complete); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SSP, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_remove(cmd); + } + +failed: + hci_dev_unlock(hdev); + return err; +} + +static int set_hs(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ + struct mgmt_mode *cp = data; + bool changed; + u8 status; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!IS_ENABLED(CONFIG_BT_HS)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_NOT_SUPPORTED); + + status = mgmt_bredr_support(hdev); + if (status) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, status); + + if (!lmp_ssp_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_NOT_SUPPORTED); + + if 
(!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_REJECTED); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (pending_find(MGMT_OP_SET_SSP, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_BUSY); + goto unlock; + } + + if (cp->val) { + changed = !hci_dev_test_and_set_flag(hdev, HCI_HS_ENABLED); + } else { + if (hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_HS, + MGMT_STATUS_REJECTED); + goto unlock; + } + + changed = hci_dev_test_and_clear_flag(hdev, HCI_HS_ENABLED); + } + + err = send_settings_rsp(sk, MGMT_OP_SET_HS, hdev); + if (err < 0) + goto unlock; + + if (changed) + err = new_settings(hdev, sk); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static void set_le_complete(struct hci_dev *hdev, void *data, int err) +{ + struct cmd_lookup match = { NULL, hdev }; + u8 status = mgmt_status(err); + + bt_dev_dbg(hdev, "err %d", err); + + if (status) { + mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, cmd_status_rsp, + &status); + return; + } + + mgmt_pending_foreach(MGMT_OP_SET_LE, hdev, settings_rsp, &match); + + new_settings(hdev, match.sk); + + if (match.sk) + sock_put(match.sk); +} + +static int set_le_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 val = !!cp->val; + int err; + + if (!val) { + hci_clear_adv_instance_sync(hdev, NULL, 0x00, true); + + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) + hci_disable_advertising_sync(hdev); + + if (ext_adv_capable(hdev)) + hci_remove_ext_adv_instance_sync(hdev, 0, cmd->sk); + } else { + hci_dev_set_flag(hdev, HCI_LE_ENABLED); + } + + err = hci_write_le_host_supported_sync(hdev, val, 0); + + /* Make sure the controller has a good default for + * advertising data. Restrict the update to when LE + * has actually been enabled. During power on, the + * update in powered_update_hci will take care of it. 
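+ * Otherwise the controller could be left advertising stale data after LE is toggled back on.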
+ */ + if (!err && hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + if (ext_adv_capable(hdev)) { + int status; + + status = hci_setup_ext_adv_instance_sync(hdev, 0x00); + if (!status) + hci_update_scan_rsp_data_sync(hdev, 0x00); + } else { + hci_update_adv_data_sync(hdev, 0x00); + hci_update_scan_rsp_data_sync(hdev, 0x00); + } + + hci_update_passive_scan(hdev); + } + + return err; +} + +static void set_mesh_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + u8 status = mgmt_status(err); + struct sock *sk = cmd->sk; + + if (status) { + mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, + cmd_status_rsp, &status); + return; + } + + mgmt_pending_remove(cmd); + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0); +} + +static int set_mesh_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_set_mesh *cp = cmd->param; + size_t len = cmd->param_len; + + memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types)); + + if (cp->enable) + hci_dev_set_flag(hdev, HCI_MESH); + else + hci_dev_clear_flag(hdev, HCI_MESH); + + len -= sizeof(*cp); + + /* If filters don't fit, forward all adv pkts */ + if (len <= sizeof(hdev->mesh_ad_types)) + memcpy(hdev->mesh_ad_types, cp->ad_types, len); + + hci_update_passive_scan_sync(hdev); + return 0; +} + +static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ + struct mgmt_cp_set_mesh *cp = data; + struct mgmt_pending_cmd *cmd; + int err = 0; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev) || + !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_NOT_SUPPORTED); + + if (cp->enable != 0x00 && cp->enable != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd, + set_mesh_complete); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_remove(cmd); + } + + hci_dev_unlock(hdev); + return err; +} + +static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_mesh_tx *mesh_tx = data; + struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param; + unsigned long mesh_send_interval; + u8 mgmt_err = mgmt_status(err); + + /* Report any errors here, but don't report completion */ + + if (mgmt_err) { + hci_dev_clear_flag(hdev, HCI_MESH_SENDING); + /* Send Complete Error Code for handle */ + mesh_send_complete(hdev, mesh_tx, false); + return; + } + + mesh_send_interval = msecs_to_jiffies((send->cnt) * 25); + queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done, + mesh_send_interval); +} + +static int mesh_send_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_mesh_tx *mesh_tx = data; + struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param; + struct adv_info *adv, *next_instance; + u8 instance = hdev->le_num_of_adv_sets + 1; + u16 timeout, duration; + int err = 0; + + if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt) + return MGMT_STATUS_BUSY; + + timeout = 1000; + duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval); + adv = hci_add_adv_instance(hdev, instance, 0, + send->adv_data_len, send->adv_data, + 0, NULL, + timeout, duration, + HCI_ADV_TX_POWER_NO_PREFERENCE, + 
hdev->le_adv_min_interval, + hdev->le_adv_max_interval, + mesh_tx->handle); + + if (!IS_ERR(adv)) + mesh_tx->instance = instance; + else + err = PTR_ERR(adv); + + if (hdev->cur_adv_instance == instance) { + /* If the currently advertised instance is being changed then + * cancel the current advertising and schedule the next + * instance. If there is only one instance then the overridden + * advertising data will be visible right away. + */ + cancel_adv_timeout(hdev); + + next_instance = hci_get_next_instance(hdev, instance); + if (next_instance) + instance = next_instance->instance; + else + instance = 0; + } else if (hdev->adv_instance_timeout) { + /* Immediately advertise the new instance if no other, or + * let it go naturally from queue if ADV is already happening + */ + instance = 0; + } + + if (instance) + return hci_schedule_adv_instance_sync(hdev, instance, true); + + return err; +} + +static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data) +{ + struct mgmt_rp_mesh_read_features *rp = data; + + if (rp->used_handles >= rp->max_handles) + return; + + rp->handles[rp->used_handles++] = mesh_tx->handle; +} + +static int mesh_features(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_rp_mesh_read_features rp; + + if (!lmp_le_capable(hdev) || + !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, + MGMT_STATUS_NOT_SUPPORTED); + + memset(&rp, 0, sizeof(rp)); + rp.index = cpu_to_le16(hdev->id); + if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + rp.max_handles = MESH_HANDLES_MAX; + + hci_dev_lock(hdev); + + if (rp.max_handles) + mgmt_mesh_foreach(hdev, send_count, &rp, sk); + + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp, + rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX); + + hci_dev_unlock(hdev); + return 0; +} + +static int send_cancel(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param; + struct mgmt_mesh_tx *mesh_tx; + + if (!cancel->handle) { + do { + mesh_tx = mgmt_mesh_next(hdev, cmd->sk); + + if (mesh_tx) + mesh_send_complete(hdev, mesh_tx, false); + } while (mesh_tx); + } else { + mesh_tx = mgmt_mesh_find(hdev, cancel->handle); + + if (mesh_tx && mesh_tx->sk == cmd->sk) + mesh_send_complete(hdev, mesh_tx, false); + } + + mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, + 0, NULL, 0); + mgmt_pending_free(cmd); + + return 0; +} + +static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_pending_cmd *cmd; + int err; + + if (!lmp_le_capable(hdev) || + !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, + MGMT_STATUS_NOT_SUPPORTED); + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, + MGMT_STATUS_REJECTED); + + hci_dev_lock(hdev); + cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_free(cmd); + } + + hci_dev_unlock(hdev); + return err; +} + +static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ + struct mgmt_mesh_tx *mesh_tx; + struct mgmt_cp_mesh_send *send = data; + struct mgmt_rp_mesh_read_features rp; + bool sending; 
+ int err = 0; + + if (!lmp_le_capable(hdev) || + !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, + MGMT_STATUS_NOT_SUPPORTED); + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || + len <= MGMT_MESH_SEND_SIZE || + len > (MGMT_MESH_SEND_SIZE + 31)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, + MGMT_STATUS_REJECTED); + + hci_dev_lock(hdev); + + memset(&rp, 0, sizeof(rp)); + rp.max_handles = MESH_HANDLES_MAX; + + mgmt_mesh_foreach(hdev, send_count, &rp, sk); + + if (rp.max_handles <= rp.used_handles) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, + MGMT_STATUS_BUSY); + goto done; + } + + sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING); + mesh_tx = mgmt_mesh_add(sk, hdev, send, len); + + if (!mesh_tx) + err = -ENOMEM; + else if (!sending) + err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx, + mesh_send_start_complete); + + if (err < 0) { + bt_dev_err(hdev, "Send Mesh Failed %d", err); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, + MGMT_STATUS_FAILED); + + if (mesh_tx) { + if (sending) + mgmt_mesh_remove(mesh_tx); + } + } else { + hci_dev_set_flag(hdev, HCI_MESH_SENDING); + + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0, + &mesh_tx->handle, 1); + } + +done: + hci_dev_unlock(hdev); + return err; +} + +static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ + struct mgmt_mode *cp = data; + struct mgmt_pending_cmd *cmd; + int err; + u8 val, enabled; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_NOT_SUPPORTED); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_INVALID_PARAMS); + + /* Bluetooth single-mode LE-only controllers, as well as dual-mode + * controllers configured as LE-only devices, do not allow + * switching LE off. These either have LE enabled explicitly + * or have had BR/EDR switched off previously. + * + * When trying to enable an already enabled LE, gracefully + * send a positive response. Trying to disable it, however, will + * result in rejection.
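+ * This keeps at least one transport enabled on such controllers.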
+ */ + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + if (cp->val == 0x01) + return send_settings_rsp(sk, MGMT_OP_SET_LE, hdev); + + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_REJECTED); + } + + hci_dev_lock(hdev); + + val = !!cp->val; + enabled = lmp_host_le_capable(hdev); + + if (!hdev_is_powered(hdev) || val == enabled) { + bool changed = false; + + if (val != hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { + hci_dev_change_flag(hdev, HCI_LE_ENABLED); + changed = true; + } + + if (!val && hci_dev_test_flag(hdev, HCI_ADVERTISING)) { + hci_dev_clear_flag(hdev, HCI_ADVERTISING); + changed = true; + } + + err = send_settings_rsp(sk, MGMT_OP_SET_LE, hdev); + if (err < 0) + goto unlock; + + if (changed) + err = new_settings(hdev, sk); + + goto unlock; + } + + if (pending_find(MGMT_OP_SET_LE, hdev) || + pending_find(MGMT_OP_SET_ADVERTISING, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_BUSY); + goto unlock; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_LE, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_le_sync, cmd, + set_le_complete); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LE, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_remove(cmd); + } + +unlock: + hci_dev_unlock(hdev); + return err; +} + +/* This is a helper function to test for pending mgmt commands that can + * cause CoD or EIR HCI commands. We can only allow one such pending + * mgmt command at a time since otherwise we cannot easily track what + * the current values are, will be, and based on that calculate if a new + * HCI command needs to be sent and if yes with what value. + */ +static bool pending_eir_or_class(struct hci_dev *hdev) +{ + struct mgmt_pending_cmd *cmd; + + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + switch (cmd->opcode) { + case MGMT_OP_ADD_UUID: + case MGMT_OP_REMOVE_UUID: + case MGMT_OP_SET_DEV_CLASS: + case MGMT_OP_SET_POWERED: + return true; + } + } + + return false; +} + +static const u8 bluetooth_base_uuid[] = { + 0xfb, 0x34, 0x9b, 0x5f, 0x80, 0x00, 0x00, 0x80, + 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, +}; + +static u8 get_uuid_size(const u8 *uuid) +{ + u32 val; + + if (memcmp(uuid, bluetooth_base_uuid, 12)) + return 128; + + val = get_unaligned_le32(&uuid[12]); + if (val > 0xffff) + return 32; + + return 16; +} + +static void mgmt_class_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + bt_dev_dbg(hdev, "err %d", err); + + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err), hdev->dev_class, 3); + + mgmt_pending_free(cmd); +} + +static int add_uuid_sync(struct hci_dev *hdev, void *data) +{ + int err; + + err = hci_update_class_sync(hdev); + if (err) + return err; + + return hci_update_eir_sync(hdev); +} + +static int add_uuid(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ + struct mgmt_cp_add_uuid *cp = data; + struct mgmt_pending_cmd *cmd; + struct bt_uuid *uuid; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (pending_eir_or_class(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_UUID, + MGMT_STATUS_BUSY); + goto failed; + } + + uuid = kmalloc(sizeof(*uuid), GFP_KERNEL); + if (!uuid) { + err = -ENOMEM; + goto failed; + } + + memcpy(uuid->uuid, cp->uuid, 16); + uuid->svc_hint = cp->svc_hint; + uuid->size = get_uuid_size(cp->uuid); + + list_add_tail(&uuid->list, &hdev->uuids); + + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_UUID, hdev, data, 
len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + err = hci_cmd_sync_queue(hdev, add_uuid_sync, cmd, mgmt_class_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto failed; + } + +failed: + hci_dev_unlock(hdev); + return err; +} + +static bool enable_service_cache(struct hci_dev *hdev) +{ + if (!hdev_is_powered(hdev)) + return false; + + if (!hci_dev_test_and_set_flag(hdev, HCI_SERVICE_CACHE)) { + queue_delayed_work(hdev->workqueue, &hdev->service_cache, + CACHE_TIMEOUT); + return true; + } + + return false; +} + +static int remove_uuid_sync(struct hci_dev *hdev, void *data) +{ + int err; + + err = hci_update_class_sync(hdev); + if (err) + return err; + + return hci_update_eir_sync(hdev); +} + +static int remove_uuid(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_remove_uuid *cp = data; + struct mgmt_pending_cmd *cmd; + struct bt_uuid *match, *tmp; + static const u8 bt_uuid_any[] = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 + }; + int err, found; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (pending_eir_or_class(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, + MGMT_STATUS_BUSY); + goto unlock; + } + + if (memcmp(cp->uuid, bt_uuid_any, 16) == 0) { + hci_uuids_clear(hdev); + + if (enable_service_cache(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_REMOVE_UUID, + 0, hdev->dev_class, 3); + goto unlock; + } + + goto update_class; + } + + found = 0; + + list_for_each_entry_safe(match, tmp, &hdev->uuids, list) { + if (memcmp(match->uuid, cp->uuid, 16) != 0) + continue; + + list_del(&match->list); + kfree(match); + found++; + } + + if (found == 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_UUID, + MGMT_STATUS_INVALID_PARAMS); + goto unlock; + } + +update_class: + cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_UUID, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto unlock; + } + + err = hci_cmd_sync_queue(hdev, remove_uuid_sync, cmd, + mgmt_class_complete); + if (err < 0) + mgmt_pending_free(cmd); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int set_class_sync(struct hci_dev *hdev, void *data) +{ + int err = 0; + + if (hci_dev_test_and_clear_flag(hdev, HCI_SERVICE_CACHE)) { + cancel_delayed_work_sync(&hdev->service_cache); + err = hci_update_eir_sync(hdev); + } + + if (err) + return err; + + return hci_update_class_sync(hdev); +} + +static int set_dev_class(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_dev_class *cp = data; + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_bredr_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, + MGMT_STATUS_NOT_SUPPORTED); + + hci_dev_lock(hdev); + + if (pending_eir_or_class(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, + MGMT_STATUS_BUSY); + goto unlock; + } + + if ((cp->minor & 0x03) != 0 || (cp->major & 0xe0) != 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, + MGMT_STATUS_INVALID_PARAMS); + goto unlock; + } + + hdev->major_class = cp->major; + hdev->minor_class = cp->minor; + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEV_CLASS, 0, + hdev->dev_class, 3); + goto unlock; + } + + cmd = mgmt_pending_new(sk, MGMT_OP_SET_DEV_CLASS, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto unlock; + } + + err = hci_cmd_sync_queue(hdev, set_class_sync, cmd, + mgmt_class_complete); + if (err < 0) + mgmt_pending_free(cmd); + +unlock: + 
hci_dev_unlock(hdev); + return err; +} + +static int load_link_keys(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_load_link_keys *cp = data; + const u16 max_key_count = ((U16_MAX - sizeof(*cp)) / + sizeof(struct mgmt_link_key_info)); + u16 key_count, expected_len; + bool changed; + int i; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_bredr_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_NOT_SUPPORTED); + + key_count = __le16_to_cpu(cp->key_count); + if (key_count > max_key_count) { + bt_dev_err(hdev, "load_link_keys: too big key_count value %u", + key_count); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_INVALID_PARAMS); + } + + expected_len = struct_size(cp, keys, key_count); + if (expected_len != len) { + bt_dev_err(hdev, "load_link_keys: expected %u bytes, got %u bytes", + expected_len, len); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_INVALID_PARAMS); + } + + if (cp->debug_keys != 0x00 && cp->debug_keys != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_INVALID_PARAMS); + + bt_dev_dbg(hdev, "debug_keys %u key_count %u", cp->debug_keys, + key_count); + + for (i = 0; i < key_count; i++) { + struct mgmt_link_key_info *key = &cp->keys[i]; + + /* Considering SMP over BREDR/LE, there is no need to check addr_type */ + if (key->type > 0x08) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_LOAD_LINK_KEYS, + MGMT_STATUS_INVALID_PARAMS); + } + + hci_dev_lock(hdev); + + hci_link_keys_clear(hdev); + + if (cp->debug_keys) + changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS); + else + changed = hci_dev_test_and_clear_flag(hdev, + HCI_KEEP_DEBUG_KEYS); + + if (changed) + new_settings(hdev, NULL); + + for (i = 0; i < key_count; i++) { + struct mgmt_link_key_info *key = &cp->keys[i]; + + if (hci_is_blocked_key(hdev, + HCI_BLOCKED_KEY_TYPE_LINKKEY, + key->val)) { + bt_dev_warn(hdev, "Skipping blocked link key for %pMR", + &key->addr.bdaddr); + continue; + } + + /* Always ignore debug keys and require a new pairing if + * the user wants to use them. 
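+
+/* Hypothetical userspace-side sketch, not part of the upstream diff: the
+ * struct_size() check above accepts only a length of exactly one header
+ * plus key_count entries, i.e. the value computed here.
+ */
+static size_t __maybe_unused example_load_link_keys_len(u16 key_count)
+{
+	return sizeof(struct mgmt_cp_load_link_keys) +
+	       (size_t)key_count * sizeof(struct mgmt_link_key_info);
+}
+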
+ */ + if (key->type == HCI_LK_DEBUG_COMBINATION) + continue; + + hci_add_link_key(hdev, NULL, &key->addr.bdaddr, key->val, + key->type, key->pin_len, NULL); + } + + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LINK_KEYS, 0, NULL, 0); + + hci_dev_unlock(hdev); + + return 0; +} + +static int device_unpaired(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, struct sock *skip_sk) +{ + struct mgmt_ev_device_unpaired ev; + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = addr_type; + + return mgmt_event(MGMT_EV_DEVICE_UNPAIRED, hdev, &ev, sizeof(ev), + skip_sk); +} + +static void unpair_device_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_unpair_device *cp = cmd->param; + + if (!err) + device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); + + cmd->cmd_complete(cmd, err); + mgmt_pending_free(cmd); +} + +static int unpair_device_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_unpair_device *cp = cmd->param; + struct hci_conn *conn; + + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + + if (!conn) + return 0; + + return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM); +} + +static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_unpair_device *cp = data; + struct mgmt_rp_unpair_device rp; + struct hci_conn_params *params; + struct mgmt_pending_cmd *cmd; + struct hci_conn *conn; + u8 addr_type; + int err; + + memset(&rp, 0, sizeof(rp)); + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; + + if (!bdaddr_type_is_valid(cp->addr.type)) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); + + if (cp->disconnect != 0x00 && cp->disconnect != 0x01) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, + MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); + goto unlock; + } + + if (cp->addr.type == BDADDR_BREDR) { + /* If disconnection is requested, then look up the + * connection. If the remote device is connected, it + * will be later used to terminate the link. + * + * Setting it to NULL explicitly will cause no + * termination of the link. + */ + if (cp->disconnect) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = NULL; + + err = hci_remove_link_key(hdev, &cp->addr.bdaddr); + if (err < 0) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_UNPAIR_DEVICE, + MGMT_STATUS_NOT_PAIRED, &rp, + sizeof(rp)); + goto unlock; + } + + goto done; + } + + /* LE address type */ + addr_type = le_addr_type(cp->addr.type); + + /* Abort any ongoing SMP pairing. Removes ltk and irk if they exist. 
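+
+/* Hypothetical userspace sketch, not part of the upstream diff: the payload
+ * validated by unpair_device() above. The bdaddr is left as BDADDR_ANY here
+ * only for brevity; disconnect must be 0x00 or 0x01.
+ */
+static const struct mgmt_cp_unpair_device example_unpair = {
+	.addr.type  = BDADDR_LE_PUBLIC,
+	.disconnect = 0x01,	/* also terminate an existing link */
+};
+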
*/ + err = smp_cancel_and_remove_pairing(hdev, &cp->addr.bdaddr, addr_type); + if (err < 0) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, + MGMT_STATUS_NOT_PAIRED, &rp, + sizeof(rp)); + goto unlock; + } + + conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type); + if (!conn) { + hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type); + goto done; + } + + + /* Defer clearing up the connection parameters until closing to + * give a chance of keeping them if a repairing happens. + */ + set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); + + /* Disable auto-connection parameters if present */ + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type); + if (params) { + if (params->explicit_connect) + params->auto_connect = HCI_AUTO_CONN_EXPLICIT; + else + params->auto_connect = HCI_AUTO_CONN_DISABLED; + } + + /* If disconnection is not requested, then clear the connection + * variable so that the link is not terminated. + */ + if (!cp->disconnect) + conn = NULL; + +done: + /* If the connection variable is set, then termination of the + * link is requested. + */ + if (!conn) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE, 0, + &rp, sizeof(rp)); + device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, sk); + goto unlock; + } + + cmd = mgmt_pending_new(sk, MGMT_OP_UNPAIR_DEVICE, hdev, cp, + sizeof(*cp)); + if (!cmd) { + err = -ENOMEM; + goto unlock; + } + + cmd->cmd_complete = addr_cmd_complete; + + err = hci_cmd_sync_queue(hdev, unpair_device_sync, cmd, + unpair_device_complete); + if (err < 0) + mgmt_pending_free(cmd); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_disconnect *cp = data; + struct mgmt_rp_disconnect rp; + struct mgmt_pending_cmd *cmd; + struct hci_conn *conn; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + memset(&rp, 0, sizeof(rp)); + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; + + if (!bdaddr_type_is_valid(cp->addr.type)) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); + + hci_dev_lock(hdev); + + if (!test_bit(HCI_UP, &hdev->flags)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, + MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); + goto failed; + } + + if (pending_find(MGMT_OP_DISCONNECT, hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, + MGMT_STATUS_BUSY, &rp, sizeof(rp)); + goto failed; + } + + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + + if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT, + MGMT_STATUS_NOT_CONNECTED, &rp, + sizeof(rp)); + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_DISCONNECT, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + cmd->cmd_complete = generic_cmd_complete; + + err = hci_disconnect(conn, HCI_ERROR_REMOTE_USER_TERM); + if (err < 0) + mgmt_pending_remove(cmd); + +failed: + hci_dev_unlock(hdev); + return err; +} + +static u8 link_to_bdaddr(u8 link_type, u8 addr_type) +{ + switch (link_type) { + case LE_LINK: + switch (addr_type) { + case ADDR_LE_DEV_PUBLIC: + return BDADDR_LE_PUBLIC; + + default: + /* Fallback to LE Random address type */ + return BDADDR_LE_RANDOM; + } + + default: + /* Fallback 
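+
+/* Hypothetical sketch, not part of the upstream diff: the mapping performed
+ * by the le_addr_type() helper used above — mgmt-level LE address types
+ * become the HCI-level ADDR_LE_DEV_* values that SMP and the connection
+ * hash operate on.
+ */
+static u8 __maybe_unused example_le_addr_type(u8 mgmt_addr_type)
+{
+	if (mgmt_addr_type == BDADDR_LE_PUBLIC)
+		return ADDR_LE_DEV_PUBLIC;
+
+	return ADDR_LE_DEV_RANDOM;
+}
+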
to BR/EDR type */ + return BDADDR_BREDR; + } +} + +static int get_connections(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct mgmt_rp_get_connections *rp; + struct hci_conn *c; + int err; + u16 i; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, + MGMT_STATUS_NOT_POWERED); + goto unlock; + } + + i = 0; + list_for_each_entry(c, &hdev->conn_hash.list, list) { + if (test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) + i++; + } + + rp = kmalloc(struct_size(rp, addr, i), GFP_KERNEL); + if (!rp) { + err = -ENOMEM; + goto unlock; + } + + i = 0; + list_for_each_entry(c, &hdev->conn_hash.list, list) { + if (!test_bit(HCI_CONN_MGMT_CONNECTED, &c->flags)) + continue; + bacpy(&rp->addr[i].bdaddr, &c->dst); + rp->addr[i].type = link_to_bdaddr(c->type, c->dst_type); + if (c->type == SCO_LINK || c->type == ESCO_LINK) + continue; + i++; + } + + rp->conn_count = cpu_to_le16(i); + + /* Recalculate length in case of filtered SCO connections, etc */ + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONNECTIONS, 0, rp, + struct_size(rp, addr, i)); + + kfree(rp); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int send_pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_pin_code_neg_reply *cp) +{ + struct mgmt_pending_cmd *cmd; + int err; + + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_NEG_REPLY, hdev, cp, + sizeof(*cp)); + if (!cmd) + return -ENOMEM; + + cmd->cmd_complete = addr_cmd_complete; + + err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_NEG_REPLY, + sizeof(cp->addr.bdaddr), &cp->addr.bdaddr); + if (err < 0) + mgmt_pending_remove(cmd); + + return err; +} + +static int pin_code_reply(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct hci_conn *conn; + struct mgmt_cp_pin_code_reply *cp = data; + struct hci_cp_pin_code_reply reply; + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, + MGMT_STATUS_NOT_POWERED); + goto failed; + } + + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); + if (!conn) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, + MGMT_STATUS_NOT_CONNECTED); + goto failed; + } + + if (conn->pending_sec_level == BT_SECURITY_HIGH && cp->pin_len != 16) { + struct mgmt_cp_pin_code_neg_reply ncp; + + memcpy(&ncp.addr, &cp->addr, sizeof(ncp.addr)); + + bt_dev_err(hdev, "PIN code is not 16 bytes long"); + + err = send_pin_code_neg_reply(sk, hdev, &ncp); + if (err >= 0) + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_PIN_CODE_REPLY, + MGMT_STATUS_INVALID_PARAMS); + + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_PIN_CODE_REPLY, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + cmd->cmd_complete = addr_cmd_complete; + + bacpy(&reply.bdaddr, &cp->addr.bdaddr); + reply.pin_len = cp->pin_len; + memcpy(reply.pin_code, cp->pin_code, sizeof(reply.pin_code)); + + err = hci_send_cmd(hdev, HCI_OP_PIN_CODE_REPLY, sizeof(reply), &reply); + if (err < 0) + mgmt_pending_remove(cmd); + +failed: + hci_dev_unlock(hdev); + return err; +} + +static int set_io_capability(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_io_capability *cp = data; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (cp->io_capability > SMP_IO_KEYBOARD_DISPLAY) + return mgmt_cmd_status(sk, hdev->id, 
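+
+/* Hypothetical sanity checks, not part of the upstream diff, for the
+ * link_to_bdaddr() mapping defined above.
+ */
+static void __maybe_unused example_link_to_bdaddr(void)
+{
+	WARN_ON(link_to_bdaddr(LE_LINK, ADDR_LE_DEV_PUBLIC) != BDADDR_LE_PUBLIC);
+	WARN_ON(link_to_bdaddr(LE_LINK, ADDR_LE_DEV_RANDOM) != BDADDR_LE_RANDOM);
+	/* Every non-LE link type falls back to BDADDR_BREDR. */
+	WARN_ON(link_to_bdaddr(ACL_LINK, ADDR_LE_DEV_PUBLIC) != BDADDR_BREDR);
+}
+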
MGMT_OP_SET_IO_CAPABILITY, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + hdev->io_capability = cp->io_capability; + + bt_dev_dbg(hdev, "IO capability set to 0x%02x", hdev->io_capability); + + hci_dev_unlock(hdev); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_IO_CAPABILITY, 0, + NULL, 0); +} + +static struct mgmt_pending_cmd *find_pairing(struct hci_conn *conn) +{ + struct hci_dev *hdev = conn->hdev; + struct mgmt_pending_cmd *cmd; + + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + if (cmd->opcode != MGMT_OP_PAIR_DEVICE) + continue; + + if (cmd->user_data != conn) + continue; + + return cmd; + } + + return NULL; +} + +static int pairing_complete(struct mgmt_pending_cmd *cmd, u8 status) +{ + struct mgmt_rp_pair_device rp; + struct hci_conn *conn = cmd->user_data; + int err; + + bacpy(&rp.addr.bdaddr, &conn->dst); + rp.addr.type = link_to_bdaddr(conn->type, conn->dst_type); + + err = mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_PAIR_DEVICE, + status, &rp, sizeof(rp)); + + /* So we don't get further callbacks for this connection */ + conn->connect_cfm_cb = NULL; + conn->security_cfm_cb = NULL; + conn->disconn_cfm_cb = NULL; + + hci_conn_drop(conn); + + /* The device is paired so there is no need to remove + * its connection parameters anymore. + */ + clear_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags); + + hci_conn_put(conn); + + return err; +} + +void mgmt_smp_complete(struct hci_conn *conn, bool complete) +{ + u8 status = complete ? MGMT_STATUS_SUCCESS : MGMT_STATUS_FAILED; + struct mgmt_pending_cmd *cmd; + + cmd = find_pairing(conn); + if (cmd) { + cmd->cmd_complete(cmd, status); + mgmt_pending_remove(cmd); + } +} + +static void pairing_complete_cb(struct hci_conn *conn, u8 status) +{ + struct mgmt_pending_cmd *cmd; + + BT_DBG("status %u", status); + + cmd = find_pairing(conn); + if (!cmd) { + BT_DBG("Unable to find a pending command"); + return; + } + + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); +} + +static void le_pairing_complete_cb(struct hci_conn *conn, u8 status) +{ + struct mgmt_pending_cmd *cmd; + + BT_DBG("status %u", status); + + if (!status) + return; + + cmd = find_pairing(conn); + if (!cmd) { + BT_DBG("Unable to find a pending command"); + return; + } + + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); +} + +static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_pair_device *cp = data; + struct mgmt_rp_pair_device rp; + struct mgmt_pending_cmd *cmd; + u8 sec_level, auth_type; + struct hci_conn *conn; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + memset(&rp, 0, sizeof(rp)); + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; + + if (!bdaddr_type_is_valid(cp->addr.type)) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); + + if (cp->io_cap > SMP_IO_KEYBOARD_DISPLAY) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); + goto unlock; + } + + if (hci_bdaddr_is_paired(hdev, &cp->addr.bdaddr, cp->addr.type)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_ALREADY_PAIRED, &rp, + sizeof(rp)); + goto unlock; + } + + sec_level = BT_SECURITY_MEDIUM; + auth_type = HCI_AT_DEDICATED_BONDING; + + if (cp->addr.type 
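+
+/* For reference, a hypothetical enumeration (not part of the upstream
+ * diff) of the IO capability values that pass the SMP_IO_KEYBOARD_DISPLAY
+ * bound checked in set_io_capability() above and in pair_device().
+ */
+enum {
+	EXAMPLE_IO_DISPLAY_ONLY		= 0x00,
+	EXAMPLE_IO_DISPLAY_YESNO	= 0x01,
+	EXAMPLE_IO_KEYBOARD_ONLY	= 0x02,
+	EXAMPLE_IO_NO_INPUT_OUTPUT	= 0x03,
+	EXAMPLE_IO_KEYBOARD_DISPLAY	= 0x04,	/* == SMP_IO_KEYBOARD_DISPLAY */
+};
+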
== BDADDR_BREDR) { + conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level, + auth_type, CONN_REASON_PAIR_DEVICE); + } else { + u8 addr_type = le_addr_type(cp->addr.type); + struct hci_conn_params *p; + + /* When pairing a new device, it is expected to remember + * this device for future connections. Adding the connection + * parameter information ahead of time allows tracking + * of the peripheral preferred values and will speed up any + * further connection establishment. + * + * If connection parameters already exist, then they + * will be kept and this function does nothing. + */ + p = hci_conn_params_add(hdev, &cp->addr.bdaddr, addr_type); + + if (p->auto_connect == HCI_AUTO_CONN_EXPLICIT) + p->auto_connect = HCI_AUTO_CONN_DISABLED; + + conn = hci_connect_le_scan(hdev, &cp->addr.bdaddr, addr_type, + sec_level, HCI_LE_CONN_TIMEOUT, + CONN_REASON_PAIR_DEVICE); + } + + if (IS_ERR(conn)) { + int status; + + if (PTR_ERR(conn) == -EBUSY) + status = MGMT_STATUS_BUSY; + else if (PTR_ERR(conn) == -EOPNOTSUPP) + status = MGMT_STATUS_NOT_SUPPORTED; + else if (PTR_ERR(conn) == -ECONNREFUSED) + status = MGMT_STATUS_REJECTED; + else + status = MGMT_STATUS_CONNECT_FAILED; + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + status, &rp, sizeof(rp)); + goto unlock; + } + + if (conn->connect_cfm_cb) { + hci_conn_drop(conn); + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_PAIR_DEVICE, + MGMT_STATUS_BUSY, &rp, sizeof(rp)); + goto unlock; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_PAIR_DEVICE, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + hci_conn_drop(conn); + goto unlock; + } + + cmd->cmd_complete = pairing_complete; + + /* For LE, just connecting isn't a proof that the pairing finished */ + if (cp->addr.type == BDADDR_BREDR) { + conn->connect_cfm_cb = pairing_complete_cb; + conn->security_cfm_cb = pairing_complete_cb; + conn->disconn_cfm_cb = pairing_complete_cb; + } else { + conn->connect_cfm_cb = le_pairing_complete_cb; + conn->security_cfm_cb = le_pairing_complete_cb; + conn->disconn_cfm_cb = le_pairing_complete_cb; + } + + conn->io_capability = cp->io_cap; + cmd->user_data = hci_conn_get(conn); + + if ((conn->state == BT_CONNECTED || conn->state == BT_CONFIG) && + hci_conn_security(conn, sec_level, auth_type, true)) { + cmd->cmd_complete(cmd, 0); + mgmt_pending_remove(cmd); + } + + err = 0; + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int abort_conn_sync(struct hci_dev *hdev, void *data) +{ + struct hci_conn *conn; + u16 handle = PTR_ERR(data); + + conn = hci_conn_hash_lookup_handle(hdev, handle); + if (!conn) + return 0; + + return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM); +} + +static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_addr_info *addr = data; + struct mgmt_pending_cmd *cmd; + struct hci_conn *conn; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, + MGMT_STATUS_NOT_POWERED); + goto unlock; + } + + cmd = pending_find(MGMT_OP_PAIR_DEVICE, hdev); + if (!cmd) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS); + goto unlock; + } + + conn = cmd->user_data; + + if (bacmp(&addr->bdaddr, &conn->dst) != 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, + MGMT_STATUS_INVALID_PARAMS); + goto unlock; + } + + cmd->cmd_complete(cmd, MGMT_STATUS_CANCELLED); + mgmt_pending_remove(cmd); + + err = 
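+
+/* Hypothetical round-trip sketch, not part of the upstream diff:
+ * abort_conn_sync() above receives the connection handle smuggled through
+ * the void *data argument as ERR_PTR(conn->handle), avoiding a heap
+ * allocation for a single u16.
+ */
+static void __maybe_unused example_handle_roundtrip(u16 handle)
+{
+	void *data = ERR_PTR(handle);
+
+	WARN_ON((u16)PTR_ERR(data) != handle);
+}
+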
mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CANCEL_PAIR_DEVICE, 0, + addr, sizeof(*addr)); + + /* Since user doesn't want to proceed with the connection, abort any + * ongoing pairing and then terminate the link if it was created + * because of the pair device action. + */ + if (addr->type == BDADDR_BREDR) + hci_remove_link_key(hdev, &addr->bdaddr); + else + smp_cancel_and_remove_pairing(hdev, &addr->bdaddr, + le_addr_type(addr->type)); + + if (conn->conn_reason == CONN_REASON_PAIR_DEVICE) + hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle), + NULL); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev, + struct mgmt_addr_info *addr, u16 mgmt_op, + u16 hci_op, __le32 passkey) +{ + struct mgmt_pending_cmd *cmd; + struct hci_conn *conn; + int err; + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, + MGMT_STATUS_NOT_POWERED, addr, + sizeof(*addr)); + goto done; + } + + if (addr->type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr); + else + conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr, + le_addr_type(addr->type)); + + if (!conn) { + err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, + MGMT_STATUS_NOT_CONNECTED, addr, + sizeof(*addr)); + goto done; + } + + if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { + err = smp_user_confirm_reply(conn, mgmt_op, passkey); + if (!err) + err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, + MGMT_STATUS_SUCCESS, addr, + sizeof(*addr)); + else + err = mgmt_cmd_complete(sk, hdev->id, mgmt_op, + MGMT_STATUS_FAILED, addr, + sizeof(*addr)); + + goto done; + } + + cmd = mgmt_pending_add(sk, mgmt_op, hdev, addr, sizeof(*addr)); + if (!cmd) { + err = -ENOMEM; + goto done; + } + + cmd->cmd_complete = addr_cmd_complete; + + /* Continue with pairing via HCI */ + if (hci_op == HCI_OP_USER_PASSKEY_REPLY) { + struct hci_cp_user_passkey_reply cp; + + bacpy(&cp.bdaddr, &addr->bdaddr); + cp.passkey = passkey; + err = hci_send_cmd(hdev, hci_op, sizeof(cp), &cp); + } else + err = hci_send_cmd(hdev, hci_op, sizeof(addr->bdaddr), + &addr->bdaddr); + + if (err < 0) + mgmt_pending_remove(cmd); + +done: + hci_dev_unlock(hdev); + return err; +} + +static int pin_code_neg_reply(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_pin_code_neg_reply *cp = data; + + bt_dev_dbg(hdev, "sock %p", sk); + + return user_pairing_resp(sk, hdev, &cp->addr, + MGMT_OP_PIN_CODE_NEG_REPLY, + HCI_OP_PIN_CODE_NEG_REPLY, 0); +} + +static int user_confirm_reply(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_user_confirm_reply *cp = data; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (len != sizeof(*cp)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_USER_CONFIRM_REPLY, + MGMT_STATUS_INVALID_PARAMS); + + return user_pairing_resp(sk, hdev, &cp->addr, + MGMT_OP_USER_CONFIRM_REPLY, + HCI_OP_USER_CONFIRM_REPLY, 0); +} + +static int user_confirm_neg_reply(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_user_confirm_neg_reply *cp = data; + + bt_dev_dbg(hdev, "sock %p", sk); + + return user_pairing_resp(sk, hdev, &cp->addr, + MGMT_OP_USER_CONFIRM_NEG_REPLY, + HCI_OP_USER_CONFIRM_NEG_REPLY, 0); +} + +static int user_passkey_reply(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_user_passkey_reply *cp = data; + + bt_dev_dbg(hdev, "sock %p", sk); + + return user_pairing_resp(sk, hdev, &cp->addr, + 
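+
+/* Hypothetical userspace sketch, not part of the upstream diff: a passkey
+ * reply as consumed by user_pairing_resp() above; the passkey is a __le32
+ * on the wire, hence the cpu_to_le32() conversion.
+ */
+static const struct mgmt_cp_user_passkey_reply example_passkey = {
+	.addr.type = BDADDR_LE_PUBLIC,
+	.passkey   = cpu_to_le32(123456),
+};
+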
MGMT_OP_USER_PASSKEY_REPLY, + HCI_OP_USER_PASSKEY_REPLY, cp->passkey); +} + +static int user_passkey_neg_reply(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_user_passkey_neg_reply *cp = data; + + bt_dev_dbg(hdev, "sock %p", sk); + + return user_pairing_resp(sk, hdev, &cp->addr, + MGMT_OP_USER_PASSKEY_NEG_REPLY, + HCI_OP_USER_PASSKEY_NEG_REPLY, 0); +} + +static int adv_expire_sync(struct hci_dev *hdev, u32 flags) +{ + struct adv_info *adv_instance; + + adv_instance = hci_find_adv_instance(hdev, hdev->cur_adv_instance); + if (!adv_instance) + return 0; + + /* stop if current instance doesn't need to be changed */ + if (!(adv_instance->flags & flags)) + return 0; + + cancel_adv_timeout(hdev); + + adv_instance = hci_get_next_instance(hdev, adv_instance->instance); + if (!adv_instance) + return 0; + + hci_schedule_adv_instance_sync(hdev, adv_instance->instance, true); + + return 0; +} + +static int name_changed_sync(struct hci_dev *hdev, void *data) +{ + return adv_expire_sync(hdev, MGMT_ADV_FLAG_LOCAL_NAME); +} + +static void set_name_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_set_local_name *cp = cmd->param; + u8 status = mgmt_status(err); + + bt_dev_dbg(hdev, "err %d", err); + + if (cmd != pending_find(MGMT_OP_SET_LOCAL_NAME, hdev)) + return; + + if (status) { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, + status); + } else { + mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, + cp, sizeof(*cp)); + + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) + hci_cmd_sync_queue(hdev, name_changed_sync, NULL, NULL); + } + + mgmt_pending_remove(cmd); +} + +static int set_name_sync(struct hci_dev *hdev, void *data) +{ + if (lmp_bredr_capable(hdev)) { + hci_update_name_sync(hdev); + hci_update_eir_sync(hdev); + } + + /* The name is stored in the scan response data and so + * no need to update the advertising data here. + */ + if (lmp_le_capable(hdev) && hci_dev_test_flag(hdev, HCI_ADVERTISING)) + hci_update_scan_rsp_data_sync(hdev, hdev->cur_adv_instance); + + return 0; +} + +static int set_local_name(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_local_name *cp = data; + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + /* If the old values are the same as the new ones just return a + * direct command complete event. 
+ */ + if (!memcmp(hdev->dev_name, cp->name, sizeof(hdev->dev_name)) && + !memcmp(hdev->short_name, cp->short_name, + sizeof(hdev->short_name))) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, + data, len); + goto failed; + } + + memcpy(hdev->short_name, cp->short_name, sizeof(hdev->short_name)); + + if (!hdev_is_powered(hdev)) { + memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, 0, + data, len); + if (err < 0) + goto failed; + + err = mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, data, + len, HCI_MGMT_LOCAL_NAME_EVENTS, sk); + ext_info_changed(hdev, sk); + + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_LOCAL_NAME, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_name_sync, cmd, + set_name_complete); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_LOCAL_NAME, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_remove(cmd); + + goto failed; + } + + memcpy(hdev->dev_name, cp->name, sizeof(hdev->dev_name)); + +failed: + hci_dev_unlock(hdev); + return err; +} + +static int appearance_changed_sync(struct hci_dev *hdev, void *data) +{ + return adv_expire_sync(hdev, MGMT_ADV_FLAG_APPEARANCE); +} + +static int set_appearance(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_appearance *cp = data; + u16 appearance; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_APPEARANCE, + MGMT_STATUS_NOT_SUPPORTED); + + appearance = le16_to_cpu(cp->appearance); + + hci_dev_lock(hdev); + + if (hdev->appearance != appearance) { + hdev->appearance = appearance; + + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) + hci_cmd_sync_queue(hdev, appearance_changed_sync, NULL, + NULL); + + ext_info_changed(hdev, sk); + } + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_APPEARANCE, 0, NULL, + 0); + + hci_dev_unlock(hdev); + + return err; +} + +static int get_phy_configuration(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_rp_get_phy_configuration rp; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + memset(&rp, 0, sizeof(rp)); + + rp.supported_phys = cpu_to_le32(get_supported_phys(hdev)); + rp.selected_phys = cpu_to_le32(get_selected_phys(hdev)); + rp.configurable_phys = cpu_to_le32(get_configurable_phys(hdev)); + + hci_dev_unlock(hdev); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_PHY_CONFIGURATION, 0, + &rp, sizeof(rp)); +} + +int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip) +{ + struct mgmt_ev_phy_configuration_changed ev; + + memset(&ev, 0, sizeof(ev)); + + ev.selected_phys = cpu_to_le32(get_selected_phys(hdev)); + + return mgmt_event(MGMT_EV_PHY_CONFIGURATION_CHANGED, hdev, &ev, + sizeof(ev), skip); +} + +static void set_default_phy_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct sk_buff *skb = cmd->skb; + u8 status = mgmt_status(err); + + if (cmd != pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) + return; + + if (!status) { + if (!skb) + status = MGMT_STATUS_FAILED; + else if (IS_ERR(skb)) + status = mgmt_status(PTR_ERR(skb)); + else + status = mgmt_status(skb->data[0]); + } + + bt_dev_dbg(hdev, "status %d", status); + + if (status) { + mgmt_cmd_status(cmd->sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, status); + } else { + mgmt_cmd_complete(cmd->sk, hdev->id, + 
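+
+/* Hypothetical userspace sketch, not part of the upstream diff: the payload
+ * handled by set_local_name() above. Both fields are fixed-size, NUL-padded
+ * buffers, which is what makes the full-size memcmp() shortcut above a
+ * complete equality test.
+ */
+static const struct mgmt_cp_set_local_name example_name = {
+	.name	    = "example-controller",
+	.short_name = "example",
+};
+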
MGMT_OP_SET_PHY_CONFIGURATION, 0, + NULL, 0); + + mgmt_phy_configuration_changed(hdev, cmd->sk); + } + + if (skb && !IS_ERR(skb)) + kfree_skb(skb); + + mgmt_pending_remove(cmd); +} + +static int set_default_phy_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_set_phy_configuration *cp = cmd->param; + struct hci_cp_le_set_default_phy cp_phy; + u32 selected_phys = __le32_to_cpu(cp->selected_phys); + + memset(&cp_phy, 0, sizeof(cp_phy)); + + if (!(selected_phys & MGMT_PHY_LE_TX_MASK)) + cp_phy.all_phys |= 0x01; + + if (!(selected_phys & MGMT_PHY_LE_RX_MASK)) + cp_phy.all_phys |= 0x02; + + if (selected_phys & MGMT_PHY_LE_1M_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_1M; + + if (selected_phys & MGMT_PHY_LE_2M_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_2M; + + if (selected_phys & MGMT_PHY_LE_CODED_TX) + cp_phy.tx_phys |= HCI_LE_SET_PHY_CODED; + + if (selected_phys & MGMT_PHY_LE_1M_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_1M; + + if (selected_phys & MGMT_PHY_LE_2M_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_2M; + + if (selected_phys & MGMT_PHY_LE_CODED_RX) + cp_phy.rx_phys |= HCI_LE_SET_PHY_CODED; + + cmd->skb = __hci_cmd_sync(hdev, HCI_OP_LE_SET_DEFAULT_PHY, + sizeof(cp_phy), &cp_phy, HCI_CMD_TIMEOUT); + + return 0; +} + +static int set_phy_configuration(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_set_phy_configuration *cp = data; + struct mgmt_pending_cmd *cmd; + u32 selected_phys, configurable_phys, supported_phys, unconfigure_phys; + u16 pkt_type = (HCI_DH1 | HCI_DM1); + bool changed = false; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + configurable_phys = get_configurable_phys(hdev); + supported_phys = get_supported_phys(hdev); + selected_phys = __le32_to_cpu(cp->selected_phys); + + if (selected_phys & ~supported_phys) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_INVALID_PARAMS); + + unconfigure_phys = supported_phys & ~configurable_phys; + + if ((selected_phys & unconfigure_phys) != unconfigure_phys) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_INVALID_PARAMS); + + if (selected_phys == get_selected_phys(hdev)) + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + 0, NULL, 0); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_REJECTED); + goto unlock; + } + + if (pending_find(MGMT_OP_SET_PHY_CONFIGURATION, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_BUSY); + goto unlock; + } + + if (selected_phys & MGMT_PHY_BR_1M_3SLOT) + pkt_type |= (HCI_DH3 | HCI_DM3); + else + pkt_type &= ~(HCI_DH3 | HCI_DM3); + + if (selected_phys & MGMT_PHY_BR_1M_5SLOT) + pkt_type |= (HCI_DH5 | HCI_DM5); + else + pkt_type &= ~(HCI_DH5 | HCI_DM5); + + if (selected_phys & MGMT_PHY_EDR_2M_1SLOT) + pkt_type &= ~HCI_2DH1; + else + pkt_type |= HCI_2DH1; + + if (selected_phys & MGMT_PHY_EDR_2M_3SLOT) + pkt_type &= ~HCI_2DH3; + else + pkt_type |= HCI_2DH3; + + if (selected_phys & MGMT_PHY_EDR_2M_5SLOT) + pkt_type &= ~HCI_2DH5; + else + pkt_type |= HCI_2DH5; + + if (selected_phys & MGMT_PHY_EDR_3M_1SLOT) + pkt_type &= ~HCI_3DH1; + else + pkt_type |= HCI_3DH1; + + if (selected_phys & MGMT_PHY_EDR_3M_3SLOT) + pkt_type &= ~HCI_3DH3; + else + pkt_type |= HCI_3DH3; + + if (selected_phys & MGMT_PHY_EDR_3M_5SLOT) + pkt_type &= ~HCI_3DH5; + else + pkt_type |= HCI_3DH5; + + if (pkt_type != hdev->pkt_type) { + 
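+
+/* Hypothetical sketch, not part of the upstream diff: the EDR bits in
+ * pkt_type are "packet type not allowed" bits, which is why the branches
+ * above clear HCI_2DHx/HCI_3DHx to enable a PHY and set them to disable
+ * it. A BR 1M 1-slot-only configuration therefore looks like this.
+ */
+static u16 __maybe_unused example_basic_rate_only_pkt_type(void)
+{
+	return (HCI_DH1 | HCI_DM1) |	/* allow 1-slot basic rate */
+	       (HCI_2DH1 | HCI_2DH3 | HCI_2DH5 |
+		HCI_3DH1 | HCI_3DH3 | HCI_3DH5); /* forbid all EDR types */
+}
+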
hdev->pkt_type = pkt_type; + changed = true; + } + + if ((selected_phys & MGMT_PHY_LE_MASK) == + (get_selected_phys(hdev) & MGMT_PHY_LE_MASK)) { + if (changed) + mgmt_phy_configuration_changed(hdev, sk); + + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + 0, NULL, 0); + + goto unlock; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_PHY_CONFIGURATION, hdev, data, + len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_default_phy_sync, cmd, + set_default_phy_complete); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_PHY_CONFIGURATION, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_remove(cmd); + } + +unlock: + hci_dev_unlock(hdev); + + return err; +} + +static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + int err = MGMT_STATUS_SUCCESS; + struct mgmt_cp_set_blocked_keys *keys = data; + const u16 max_key_count = ((U16_MAX - sizeof(*keys)) / + sizeof(struct mgmt_blocked_key_info)); + u16 key_count, expected_len; + int i; + + bt_dev_dbg(hdev, "sock %p", sk); + + key_count = __le16_to_cpu(keys->key_count); + if (key_count > max_key_count) { + bt_dev_err(hdev, "too big key_count value %u", key_count); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS, + MGMT_STATUS_INVALID_PARAMS); + } + + expected_len = struct_size(keys, keys, key_count); + if (expected_len != len) { + bt_dev_err(hdev, "expected %u bytes, got %u bytes", + expected_len, len); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS, + MGMT_STATUS_INVALID_PARAMS); + } + + hci_dev_lock(hdev); + + hci_blocked_keys_clear(hdev); + + for (i = 0; i < key_count; ++i) { + struct blocked_key *b = kzalloc(sizeof(*b), GFP_KERNEL); + + if (!b) { + err = MGMT_STATUS_NO_RESOURCES; + break; + } + + b->type = keys->keys[i].type; + memcpy(b->val, keys->keys[i].val, sizeof(b->val)); + list_add_rcu(&b->list, &hdev->blocked_keys); + } + hci_dev_unlock(hdev); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_BLOCKED_KEYS, + err, NULL, 0); +} + +static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_mode *cp = data; + int err; + bool changed = false; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks)) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_WIDEBAND_SPEECH, + MGMT_STATUS_NOT_SUPPORTED); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_WIDEBAND_SPEECH, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (hdev_is_powered(hdev) && + !!cp->val != hci_dev_test_flag(hdev, + HCI_WIDEBAND_SPEECH_ENABLED)) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_WIDEBAND_SPEECH, + MGMT_STATUS_REJECTED); + goto unlock; + } + + if (cp->val) + changed = !hci_dev_test_and_set_flag(hdev, + HCI_WIDEBAND_SPEECH_ENABLED); + else + changed = hci_dev_test_and_clear_flag(hdev, + HCI_WIDEBAND_SPEECH_ENABLED); + + err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev); + if (err < 0) + goto unlock; + + if (changed) + err = new_settings(hdev, sk); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int read_controller_cap(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + char buf[20]; + struct mgmt_rp_read_controller_cap *rp = (void *)buf; + u16 cap_len = 0; + u8 flags = 0; + u8 tx_power_range[2]; + + bt_dev_dbg(hdev, "sock %p", sk); + + memset(&buf, 0, sizeof(buf)); + + hci_dev_lock(hdev); + + /* When the Read Simple 
Pairing Options command is supported, then + * the remote public key validation is supported. + * + * Alternatively, when Microsoft extensions are available, they can + * indicate support for public key validation as well. + */ + if ((hdev->commands[41] & 0x08) || msft_curve_validity(hdev)) + flags |= 0x01; /* Remote public key validation (BR/EDR) */ + + flags |= 0x02; /* Remote public key validation (LE) */ + + /* When the Read Encryption Key Size command is supported, then the + * encryption key size is enforced. + */ + if (hdev->commands[20] & 0x10) + flags |= 0x04; /* Encryption key size enforcement (BR/EDR) */ + + flags |= 0x08; /* Encryption key size enforcement (LE) */ + + cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_SEC_FLAGS, + &flags, 1); + + /* When the Read Simple Pairing Options command is supported, then + * also max encryption key size information is provided. + */ + if (hdev->commands[41] & 0x08) + cap_len = eir_append_le16(rp->cap, cap_len, + MGMT_CAP_MAX_ENC_KEY_SIZE, + hdev->max_enc_key_size); + + cap_len = eir_append_le16(rp->cap, cap_len, + MGMT_CAP_SMP_MAX_ENC_KEY_SIZE, + SMP_MAX_ENC_KEY_SIZE); + + /* Append the min/max LE tx power parameters if we were able to fetch + * it from the controller + */ + if (hdev->commands[38] & 0x80) { + memcpy(&tx_power_range[0], &hdev->min_le_tx_power, 1); + memcpy(&tx_power_range[1], &hdev->max_le_tx_power, 1); + cap_len = eir_append_data(rp->cap, cap_len, MGMT_CAP_LE_TX_PWR, + tx_power_range, 2); + } + + rp->cap_len = cpu_to_le16(cap_len); + + hci_dev_unlock(hdev); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_CONTROLLER_CAP, 0, + rp, sizeof(*rp) + cap_len); +} + +#ifdef CONFIG_BT_FEATURE_DEBUG +/* d4992530-b9ec-469f-ab01-6c481c47da1c */ +static const u8 debug_uuid[16] = { + 0x1c, 0xda, 0x47, 0x1c, 0x48, 0x6c, 0x01, 0xab, + 0x9f, 0x46, 0xec, 0xb9, 0x30, 0x25, 0x99, 0xd4, +}; +#endif + +/* 330859bc-7506-492d-9370-9a6f0614037f */ +static const u8 quality_report_uuid[16] = { + 0x7f, 0x03, 0x14, 0x06, 0x6f, 0x9a, 0x70, 0x93, + 0x2d, 0x49, 0x06, 0x75, 0xbc, 0x59, 0x08, 0x33, +}; + +/* a6695ace-ee7f-4fb9-881a-5fac66c629af */ +static const u8 offload_codecs_uuid[16] = { + 0xaf, 0x29, 0xc6, 0x66, 0xac, 0x5f, 0x1a, 0x88, + 0xb9, 0x4f, 0x7f, 0xee, 0xce, 0x5a, 0x69, 0xa6, +}; + +/* 671b10b5-42c0-4696-9227-eb28d1b049d6 */ +static const u8 le_simultaneous_roles_uuid[16] = { + 0xd6, 0x49, 0xb0, 0xd1, 0x28, 0xeb, 0x27, 0x92, + 0x96, 0x46, 0xc0, 0x42, 0xb5, 0x10, 0x1b, 0x67, +}; + +/* 15c0a148-c273-11ea-b3de-0242ac130004 */ +static const u8 rpa_resolution_uuid[16] = { + 0x04, 0x00, 0x13, 0xac, 0x42, 0x02, 0xde, 0xb3, + 0xea, 0x11, 0x73, 0xc2, 0x48, 0xa1, 0xc0, 0x15, +}; + +/* 6fbaf188-05e0-496a-9885-d6ddfdb4e03e */ +static const u8 iso_socket_uuid[16] = { + 0x3e, 0xe0, 0xb4, 0xfd, 0xdd, 0xd6, 0x85, 0x98, + 0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f, +}; + +/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */ +static const u8 mgmt_mesh_uuid[16] = { + 0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf, + 0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c, +}; + +static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_rp_read_exp_features_info *rp; + size_t len; + u16 idx = 0; + u32 flags; + int status; + + bt_dev_dbg(hdev, "sock %p", sk); + + /* Enough space for 7 features */ + len = sizeof(*rp) + (sizeof(rp->features[0]) * 7); + rp = kzalloc(len, GFP_KERNEL); + if (!rp) + return -ENOMEM; + +#ifdef CONFIG_BT_FEATURE_DEBUG + if (!hdev) { + flags = bt_dbg_get() ? 
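+
+/* Hypothetical decoder sketch, not part of the upstream diff, for the
+ * MGMT_CAP_SEC_FLAGS octet assembled above: bits 0/1 are remote public key
+ * validation (BR/EDR, LE) and bits 2/3 are encryption key size enforcement
+ * (BR/EDR, LE).
+ */
+static bool __maybe_unused example_enforces_bredr_key_size(u8 sec_flags)
+{
+	return sec_flags & 0x04;
+}
+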
BIT(0) : 0; + + memcpy(rp->features[idx].uuid, debug_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } +#endif + + if (hdev && hci_dev_le_state_simultaneous(hdev)) { + if (hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)) + flags = BIT(0); + else + flags = 0; + + memcpy(rp->features[idx].uuid, le_simultaneous_roles_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + + if (hdev && ll_privacy_capable(hdev)) { + if (hci_dev_test_flag(hdev, HCI_ENABLE_LL_PRIVACY)) + flags = BIT(0) | BIT(1); + else + flags = BIT(1); + + memcpy(rp->features[idx].uuid, rpa_resolution_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + + if (hdev && (aosp_has_quality_report(hdev) || + hdev->set_quality_report)) { + if (hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)) + flags = BIT(0); + else + flags = 0; + + memcpy(rp->features[idx].uuid, quality_report_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + + if (hdev && hdev->get_data_path_id) { + if (hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) + flags = BIT(0); + else + flags = 0; + + memcpy(rp->features[idx].uuid, offload_codecs_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + + if (IS_ENABLED(CONFIG_BT_LE)) { + flags = iso_enabled() ? BIT(0) : 0; + memcpy(rp->features[idx].uuid, iso_socket_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + + if (hdev && lmp_le_capable(hdev)) { + if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) + flags = BIT(0); + else + flags = 0; + + memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16); + rp->features[idx].flags = cpu_to_le32(flags); + idx++; + } + + rp->feature_count = cpu_to_le16(idx); + + /* After reading the experimental features information, enable + * the events to update client on any future change. + */ + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, + MGMT_OP_READ_EXP_FEATURES_INFO, + 0, rp, sizeof(*rp) + (20 * idx)); + + kfree(rp); + return status; +} + +static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, + struct sock *skip) +{ + struct mgmt_ev_exp_feature_changed ev; + + memset(&ev, 0, sizeof(ev)); + memcpy(ev.uuid, rpa_resolution_uuid, 16); + ev.flags = cpu_to_le32((enabled ? BIT(0) : 0) | BIT(1)); + + // Do we need to be atomic with the conn_flags? + if (enabled && privacy_mode_capable(hdev)) + hdev->conn_flags |= HCI_CONN_FLAG_DEVICE_PRIVACY; + else + hdev->conn_flags &= ~HCI_CONN_FLAG_DEVICE_PRIVACY; + + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, + &ev, sizeof(ev), + HCI_MGMT_EXP_FEATURE_EVENTS, skip); + +} + +static int exp_feature_changed(struct hci_dev *hdev, const u8 *uuid, + bool enabled, struct sock *skip) +{ + struct mgmt_ev_exp_feature_changed ev; + + memset(&ev, 0, sizeof(ev)); + memcpy(ev.uuid, uuid, 16); + ev.flags = cpu_to_le32(enabled ? BIT(0) : 0); + + return mgmt_limited_event(MGMT_EV_EXP_FEATURE_CHANGED, hdev, + &ev, sizeof(ev), + HCI_MGMT_EXP_FEATURE_EVENTS, skip); +} + +#define EXP_FEAT(_uuid, _set_func) \ +{ \ + .uuid = _uuid, \ + .set_func = _set_func, \ +} + +/* The zero key uuid is special. Multiple exp features are set through it. 
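+
+/* Hypothetical sketch, not part of the upstream diff: each feature entry
+ * in the response built above is a 16-byte UUID followed by a __le32 flags
+ * word, which is why the final reply length is sizeof(*rp) + 20 * idx.
+ */
+struct example_exp_feature_entry {
+	u8     uuid[16];
+	__le32 flags;
+} __packed;			/* 20 bytes per entry on the wire */
+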
*/ +static int set_zero_key_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + + memset(rp.uuid, 0, 16); + rp.flags = cpu_to_le32(0); + +#ifdef CONFIG_BT_FEATURE_DEBUG + if (!hdev) { + bool changed = bt_dbg_get(); + + bt_dbg_set(false); + + if (changed) + exp_feature_changed(NULL, ZERO_KEY, false, sk); + } +#endif + + if (hdev && use_ll_privacy(hdev) && !hdev_is_powered(hdev)) { + bool changed; + + changed = hci_dev_test_and_clear_flag(hdev, + HCI_ENABLE_LL_PRIVACY); + if (changed) + exp_feature_changed(hdev, rpa_resolution_uuid, false, + sk); + } + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); +} + +#ifdef CONFIG_BT_FEATURE_DEBUG +static int set_debug_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + + bool val, changed; + int err; + + /* Command requires to use the non-controller index */ + if (hdev) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = !!cp->param[0]; + changed = val ? !bt_dbg_get() : bt_dbg_get(); + bt_dbg_set(val); + + memcpy(rp.uuid, debug_uuid, 16); + rp.flags = cpu_to_le32(val ? BIT(0) : 0); + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_feature_changed(hdev, debug_uuid, val, sk); + + return err; +} +#endif + +static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + bool val, changed; + int err; + + /* Command requires to use the controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = !!cp->param[0]; + + if (val) { + changed = !hci_dev_test_and_set_flag(hdev, + HCI_MESH_EXPERIMENTAL); + } else { + hci_dev_clear_flag(hdev, HCI_MESH); + changed = hci_dev_test_and_clear_flag(hdev, + HCI_MESH_EXPERIMENTAL); + } + + memcpy(rp.uuid, mgmt_mesh_uuid, 16); + rp.flags = cpu_to_le32(val ? 
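+
+/* Hypothetical helper sketch, not part of the upstream diff: every
+ * set_*_func() in this block repeats the same screen — exactly one octet
+ * of parameters after the fixed header, restricted to 0x00 or 0x01.
+ */
+static bool __maybe_unused example_exp_param_ok(const struct mgmt_cp_set_exp_feature *cp,
+						u16 data_len)
+{
+	return data_len == MGMT_SET_EXP_FEATURE_SIZE + 1 &&
+	       (cp->param[0] == 0x00 || cp->param[0] == 0x01);
+}
+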
BIT(0) : 0); + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk); + + return err; +} + +static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, + u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + bool val, changed; + int err; + u32 flags; + + /* Command requires to use the controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Changes can only be made when controller is powered down */ + if (hdev_is_powered(hdev)) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_REJECTED); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = !!cp->param[0]; + + if (val) { + changed = !hci_dev_test_and_set_flag(hdev, + HCI_ENABLE_LL_PRIVACY); + hci_dev_clear_flag(hdev, HCI_ADVERTISING); + + /* Enable LL privacy + supported settings changed */ + flags = BIT(0) | BIT(1); + } else { + changed = hci_dev_test_and_clear_flag(hdev, + HCI_ENABLE_LL_PRIVACY); + + /* Disable LL privacy + supported settings changed */ + flags = BIT(1); + } + + memcpy(rp.uuid, rpa_resolution_uuid, 16); + rp.flags = cpu_to_le32(flags); + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_ll_privacy_feature_changed(val, hdev, sk); + + return err; +} + +static int set_quality_report_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, + u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + bool val, changed; + int err; + + /* Command requires to use a valid controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + hci_req_sync_lock(hdev); + + val = !!cp->param[0]; + changed = (val != hci_dev_test_flag(hdev, HCI_QUALITY_REPORT)); + + if (!aosp_has_quality_report(hdev) && !hdev->set_quality_report) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_NOT_SUPPORTED); + goto unlock_quality_report; + } + + if (changed) { + if (hdev->set_quality_report) + err = hdev->set_quality_report(hdev, val); + else + err = aosp_set_quality_report(hdev, val); + + if (err) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_FAILED); + goto unlock_quality_report; + } + + if (val) + hci_dev_set_flag(hdev, HCI_QUALITY_REPORT); + else + hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); + } + + bt_dev_dbg(hdev, "quality report enable %d changed %d", val, changed); + + memcpy(rp.uuid, quality_report_uuid, 16); + rp.flags = 
cpu_to_le32(val ? BIT(0) : 0); + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_feature_changed(hdev, quality_report_uuid, val, sk); + +unlock_quality_report: + hci_req_sync_unlock(hdev); + return err; +} + +static int set_offload_codec_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, + u16 data_len) +{ + bool val, changed; + int err; + struct mgmt_rp_set_exp_feature rp; + + /* Command requires to use a valid controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = !!cp->param[0]; + changed = (val != hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)); + + if (!hdev->get_data_path_id) { + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_NOT_SUPPORTED); + } + + if (changed) { + if (val) + hci_dev_set_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED); + else + hci_dev_clear_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED); + } + + bt_dev_info(hdev, "offload codecs enable %d changed %d", + val, changed); + + memcpy(rp.uuid, offload_codecs_uuid, 16); + rp.flags = cpu_to_le32(val ? BIT(0) : 0); + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_feature_changed(hdev, offload_codecs_uuid, val, sk); + + return err; +} + +static int set_le_simultaneous_roles_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, + u16 data_len) +{ + bool val, changed; + int err; + struct mgmt_rp_set_exp_feature rp; + + /* Command requires to use a valid controller index */ + if (!hdev) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = !!cp->param[0]; + changed = (val != hci_dev_test_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES)); + + if (!hci_dev_le_state_simultaneous(hdev)) { + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_NOT_SUPPORTED); + } + + if (changed) { + if (val) + hci_dev_set_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES); + else + hci_dev_clear_flag(hdev, HCI_LE_SIMULTANEOUS_ROLES); + } + + bt_dev_info(hdev, "LE simultaneous roles enable %d changed %d", + val, changed); + + memcpy(rp.uuid, le_simultaneous_roles_uuid, 16); + rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_feature_changed(hdev, le_simultaneous_roles_uuid, val, sk); + + return err; +} + +#ifdef CONFIG_BT_LE +static int set_iso_socket_func(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len) +{ + struct mgmt_rp_set_exp_feature rp; + bool val, changed = false; + int err; + + /* Command requires to use the non-controller index */ + if (hdev) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_INDEX); + + /* Parameters are limited to a single octet */ + if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + /* Only boolean on/off is supported */ + if (cp->param[0] != 0x00 && cp->param[0] != 0x01) + return mgmt_cmd_status(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_INVALID_PARAMS); + + val = cp->param[0] ? true : false; + if (val) + err = iso_init(); + else + err = iso_exit(); + + if (!err) + changed = true; + + memcpy(rp.uuid, iso_socket_uuid, 16); + rp.flags = cpu_to_le32(val ? BIT(0) : 0); + + hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + + err = mgmt_cmd_complete(sk, MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, 0, + &rp, sizeof(rp)); + + if (changed) + exp_feature_changed(hdev, iso_socket_uuid, val, sk); + + return err; +} +#endif + +static const struct mgmt_exp_feature { + const u8 *uuid; + int (*set_func)(struct sock *sk, struct hci_dev *hdev, + struct mgmt_cp_set_exp_feature *cp, u16 data_len); +} exp_features[] = { + EXP_FEAT(ZERO_KEY, set_zero_key_func), +#ifdef CONFIG_BT_FEATURE_DEBUG + EXP_FEAT(debug_uuid, set_debug_func), +#endif + EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func), + EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func), + EXP_FEAT(quality_report_uuid, set_quality_report_func), + EXP_FEAT(offload_codecs_uuid, set_offload_codec_func), + EXP_FEAT(le_simultaneous_roles_uuid, set_le_simultaneous_roles_func), +#ifdef CONFIG_BT_LE + EXP_FEAT(iso_socket_uuid, set_iso_socket_func), +#endif + + /* end with a null feature */ + EXP_FEAT(NULL, NULL) +}; + +static int set_exp_feature(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_cp_set_exp_feature *cp = data; + size_t i = 0; + + bt_dev_dbg(hdev, "sock %p", sk); + + for (i = 0; exp_features[i].uuid; i++) { + if (!memcmp(cp->uuid, exp_features[i].uuid, 16)) + return exp_features[i].set_func(sk, hdev, cp, data_len); + } + + return mgmt_cmd_status(sk, hdev ? hdev->id : MGMT_INDEX_NONE, + MGMT_OP_SET_EXP_FEATURE, + MGMT_STATUS_NOT_SUPPORTED); +} + +static u32 get_params_flags(struct hci_dev *hdev, + struct hci_conn_params *params) +{ + u32 flags = hdev->conn_flags; + + /* Devices using RPAs can only be programmed in the acceptlist if + * LL Privacy has been enable otherwise they cannot mark + * HCI_CONN_FLAG_REMOTE_WAKEUP. 
+ */
+	if ((flags & HCI_CONN_FLAG_REMOTE_WAKEUP) && !use_ll_privacy(hdev) &&
+	    hci_find_irk_by_addr(hdev, &params->addr, params->addr_type))
+		flags &= ~HCI_CONN_FLAG_REMOTE_WAKEUP;
+
+	return flags;
+}
+
+static int get_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
+			    u16 data_len)
+{
+	struct mgmt_cp_get_device_flags *cp = data;
+	struct mgmt_rp_get_device_flags rp;
+	struct bdaddr_list_with_flags *br_params;
+	struct hci_conn_params *params;
+	u32 supported_flags;
+	u32 current_flags = 0;
+	u8 status = MGMT_STATUS_INVALID_PARAMS;
+
+	bt_dev_dbg(hdev, "Get device flags %pMR (type 0x%x)\n",
+		   &cp->addr.bdaddr, cp->addr.type);
+
+	hci_dev_lock(hdev);
+
+	supported_flags = hdev->conn_flags;
+
+	memset(&rp, 0, sizeof(rp));
+
+	if (cp->addr.type == BDADDR_BREDR) {
+		br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list,
+							      &cp->addr.bdaddr,
+							      cp->addr.type);
+		if (!br_params)
+			goto done;
+
+		current_flags = br_params->flags;
+	} else {
+		params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
+						le_addr_type(cp->addr.type));
+		if (!params)
+			goto done;
+
+		supported_flags = get_params_flags(hdev, params);
+		current_flags = params->flags;
+	}
+
+	bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr);
+	rp.addr.type = cp->addr.type;
+	rp.supported_flags = cpu_to_le32(supported_flags);
+	rp.current_flags = cpu_to_le32(current_flags);
+
+	status = MGMT_STATUS_SUCCESS;
+
+done:
+	hci_dev_unlock(hdev);
+
+	return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_DEVICE_FLAGS, status,
+				 &rp, sizeof(rp));
+}
+
+static void device_flags_changed(struct sock *sk, struct hci_dev *hdev,
+				 bdaddr_t *bdaddr, u8 bdaddr_type,
+				 u32 supported_flags, u32 current_flags)
+{
+	struct mgmt_ev_device_flags_changed ev;
+
+	bacpy(&ev.addr.bdaddr, bdaddr);
+	ev.addr.type = bdaddr_type;
+	ev.supported_flags = cpu_to_le32(supported_flags);
+	ev.current_flags = cpu_to_le32(current_flags);
+
+	mgmt_event(MGMT_EV_DEVICE_FLAGS_CHANGED, hdev, &ev, sizeof(ev), sk);
+}
+
+static int set_device_flags(struct sock *sk, struct hci_dev *hdev, void *data,
+			    u16 len)
+{
+	struct mgmt_cp_set_device_flags *cp = data;
+	struct bdaddr_list_with_flags *br_params;
+	struct hci_conn_params *params;
+	u8 status = MGMT_STATUS_INVALID_PARAMS;
+	u32 supported_flags;
+	u32 current_flags = __le32_to_cpu(cp->current_flags);
+
+	bt_dev_dbg(hdev, "Set device flags %pMR (type 0x%x) = 0x%x",
+		   &cp->addr.bdaddr, cp->addr.type, current_flags);
+
+	// We should take hci_dev_lock() early, I think..
conn_flags can change + supported_flags = hdev->conn_flags; + + if ((supported_flags | current_flags) != supported_flags) { + bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", + current_flags, supported_flags); + goto done; + } + + hci_dev_lock(hdev); + + if (cp->addr.type == BDADDR_BREDR) { + br_params = hci_bdaddr_list_lookup_with_flags(&hdev->accept_list, + &cp->addr.bdaddr, + cp->addr.type); + + if (br_params) { + br_params->flags = current_flags; + status = MGMT_STATUS_SUCCESS; + } else { + bt_dev_warn(hdev, "No such BR/EDR device %pMR (0x%x)", + &cp->addr.bdaddr, cp->addr.type); + } + + goto unlock; + } + + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + le_addr_type(cp->addr.type)); + if (!params) { + bt_dev_warn(hdev, "No such LE device %pMR (0x%x)", + &cp->addr.bdaddr, le_addr_type(cp->addr.type)); + goto unlock; + } + + supported_flags = get_params_flags(hdev, params); + + if ((supported_flags | current_flags) != supported_flags) { + bt_dev_warn(hdev, "Bad flag given (0x%x) vs supported (0x%0x)", + current_flags, supported_flags); + goto unlock; + } + + WRITE_ONCE(params->flags, current_flags); + status = MGMT_STATUS_SUCCESS; + + /* Update passive scan if HCI_CONN_FLAG_DEVICE_PRIVACY + * has been set. + */ + if (params->flags & HCI_CONN_FLAG_DEVICE_PRIVACY) + hci_update_passive_scan(hdev); + +unlock: + hci_dev_unlock(hdev); + +done: + if (status == MGMT_STATUS_SUCCESS) + device_flags_changed(sk, hdev, &cp->addr.bdaddr, cp->addr.type, + supported_flags, current_flags); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_FLAGS, status, + &cp->addr, sizeof(cp->addr)); +} + +static void mgmt_adv_monitor_added(struct sock *sk, struct hci_dev *hdev, + u16 handle) +{ + struct mgmt_ev_adv_monitor_added ev; + + ev.monitor_handle = cpu_to_le16(handle); + + mgmt_event(MGMT_EV_ADV_MONITOR_ADDED, hdev, &ev, sizeof(ev), sk); +} + +void mgmt_adv_monitor_removed(struct hci_dev *hdev, u16 handle) +{ + struct mgmt_ev_adv_monitor_removed ev; + struct mgmt_pending_cmd *cmd; + struct sock *sk_skip = NULL; + struct mgmt_cp_remove_adv_monitor *cp; + + cmd = pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev); + if (cmd) { + cp = cmd->param; + + if (cp->monitor_handle) + sk_skip = cmd->sk; + } + + ev.monitor_handle = cpu_to_le16(handle); + + mgmt_event(MGMT_EV_ADV_MONITOR_REMOVED, hdev, &ev, sizeof(ev), sk_skip); +} + +static int read_adv_mon_features(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct adv_monitor *monitor = NULL; + struct mgmt_rp_read_adv_monitor_features *rp = NULL; + int handle, err; + size_t rp_size = 0; + __u32 supported = 0; + __u32 enabled = 0; + __u16 num_handles = 0; + __u16 handles[HCI_MAX_ADV_MONITOR_NUM_HANDLES]; + + BT_DBG("request for %s", hdev->name); + + hci_dev_lock(hdev); + + if (msft_monitor_supported(hdev)) + supported |= MGMT_ADV_MONITOR_FEATURE_MASK_OR_PATTERNS; + + idr_for_each_entry(&hdev->adv_monitors_idr, monitor, handle) + handles[num_handles++] = monitor->handle; + + hci_dev_unlock(hdev); + + rp_size = sizeof(*rp) + (num_handles * sizeof(u16)); + rp = kmalloc(rp_size, GFP_KERNEL); + if (!rp) + return -ENOMEM; + + /* All supported features are currently enabled */ + enabled = supported; + + rp->supported_features = cpu_to_le32(supported); + rp->enabled_features = cpu_to_le32(enabled); + rp->max_num_handles = cpu_to_le16(HCI_MAX_ADV_MONITOR_NUM_HANDLES); + rp->max_num_patterns = HCI_MAX_ADV_MONITOR_NUM_PATTERNS; + rp->num_handles = cpu_to_le16(num_handles); + if (num_handles) + memcpy(&rp->handles, &handles, 
(num_handles * sizeof(u16))); + + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_ADV_MONITOR_FEATURES, + MGMT_STATUS_SUCCESS, rp, rp_size); + + kfree(rp); + + return err; +} + +static void mgmt_add_adv_patterns_monitor_complete(struct hci_dev *hdev, + void *data, int status) +{ + struct mgmt_rp_add_adv_patterns_monitor rp; + struct mgmt_pending_cmd *cmd = data; + struct adv_monitor *monitor = cmd->user_data; + + hci_dev_lock(hdev); + + rp.monitor_handle = cpu_to_le16(monitor->handle); + + if (!status) { + mgmt_adv_monitor_added(cmd->sk, hdev, monitor->handle); + hdev->adv_monitors_cnt++; + if (monitor->state == ADV_MONITOR_STATE_NOT_REGISTERED) + monitor->state = ADV_MONITOR_STATE_REGISTERED; + hci_update_passive_scan(hdev); + } + + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(status), &rp, sizeof(rp)); + mgmt_pending_remove(cmd); + + hci_dev_unlock(hdev); + bt_dev_dbg(hdev, "add monitor %d complete, status %d", + rp.monitor_handle, status); +} + +static int mgmt_add_adv_patterns_monitor_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct adv_monitor *monitor = cmd->user_data; + + return hci_add_adv_monitor(hdev, monitor); +} + +static int __add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, + struct adv_monitor *m, u8 status, + void *data, u16 len, u16 op) +{ + struct mgmt_pending_cmd *cmd; + int err; + + hci_dev_lock(hdev); + + if (status) + goto unlock; + + if (pending_find(MGMT_OP_SET_LE, hdev) || + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev) || + pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev)) { + status = MGMT_STATUS_BUSY; + goto unlock; + } + + cmd = mgmt_pending_add(sk, op, hdev, data, len); + if (!cmd) { + status = MGMT_STATUS_NO_RESOURCES; + goto unlock; + } + + cmd->user_data = m; + err = hci_cmd_sync_queue(hdev, mgmt_add_adv_patterns_monitor_sync, cmd, + mgmt_add_adv_patterns_monitor_complete); + if (err) { + if (err == -ENOMEM) + status = MGMT_STATUS_NO_RESOURCES; + else + status = MGMT_STATUS_FAILED; + + goto unlock; + } + + hci_dev_unlock(hdev); + + return 0; + +unlock: + hci_free_adv_monitor(hdev, m); + hci_dev_unlock(hdev); + return mgmt_cmd_status(sk, hdev->id, op, status); +} + +static void parse_adv_monitor_rssi(struct adv_monitor *m, + struct mgmt_adv_rssi_thresholds *rssi) +{ + if (rssi) { + m->rssi.low_threshold = rssi->low_threshold; + m->rssi.low_threshold_timeout = + __le16_to_cpu(rssi->low_threshold_timeout); + m->rssi.high_threshold = rssi->high_threshold; + m->rssi.high_threshold_timeout = + __le16_to_cpu(rssi->high_threshold_timeout); + m->rssi.sampling_period = rssi->sampling_period; + } else { + /* Default values. These numbers are the least constricting + * parameters for MSFT API to work, so it behaves as if there + * are no RSSI parameters to consider. May need to be changed + * if other APIs are to be supported.
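+ * For example, the -127 dBm high threshold is met by every received + * advertisement, so the RSSI logic never suppresses a report.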
+ */ + m->rssi.low_threshold = -127; + m->rssi.low_threshold_timeout = 60; + m->rssi.high_threshold = -127; + m->rssi.high_threshold_timeout = 0; + m->rssi.sampling_period = 0; + } +} + +static u8 parse_adv_monitor_pattern(struct adv_monitor *m, u8 pattern_count, + struct mgmt_adv_pattern *patterns) +{ + u8 offset = 0, length = 0; + struct adv_pattern *p = NULL; + int i; + + for (i = 0; i < pattern_count; i++) { + offset = patterns[i].offset; + length = patterns[i].length; + if (offset >= HCI_MAX_AD_LENGTH || + length > HCI_MAX_AD_LENGTH || + (offset + length) > HCI_MAX_AD_LENGTH) + return MGMT_STATUS_INVALID_PARAMS; + + p = kmalloc(sizeof(*p), GFP_KERNEL); + if (!p) + return MGMT_STATUS_NO_RESOURCES; + + p->ad_type = patterns[i].ad_type; + p->offset = patterns[i].offset; + p->length = patterns[i].length; + memcpy(p->value, patterns[i].value, p->length); + + INIT_LIST_HEAD(&p->list); + list_add(&p->list, &m->patterns); + } + + return MGMT_STATUS_SUCCESS; +} + +static int add_adv_patterns_monitor(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_add_adv_patterns_monitor *cp = data; + struct adv_monitor *m = NULL; + u8 status = MGMT_STATUS_SUCCESS; + size_t expected_size = sizeof(*cp); + + BT_DBG("request for %s", hdev->name); + + if (len <= sizeof(*cp)) { + status = MGMT_STATUS_INVALID_PARAMS; + goto done; + } + + expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern); + if (len != expected_size) { + status = MGMT_STATUS_INVALID_PARAMS; + goto done; + } + + m = kzalloc(sizeof(*m), GFP_KERNEL); + if (!m) { + status = MGMT_STATUS_NO_RESOURCES; + goto done; + } + + INIT_LIST_HEAD(&m->patterns); + + parse_adv_monitor_rssi(m, NULL); + status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns); + +done: + return __add_adv_patterns_monitor(sk, hdev, m, status, data, len, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR); +} + +static int add_adv_patterns_monitor_rssi(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_add_adv_patterns_monitor_rssi *cp = data; + struct adv_monitor *m = NULL; + u8 status = MGMT_STATUS_SUCCESS; + size_t expected_size = sizeof(*cp); + + BT_DBG("request for %s", hdev->name); + + if (len <= sizeof(*cp)) { + status = MGMT_STATUS_INVALID_PARAMS; + goto done; + } + + expected_size += cp->pattern_count * sizeof(struct mgmt_adv_pattern); + if (len != expected_size) { + status = MGMT_STATUS_INVALID_PARAMS; + goto done; + } + + m = kzalloc(sizeof(*m), GFP_KERNEL); + if (!m) { + status = MGMT_STATUS_NO_RESOURCES; + goto done; + } + + INIT_LIST_HEAD(&m->patterns); + + parse_adv_monitor_rssi(m, &cp->rssi); + status = parse_adv_monitor_pattern(m, cp->pattern_count, cp->patterns); + +done: + return __add_adv_patterns_monitor(sk, hdev, m, status, data, len, + MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI); +} + +static void mgmt_remove_adv_monitor_complete(struct hci_dev *hdev, + void *data, int status) +{ + struct mgmt_rp_remove_adv_monitor rp; + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_adv_monitor *cp = cmd->param; + + hci_dev_lock(hdev); + + rp.monitor_handle = cp->monitor_handle; + + if (!status) + hci_update_passive_scan(hdev); + + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(status), &rp, sizeof(rp)); + mgmt_pending_remove(cmd); + + hci_dev_unlock(hdev); + bt_dev_dbg(hdev, "remove monitor %d complete, status %d", + rp.monitor_handle, status); +} + +static int mgmt_remove_adv_monitor_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct 
mgmt_cp_remove_adv_monitor *cp = cmd->param; + u16 handle = __le16_to_cpu(cp->monitor_handle); + + if (!handle) + return hci_remove_all_adv_monitor(hdev); + + return hci_remove_single_adv_monitor(hdev, handle); +} + +static int remove_adv_monitor(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_pending_cmd *cmd; + int err, status; + + hci_dev_lock(hdev); + + if (pending_find(MGMT_OP_SET_LE, hdev) || + pending_find(MGMT_OP_REMOVE_ADV_MONITOR, hdev) || + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR, hdev) || + pending_find(MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, hdev)) { + status = MGMT_STATUS_BUSY; + goto unlock; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_REMOVE_ADV_MONITOR, hdev, data, len); + if (!cmd) { + status = MGMT_STATUS_NO_RESOURCES; + goto unlock; + } + + err = hci_cmd_sync_queue(hdev, mgmt_remove_adv_monitor_sync, cmd, + mgmt_remove_adv_monitor_complete); + + if (err) { + mgmt_pending_remove(cmd); + + if (err == -ENOMEM) + status = MGMT_STATUS_NO_RESOURCES; + else + status = MGMT_STATUS_FAILED; + + goto unlock; + } + + hci_dev_unlock(hdev); + + return 0; + +unlock: + hci_dev_unlock(hdev); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADV_MONITOR, + status); +} + +static void read_local_oob_data_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_rp_read_local_oob_data mgmt_rp; + size_t rp_size = sizeof(mgmt_rp); + struct mgmt_pending_cmd *cmd = data; + struct sk_buff *skb = cmd->skb; + u8 status = mgmt_status(err); + + if (!status) { + if (!skb) + status = MGMT_STATUS_FAILED; + else if (IS_ERR(skb)) + status = mgmt_status(PTR_ERR(skb)); + else + status = mgmt_status(skb->data[0]); + } + + bt_dev_dbg(hdev, "status %d", status); + + if (status) { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, status); + goto remove; + } + + memset(&mgmt_rp, 0, sizeof(mgmt_rp)); + + if (!bredr_sc_enabled(hdev)) { + struct hci_rp_read_local_oob_data *rp = (void *) skb->data; + + if (skb->len < sizeof(*rp)) { + mgmt_cmd_status(cmd->sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_FAILED); + goto remove; + } + + memcpy(mgmt_rp.hash192, rp->hash, sizeof(rp->hash)); + memcpy(mgmt_rp.rand192, rp->rand, sizeof(rp->rand)); + + rp_size -= sizeof(mgmt_rp.hash256) + sizeof(mgmt_rp.rand256); + } else { + struct hci_rp_read_local_oob_ext_data *rp = (void *) skb->data; + + if (skb->len < sizeof(*rp)) { + mgmt_cmd_status(cmd->sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_FAILED); + goto remove; + } + + memcpy(mgmt_rp.hash192, rp->hash192, sizeof(rp->hash192)); + memcpy(mgmt_rp.rand192, rp->rand192, sizeof(rp->rand192)); + + memcpy(mgmt_rp.hash256, rp->hash256, sizeof(rp->hash256)); + memcpy(mgmt_rp.rand256, rp->rand256, sizeof(rp->rand256)); + } + + mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_SUCCESS, &mgmt_rp, rp_size); + +remove: + if (skb && !IS_ERR(skb)) + kfree_skb(skb); + + mgmt_pending_free(cmd); +} + +static int read_local_oob_data_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + + if (bredr_sc_enabled(hdev)) + cmd->skb = hci_read_local_oob_data_sync(hdev, true, cmd->sk); + else + cmd->skb = hci_read_local_oob_data_sync(hdev, false, cmd->sk); + + if (IS_ERR(cmd->skb)) + return PTR_ERR(cmd->skb); + else + return 0; +} + +static int read_local_oob_data(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if 
(!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_NOT_POWERED); + goto unlock; + } + + if (!lmp_ssp_capable(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_NOT_SUPPORTED); + goto unlock; + } + + cmd = mgmt_pending_new(sk, MGMT_OP_READ_LOCAL_OOB_DATA, hdev, NULL, 0); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd, + read_local_oob_data_complete); + + if (err < 0) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_DATA, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_free(cmd); + } + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int add_remote_oob_data(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_addr_info *addr = data; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!bdaddr_type_is_valid(addr->type)) + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + addr, sizeof(*addr)); + + hci_dev_lock(hdev); + + if (len == MGMT_ADD_REMOTE_OOB_DATA_SIZE) { + struct mgmt_cp_add_remote_oob_data *cp = data; + u8 status; + + if (cp->addr.type != BDADDR_BREDR) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + goto unlock; + } + + err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, + cp->addr.type, cp->hash, + cp->rand, NULL, NULL); + if (err < 0) + status = MGMT_STATUS_FAILED; + else + status = MGMT_STATUS_SUCCESS; + + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, status, + &cp->addr, sizeof(cp->addr)); + } else if (len == MGMT_ADD_REMOTE_OOB_EXT_DATA_SIZE) { + struct mgmt_cp_add_remote_oob_ext_data *cp = data; + u8 *rand192, *hash192, *rand256, *hash256; + u8 status; + + if (bdaddr_type_is_le(cp->addr.type)) { + /* Enforce zero-valued 192-bit parameters as + * long as legacy SMP OOB isn't implemented. + */ + if (memcmp(cp->rand192, ZERO_KEY, 16) || + memcmp(cp->hash192, ZERO_KEY, 16)) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + addr, sizeof(*addr)); + goto unlock; + } + + rand192 = NULL; + hash192 = NULL; + } else { + /* In case one of the P-192 values is set to zero, + * then just disable OOB data for P-192. + */ + if (!memcmp(cp->rand192, ZERO_KEY, 16) || + !memcmp(cp->hash192, ZERO_KEY, 16)) { + rand192 = NULL; + hash192 = NULL; + } else { + rand192 = cp->rand192; + hash192 = cp->hash192; + } + } + + /* In case one of the P-256 values is set to zero, then just + * disable OOB data for P-256. 
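+ * (e.g. when user space only obtained P-192 data from the remote and + * therefore passes all-zero rand256/hash256 here).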
+ */ + if (!memcmp(cp->rand256, ZERO_KEY, 16) || + !memcmp(cp->hash256, ZERO_KEY, 16)) { + rand256 = NULL; + hash256 = NULL; + } else { + rand256 = cp->rand256; + hash256 = cp->hash256; + } + + err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, + cp->addr.type, hash192, rand192, + hash256, rand256); + if (err < 0) + status = MGMT_STATUS_FAILED; + else + status = MGMT_STATUS_SUCCESS; + + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_REMOTE_OOB_DATA, + status, &cp->addr, sizeof(cp->addr)); + } else { + bt_dev_err(hdev, "add_remote_oob_data: invalid len of %u bytes", + len); + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS); + } + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int remove_remote_oob_data(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_remove_remote_oob_data *cp = data; + u8 status; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (cp->addr.type != BDADDR_BREDR) + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_REMOVE_REMOTE_OOB_DATA, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + + hci_dev_lock(hdev); + + if (!bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { + hci_remote_oob_data_clear(hdev); + status = MGMT_STATUS_SUCCESS; + goto done; + } + + err = hci_remove_remote_oob_data(hdev, &cp->addr.bdaddr, cp->addr.type); + if (err < 0) + status = MGMT_STATUS_INVALID_PARAMS; + else + status = MGMT_STATUS_SUCCESS; + +done: + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_REMOTE_OOB_DATA, + status, &cp->addr, sizeof(cp->addr)); + + hci_dev_unlock(hdev); + return err; +} + +void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status) +{ + struct mgmt_pending_cmd *cmd; + + bt_dev_dbg(hdev, "status %u", status); + + hci_dev_lock(hdev); + + cmd = pending_find(MGMT_OP_START_DISCOVERY, hdev); + if (!cmd) + cmd = pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev); + + if (!cmd) + cmd = pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev); + + if (cmd) { + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); + } + + hci_dev_unlock(hdev); +} + +static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type, + uint8_t *mgmt_status) +{ + switch (type) { + case DISCOV_TYPE_LE: + *mgmt_status = mgmt_le_support(hdev); + if (*mgmt_status) + return false; + break; + case DISCOV_TYPE_INTERLEAVED: + *mgmt_status = mgmt_le_support(hdev); + if (*mgmt_status) + return false; + fallthrough; + case DISCOV_TYPE_BREDR: + *mgmt_status = mgmt_bredr_support(hdev); + if (*mgmt_status) + return false; + break; + default: + *mgmt_status = MGMT_STATUS_INVALID_PARAMS; + return false; + } + + return true; +} + +static void start_discovery_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + if (cmd != pending_find(MGMT_OP_START_DISCOVERY, hdev) && + cmd != pending_find(MGMT_OP_START_LIMITED_DISCOVERY, hdev) && + cmd != pending_find(MGMT_OP_START_SERVICE_DISCOVERY, hdev)) + return; + + bt_dev_dbg(hdev, "err %d", err); + + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + cmd->param, 1); + mgmt_pending_remove(cmd); + + hci_discovery_set_state(hdev, err ? 
DISCOVERY_STOPPED: + DISCOVERY_FINDING); +} + +static int start_discovery_sync(struct hci_dev *hdev, void *data) +{ + return hci_start_discovery_sync(hdev); +} + +static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev, + u16 op, void *data, u16 len) +{ + struct mgmt_cp_start_discovery *cp = data; + struct mgmt_pending_cmd *cmd; + u8 status; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, op, + MGMT_STATUS_NOT_POWERED, + &cp->type, sizeof(cp->type)); + goto failed; + } + + if (hdev->discovery.state != DISCOVERY_STOPPED || + hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) { + err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY, + &cp->type, sizeof(cp->type)); + goto failed; + } + + if (!discovery_type_is_valid(hdev, cp->type, &status)) { + err = mgmt_cmd_complete(sk, hdev->id, op, status, + &cp->type, sizeof(cp->type)); + goto failed; + } + + /* Can't start discovery when it is paused */ + if (hdev->discovery_paused) { + err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY, + &cp->type, sizeof(cp->type)); + goto failed; + } + + /* Clear the discovery filter first to free any previously + * allocated memory for the UUID list. + */ + hci_discovery_filter_clear(hdev); + + hdev->discovery.type = cp->type; + hdev->discovery.report_invalid_rssi = false; + if (op == MGMT_OP_START_LIMITED_DISCOVERY) + hdev->discovery.limited = true; + else + hdev->discovery.limited = false; + + cmd = mgmt_pending_add(sk, op, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, + start_discovery_complete); + if (err < 0) { + mgmt_pending_remove(cmd); + goto failed; + } + + hci_discovery_set_state(hdev, DISCOVERY_STARTING); + +failed: + hci_dev_unlock(hdev); + return err; +} + +static int start_discovery(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + return start_discovery_internal(sk, hdev, MGMT_OP_START_DISCOVERY, + data, len); +} + +static int start_limited_discovery(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + return start_discovery_internal(sk, hdev, + MGMT_OP_START_LIMITED_DISCOVERY, + data, len); +} + +static int start_service_discovery(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_start_service_discovery *cp = data; + struct mgmt_pending_cmd *cmd; + const u16 max_uuid_count = ((U16_MAX - sizeof(*cp)) / 16); + u16 uuid_count, expected_len; + u8 status; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_NOT_POWERED, + &cp->type, sizeof(cp->type)); + goto failed; + } + + if (hdev->discovery.state != DISCOVERY_STOPPED || + hci_dev_test_flag(hdev, HCI_PERIODIC_INQ)) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_BUSY, &cp->type, + sizeof(cp->type)); + goto failed; + } + + if (hdev->discovery_paused) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_BUSY, &cp->type, + sizeof(cp->type)); + goto failed; + } + + uuid_count = __le16_to_cpu(cp->uuid_count); + if (uuid_count > max_uuid_count) { + bt_dev_err(hdev, "service_discovery: too big uuid_count value %u", + uuid_count); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_INVALID_PARAMS, &cp->type, + sizeof(cp->type)); + goto 
failed; + } + + expected_len = sizeof(*cp) + uuid_count * 16; + if (expected_len != len) { + bt_dev_err(hdev, "service_discovery: expected %u bytes, got %u bytes", + expected_len, len); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_INVALID_PARAMS, &cp->type, + sizeof(cp->type)); + goto failed; + } + + if (!discovery_type_is_valid(hdev, cp->type, &status)) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + status, &cp->type, sizeof(cp->type)); + goto failed; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_START_SERVICE_DISCOVERY, + hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto failed; + } + + /* Clear the discovery filter first to free any previously + * allocated memory for the UUID list. + */ + hci_discovery_filter_clear(hdev); + + hdev->discovery.result_filtering = true; + hdev->discovery.type = cp->type; + hdev->discovery.rssi = cp->rssi; + hdev->discovery.uuid_count = uuid_count; + + if (uuid_count > 0) { + hdev->discovery.uuids = kmemdup(cp->uuids, uuid_count * 16, + GFP_KERNEL); + if (!hdev->discovery.uuids) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_START_SERVICE_DISCOVERY, + MGMT_STATUS_FAILED, + &cp->type, sizeof(cp->type)); + mgmt_pending_remove(cmd); + goto failed; + } + } + + err = hci_cmd_sync_queue(hdev, start_discovery_sync, cmd, + start_discovery_complete); + if (err < 0) { + mgmt_pending_remove(cmd); + goto failed; + } + + hci_discovery_set_state(hdev, DISCOVERY_STARTING); + +failed: + hci_dev_unlock(hdev); + return err; +} + +void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status) +{ + struct mgmt_pending_cmd *cmd; + + bt_dev_dbg(hdev, "status %u", status); + + hci_dev_lock(hdev); + + cmd = pending_find(MGMT_OP_STOP_DISCOVERY, hdev); + if (cmd) { + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); + } + + hci_dev_unlock(hdev); +} + +static void stop_discovery_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + if (cmd != pending_find(MGMT_OP_STOP_DISCOVERY, hdev)) + return; + + bt_dev_dbg(hdev, "err %d", err); + + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, mgmt_status(err), + cmd->param, 1); + mgmt_pending_remove(cmd); + + if (!err) + hci_discovery_set_state(hdev, DISCOVERY_STOPPED); +} + +static int stop_discovery_sync(struct hci_dev *hdev, void *data) +{ + return hci_stop_discovery_sync(hdev); +} + +static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_stop_discovery *mgmt_cp = data; + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (!hci_discovery_active(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, + MGMT_STATUS_REJECTED, &mgmt_cp->type, + sizeof(mgmt_cp->type)); + goto unlock; + } + + if (hdev->discovery.type != mgmt_cp->type) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, + MGMT_STATUS_INVALID_PARAMS, + &mgmt_cp->type, sizeof(mgmt_cp->type)); + goto unlock; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_STOP_DISCOVERY, hdev, data, len); + if (!cmd) { + err = -ENOMEM; + goto unlock; + } + + err = hci_cmd_sync_queue(hdev, stop_discovery_sync, cmd, + stop_discovery_complete); + if (err < 0) { + mgmt_pending_remove(cmd); + goto unlock; + } + + hci_discovery_set_state(hdev, DISCOVERY_STOPPING); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int confirm_name(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct 
mgmt_cp_confirm_name *cp = data; + struct inquiry_entry *e; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (!hci_discovery_active(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, + MGMT_STATUS_FAILED, &cp->addr, + sizeof(cp->addr)); + goto failed; + } + + e = hci_inquiry_cache_lookup_unknown(hdev, &cp->addr.bdaddr); + if (!e) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, + MGMT_STATUS_INVALID_PARAMS, &cp->addr, + sizeof(cp->addr)); + goto failed; + } + + if (cp->name_known) { + e->name_state = NAME_KNOWN; + list_del(&e->list); + } else { + e->name_state = NAME_NEEDED; + hci_inquiry_cache_update_resolve(hdev, e); + } + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_CONFIRM_NAME, 0, + &cp->addr, sizeof(cp->addr)); + +failed: + hci_dev_unlock(hdev); + return err; +} + +static int block_device(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_block_device *cp = data; + u8 status; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!bdaddr_type_is_valid(cp->addr.type)) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + + hci_dev_lock(hdev); + + err = hci_bdaddr_list_add(&hdev->reject_list, &cp->addr.bdaddr, + cp->addr.type); + if (err < 0) { + status = MGMT_STATUS_FAILED; + goto done; + } + + mgmt_event(MGMT_EV_DEVICE_BLOCKED, hdev, &cp->addr, sizeof(cp->addr), + sk); + status = MGMT_STATUS_SUCCESS; + +done: + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_BLOCK_DEVICE, status, + &cp->addr, sizeof(cp->addr)); + + hci_dev_unlock(hdev); + + return err; +} + +static int unblock_device(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_unblock_device *cp = data; + u8 status; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!bdaddr_type_is_valid(cp->addr.type)) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + + hci_dev_lock(hdev); + + err = hci_bdaddr_list_del(&hdev->reject_list, &cp->addr.bdaddr, + cp->addr.type); + if (err < 0) { + status = MGMT_STATUS_INVALID_PARAMS; + goto done; + } + + mgmt_event(MGMT_EV_DEVICE_UNBLOCKED, hdev, &cp->addr, sizeof(cp->addr), + sk); + status = MGMT_STATUS_SUCCESS; + +done: + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNBLOCK_DEVICE, status, + &cp->addr, sizeof(cp->addr)); + + hci_dev_unlock(hdev); + + return err; +} + +static int set_device_id_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_eir_sync(hdev); +} + +static int set_device_id(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_set_device_id *cp = data; + int err; + __u16 source; + + bt_dev_dbg(hdev, "sock %p", sk); + + source = __le16_to_cpu(cp->source); + + if (source > 0x0002) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + hdev->devid_source = source; + hdev->devid_vendor = __le16_to_cpu(cp->vendor); + hdev->devid_product = __le16_to_cpu(cp->product); + hdev->devid_version = __le16_to_cpu(cp->version); + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_DEVICE_ID, 0, + NULL, 0); + + hci_cmd_sync_queue(hdev, set_device_id_sync, NULL, NULL); + + hci_dev_unlock(hdev); + + return err; +} + +static void enable_advertising_instance(struct hci_dev *hdev, int err) +{ + if (err) + bt_dev_err(hdev, "failed to re-configure advertising %d", err); + else + bt_dev_dbg(hdev, "status %d", err); +} + 
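+/* Overview of the Set Advertising handling below: cp->val may be 0x00 + * (off), 0x01 (on) or 0x02 (connectable advertising, which also sets + * HCI_ADVERTISING_CONNECTABLE). The completion handler mirrors the + * HCI_LE_ADV controller state into HCI_ADVERTISING and, when advertising + * was just disabled, re-schedules any configured advertising instances. + */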
+static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) +{ + struct cmd_lookup match = { NULL, hdev }; + u8 instance; + struct adv_info *adv_instance; + u8 status = mgmt_status(err); + + if (status) { + mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, + cmd_status_rsp, &status); + return; + } + + if (hci_dev_test_flag(hdev, HCI_LE_ADV)) + hci_dev_set_flag(hdev, HCI_ADVERTISING); + else + hci_dev_clear_flag(hdev, HCI_ADVERTISING); + + mgmt_pending_foreach(MGMT_OP_SET_ADVERTISING, hdev, settings_rsp, + &match); + + new_settings(hdev, match.sk); + + if (match.sk) + sock_put(match.sk); + + /* If "Set Advertising" was just disabled and instance advertising was + * set up earlier, then re-enable multi-instance advertising. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || + list_empty(&hdev->adv_instances)) + return; + + instance = hdev->cur_adv_instance; + if (!instance) { + adv_instance = list_first_entry_or_null(&hdev->adv_instances, + struct adv_info, list); + if (!adv_instance) + return; + + instance = adv_instance->instance; + } + + err = hci_schedule_adv_instance_sync(hdev, instance, true); + + enable_advertising_instance(hdev, err); +} + +static int set_adv_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 val = !!cp->val; + + if (cp->val == 0x02) + hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + else + hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + + cancel_adv_timeout(hdev); + + if (val) { + /* Switch to instance "0" for the Set Advertising setting. + * We cannot use update_[adv|scan_rsp]_data() here as the + * HCI_ADVERTISING flag is not yet set. + */ + hdev->cur_adv_instance = 0x00; + + if (ext_adv_capable(hdev)) { + hci_start_ext_adv_sync(hdev, 0x00); + } else { + hci_update_adv_data_sync(hdev, 0x00); + hci_update_scan_rsp_data_sync(hdev, 0x00); + hci_enable_advertising_sync(hdev); + } + } else { + hci_disable_advertising_sync(hdev); + } + + return 0; +} + +static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_mode *cp = data; + struct mgmt_pending_cmd *cmd; + u8 val, status; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + status = mgmt_le_support(hdev); + if (status) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + status); + + if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_INVALID_PARAMS); + + if (hdev->advertising_paused) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_BUSY); + + hci_dev_lock(hdev); + + val = !!cp->val; + + /* The following conditions are ones which mean that we should + * not do any HCI communication but directly send a mgmt + * response to user space (after toggling the flag if + * necessary). 
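+ * For example, switching between 0x01 and 0x02 while powered off only + * flips HCI_ADVERTISING_CONNECTABLE and replies with the new settings.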
+ */ + if (!hdev_is_powered(hdev) || + (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) && + (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) || + hci_dev_test_flag(hdev, HCI_MESH) || + hci_conn_num(hdev, LE_LINK) > 0 || + (hci_dev_test_flag(hdev, HCI_LE_SCAN) && + hdev->le_scan_type == LE_SCAN_ACTIVE)) { + bool changed; + + if (cp->val) { + hdev->cur_adv_instance = 0x00; + changed = !hci_dev_test_and_set_flag(hdev, HCI_ADVERTISING); + if (cp->val == 0x02) + hci_dev_set_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + else + hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + } else { + changed = hci_dev_test_and_clear_flag(hdev, HCI_ADVERTISING); + hci_dev_clear_flag(hdev, HCI_ADVERTISING_CONNECTABLE); + } + + err = send_settings_rsp(sk, MGMT_OP_SET_ADVERTISING, hdev); + if (err < 0) + goto unlock; + + if (changed) + err = new_settings(hdev, sk); + + goto unlock; + } + + if (pending_find(MGMT_OP_SET_ADVERTISING, hdev) || + pending_find(MGMT_OP_SET_LE, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING, + MGMT_STATUS_BUSY); + goto unlock; + } + + cmd = mgmt_pending_add(sk, MGMT_OP_SET_ADVERTISING, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_adv_sync, cmd, + set_advertising_complete); + + if (err < 0 && cmd) + mgmt_pending_remove(cmd); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int set_static_address(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_set_static_address *cp = data; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, + MGMT_STATUS_NOT_SUPPORTED); + + if (hdev_is_powered(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_STATIC_ADDRESS, + MGMT_STATUS_REJECTED); + + if (bacmp(&cp->bdaddr, BDADDR_ANY)) { + if (!bacmp(&cp->bdaddr, BDADDR_NONE)) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_STATIC_ADDRESS, + MGMT_STATUS_INVALID_PARAMS); + + /* Two most significant bits shall be set */ + if ((cp->bdaddr.b[5] & 0xc0) != 0xc0) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_STATIC_ADDRESS, + MGMT_STATUS_INVALID_PARAMS); + } + + hci_dev_lock(hdev); + + bacpy(&hdev->static_addr, &cp->bdaddr); + + err = send_settings_rsp(sk, MGMT_OP_SET_STATIC_ADDRESS, hdev); + if (err < 0) + goto unlock; + + err = new_settings(hdev, sk); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int set_scan_params(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_set_scan_params *cp = data; + __u16 interval, window; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, + MGMT_STATUS_NOT_SUPPORTED); + + interval = __le16_to_cpu(cp->interval); + + if (interval < 0x0004 || interval > 0x4000) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, + MGMT_STATUS_INVALID_PARAMS); + + window = __le16_to_cpu(cp->window); + + if (window < 0x0004 || window > 0x4000) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, + MGMT_STATUS_INVALID_PARAMS); + + if (window > interval) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + hdev->le_scan_interval = interval; + hdev->le_scan_window = window; + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_SCAN_PARAMS, 0, + NULL, 0); + + /* If background scan is running, restart it so new parameters are + * 
loaded. + */ + if (hci_dev_test_flag(hdev, HCI_LE_SCAN) && + hdev->discovery.state == DISCOVERY_STOPPED) + hci_update_passive_scan(hdev); + + hci_dev_unlock(hdev); + + return err; +} + +static void fast_connectable_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + bt_dev_dbg(hdev, "err %d", err); + + if (err) { + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + mgmt_status(err)); + } else { + struct mgmt_mode *cp = cmd->param; + + if (cp->val) + hci_dev_set_flag(hdev, HCI_FAST_CONNECTABLE); + else + hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE); + + send_settings_rsp(cmd->sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); + new_settings(hdev, cmd->sk); + } + + mgmt_pending_free(cmd); +} + +static int write_fast_connectable_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + + return hci_write_fast_connectable_sync(hdev, cp->val); +} + +static int set_fast_connectable(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_mode *cp = data; + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) || + hdev->hci_ver < BLUETOOTH_VER_1_2) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_STATUS_NOT_SUPPORTED); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (!!cp->val == hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) { + err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); + goto unlock; + } + + if (!hdev_is_powered(hdev)) { + hci_dev_change_flag(hdev, HCI_FAST_CONNECTABLE); + err = send_settings_rsp(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev); + new_settings(hdev, sk); + goto unlock; + } + + cmd = mgmt_pending_new(sk, MGMT_OP_SET_FAST_CONNECTABLE, hdev, data, + len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, write_fast_connectable_sync, cmd, + fast_connectable_complete); + + if (err < 0) { + mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_FAST_CONNECTABLE, + MGMT_STATUS_FAILED); + + if (cmd) + mgmt_pending_free(cmd); + } + +unlock: + hci_dev_unlock(hdev); + + return err; +} + +static void set_bredr_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + + bt_dev_dbg(hdev, "err %d", err); + + if (err) { + u8 mgmt_err = mgmt_status(err); + + /* We need to restore the flag if related HCI commands + * failed. + */ + hci_dev_clear_flag(hdev, HCI_BREDR_ENABLED); + + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + } else { + send_settings_rsp(cmd->sk, MGMT_OP_SET_BREDR, hdev); + new_settings(hdev, cmd->sk); + } + + mgmt_pending_free(cmd); +} + +static int set_bredr_sync(struct hci_dev *hdev, void *data) +{ + int status; + + status = hci_write_fast_connectable_sync(hdev, false); + + if (!status) + status = hci_update_scan_sync(hdev); + + /* Since only the advertising data flags will change, there + * is no need to update the scan response data. 
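+ * (only the flags AD field, e.g. its "BR/EDR Not Supported" bit, + * depends on the BR/EDR setting).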
+ */ + if (!status) + status = hci_update_adv_data_sync(hdev, hdev->cur_adv_instance); + + return status; +} + +static int set_bredr(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ + struct mgmt_mode *cp = data; + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_bredr_capable(hdev) || !lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_NOT_SUPPORTED); + + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_REJECTED); + + if (cp->val != 0x00 && cp->val != 0x01) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (cp->val == hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); + goto unlock; + } + + if (!hdev_is_powered(hdev)) { + if (!cp->val) { + hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); + hci_dev_clear_flag(hdev, HCI_SSP_ENABLED); + hci_dev_clear_flag(hdev, HCI_LINK_SECURITY); + hci_dev_clear_flag(hdev, HCI_FAST_CONNECTABLE); + hci_dev_clear_flag(hdev, HCI_HS_ENABLED); + } + + hci_dev_change_flag(hdev, HCI_BREDR_ENABLED); + + err = send_settings_rsp(sk, MGMT_OP_SET_BREDR, hdev); + if (err < 0) + goto unlock; + + err = new_settings(hdev, sk); + goto unlock; + } + + /* Reject disabling when powered on */ + if (!cp->val) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_REJECTED); + goto unlock; + } else { + /* When configuring a dual-mode controller to operate + * with LE only and using a static address, then switching + * BR/EDR back on is not allowed. + * + * Dual-mode controllers shall operate with the public + * address as their identity address for BR/EDR and LE. So + * reject the attempt to create an invalid configuration. + * + * The same restrictions apply when Secure Connections + * has been enabled. For BR/EDR this is a controller feature + * while for LE it is a host stack feature. This means that + * switching BR/EDR back on when Secure Connections has been + * enabled is not a supported transaction. + */ + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && + (bacmp(&hdev->static_addr, BDADDR_ANY) || + hci_dev_test_flag(hdev, HCI_SC_ENABLED))) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_REJECTED); + goto unlock; + } + } + + cmd = mgmt_pending_new(sk, MGMT_OP_SET_BREDR, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_bredr_sync, cmd, + set_bredr_complete); + + if (err < 0) { + mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_BREDR, + MGMT_STATUS_FAILED); + if (cmd) + mgmt_pending_free(cmd); + + goto unlock; + } + + /* We need to flip the bit already here so that + * hci_req_update_adv_data generates the correct flags.
+ */ + hci_dev_set_flag(hdev, HCI_BREDR_ENABLED); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static void set_secure_conn_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp; + + bt_dev_dbg(hdev, "err %d", err); + + if (err) { + u8 mgmt_err = mgmt_status(err); + + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, mgmt_err); + goto done; + } + + cp = cmd->param; + + switch (cp->val) { + case 0x00: + hci_dev_clear_flag(hdev, HCI_SC_ENABLED); + hci_dev_clear_flag(hdev, HCI_SC_ONLY); + break; + case 0x01: + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + hci_dev_clear_flag(hdev, HCI_SC_ONLY); + break; + case 0x02: + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + hci_dev_set_flag(hdev, HCI_SC_ONLY); + break; + } + + send_settings_rsp(cmd->sk, cmd->opcode, hdev); + new_settings(hdev, cmd->sk); + +done: + mgmt_pending_free(cmd); +} + +static int set_secure_conn_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_mode *cp = cmd->param; + u8 val = !!cp->val; + + /* Force write of val */ + hci_dev_set_flag(hdev, HCI_SC_ENABLED); + + return hci_write_sc_support_sync(hdev, val); +} + +static int set_secure_conn(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_mode *cp = data; + struct mgmt_pending_cmd *cmd; + u8 val; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_sc_capable(hdev) && + !hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + MGMT_STATUS_NOT_SUPPORTED); + + if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && + lmp_sc_capable(hdev) && + !hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + MGMT_STATUS_REJECTED); + + if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev) || !lmp_sc_capable(hdev) || + !hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { + bool changed; + + if (cp->val) { + changed = !hci_dev_test_and_set_flag(hdev, + HCI_SC_ENABLED); + if (cp->val == 0x02) + hci_dev_set_flag(hdev, HCI_SC_ONLY); + else + hci_dev_clear_flag(hdev, HCI_SC_ONLY); + } else { + changed = hci_dev_test_and_clear_flag(hdev, + HCI_SC_ENABLED); + hci_dev_clear_flag(hdev, HCI_SC_ONLY); + } + + err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); + if (err < 0) + goto failed; + + if (changed) + err = new_settings(hdev, sk); + + goto failed; + } + + val = !!cp->val; + + if (val == hci_dev_test_flag(hdev, HCI_SC_ENABLED) && + (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_SC_ONLY)) { + err = send_settings_rsp(sk, MGMT_OP_SET_SECURE_CONN, hdev); + goto failed; + } + + cmd = mgmt_pending_new(sk, MGMT_OP_SET_SECURE_CONN, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, set_secure_conn_sync, cmd, + set_secure_conn_complete); + + if (err < 0) { + mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_SECURE_CONN, + MGMT_STATUS_FAILED); + if (cmd) + mgmt_pending_free(cmd); + } + +failed: + hci_dev_unlock(hdev); + return err; +} + +static int set_debug_keys(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_mode *cp = data; + bool changed, use_changed; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (cp->val != 0x00 && cp->val != 0x01 && cp->val != 0x02) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEBUG_KEYS, + MGMT_STATUS_INVALID_PARAMS); + 
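+ /* Mode summary: 0x00 clears HCI_KEEP_DEBUG_KEYS, 0x01 sets it, and + * 0x02 additionally sets HCI_USE_DEBUG_KEYS; the SSP debug mode is + * only written to the controller below when the device is powered + * and SSP is enabled. + */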
+ hci_dev_lock(hdev); + + if (cp->val) + changed = !hci_dev_test_and_set_flag(hdev, HCI_KEEP_DEBUG_KEYS); + else + changed = hci_dev_test_and_clear_flag(hdev, + HCI_KEEP_DEBUG_KEYS); + + if (cp->val == 0x02) + use_changed = !hci_dev_test_and_set_flag(hdev, + HCI_USE_DEBUG_KEYS); + else + use_changed = hci_dev_test_and_clear_flag(hdev, + HCI_USE_DEBUG_KEYS); + + if (hdev_is_powered(hdev) && use_changed && + hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { + u8 mode = (cp->val == 0x02) ? 0x01 : 0x00; + hci_send_cmd(hdev, HCI_OP_WRITE_SSP_DEBUG_MODE, + sizeof(mode), &mode); + } + + err = send_settings_rsp(sk, MGMT_OP_SET_DEBUG_KEYS, hdev); + if (err < 0) + goto unlock; + + if (changed) + err = new_settings(hdev, sk); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static int set_privacy(struct sock *sk, struct hci_dev *hdev, void *cp_data, + u16 len) +{ + struct mgmt_cp_set_privacy *cp = cp_data; + bool changed; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, + MGMT_STATUS_NOT_SUPPORTED); + + if (cp->privacy != 0x00 && cp->privacy != 0x01 && cp->privacy != 0x02) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, + MGMT_STATUS_INVALID_PARAMS); + + if (hdev_is_powered(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PRIVACY, + MGMT_STATUS_REJECTED); + + hci_dev_lock(hdev); + + /* If user space supports this command it is also expected to + * handle IRKs. Therefore, set the HCI_RPA_RESOLVING flag. + */ + hci_dev_set_flag(hdev, HCI_RPA_RESOLVING); + + if (cp->privacy) { + changed = !hci_dev_test_and_set_flag(hdev, HCI_PRIVACY); + memcpy(hdev->irk, cp->irk, sizeof(hdev->irk)); + hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, true); + if (cp->privacy == 0x02) + hci_dev_set_flag(hdev, HCI_LIMITED_PRIVACY); + else + hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY); + } else { + changed = hci_dev_test_and_clear_flag(hdev, HCI_PRIVACY); + memset(hdev->irk, 0, sizeof(hdev->irk)); + hci_dev_clear_flag(hdev, HCI_RPA_EXPIRED); + hci_adv_instances_set_rpa_expired(hdev, false); + hci_dev_clear_flag(hdev, HCI_LIMITED_PRIVACY); + } + + err = send_settings_rsp(sk, MGMT_OP_SET_PRIVACY, hdev); + if (err < 0) + goto unlock; + + if (changed) + err = new_settings(hdev, sk); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static bool irk_is_valid(struct mgmt_irk_info *irk) +{ + switch (irk->addr.type) { + case BDADDR_LE_PUBLIC: + return true; + + case BDADDR_LE_RANDOM: + /* Two most significant bits shall be set */ + if ((irk->addr.bdaddr.b[5] & 0xc0) != 0xc0) + return false; + return true; + } + + return false; +} + +static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data, + u16 len) +{ + struct mgmt_cp_load_irks *cp = cp_data; + const u16 max_irk_count = ((U16_MAX - sizeof(*cp)) / + sizeof(struct mgmt_irk_info)); + u16 irk_count, expected_len; + int i, err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, + MGMT_STATUS_NOT_SUPPORTED); + + irk_count = __le16_to_cpu(cp->irk_count); + if (irk_count > max_irk_count) { + bt_dev_err(hdev, "load_irks: too big irk_count value %u", + irk_count); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_IRKS, + MGMT_STATUS_INVALID_PARAMS); + } + + expected_len = struct_size(cp, irks, irk_count); + if (expected_len != len) { + bt_dev_err(hdev, "load_irks: expected %u bytes, got %u bytes", + expected_len, len); + return mgmt_cmd_status(sk, 
hdev->id, MGMT_OP_LOAD_IRKS, + MGMT_STATUS_INVALID_PARAMS); + } + + bt_dev_dbg(hdev, "irk_count %u", irk_count); + + for (i = 0; i < irk_count; i++) { + struct mgmt_irk_info *key = &cp->irks[i]; + + if (!irk_is_valid(key)) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_LOAD_IRKS, + MGMT_STATUS_INVALID_PARAMS); + } + + hci_dev_lock(hdev); + + hci_smp_irks_clear(hdev); + + for (i = 0; i < irk_count; i++) { + struct mgmt_irk_info *irk = &cp->irks[i]; + u8 addr_type = le_addr_type(irk->addr.type); + + if (hci_is_blocked_key(hdev, + HCI_BLOCKED_KEY_TYPE_IRK, + irk->val)) { + bt_dev_warn(hdev, "Skipping blocked IRK for %pMR", + &irk->addr.bdaddr); + continue; + } + + /* When using SMP over BR/EDR, the addr type should be set to BREDR */ + if (irk->addr.type == BDADDR_BREDR) + addr_type = BDADDR_BREDR; + + hci_add_irk(hdev, &irk->addr.bdaddr, + addr_type, irk->val, + BDADDR_ANY); + } + + hci_dev_set_flag(hdev, HCI_RPA_RESOLVING); + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_IRKS, 0, NULL, 0); + + hci_dev_unlock(hdev); + + return err; +} + +static bool ltk_is_valid(struct mgmt_ltk_info *key) +{ + if (key->initiator != 0x00 && key->initiator != 0x01) + return false; + + switch (key->addr.type) { + case BDADDR_LE_PUBLIC: + return true; + + case BDADDR_LE_RANDOM: + /* Two most significant bits shall be set */ + if ((key->addr.bdaddr.b[5] & 0xc0) != 0xc0) + return false; + return true; + } + + return false; +} + +static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev, + void *cp_data, u16 len) +{ + struct mgmt_cp_load_long_term_keys *cp = cp_data; + const u16 max_key_count = ((U16_MAX - sizeof(*cp)) / + sizeof(struct mgmt_ltk_info)); + u16 key_count, expected_len; + int i, err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, + MGMT_STATUS_NOT_SUPPORTED); + + key_count = __le16_to_cpu(cp->key_count); + if (key_count > max_key_count) { + bt_dev_err(hdev, "load_ltks: too big key_count value %u", + key_count); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, + MGMT_STATUS_INVALID_PARAMS); + } + + expected_len = struct_size(cp, keys, key_count); + if (expected_len != len) { + bt_dev_err(hdev, "load_keys: expected %u bytes, got %u bytes", + expected_len, len); + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, + MGMT_STATUS_INVALID_PARAMS); + } + + bt_dev_dbg(hdev, "key_count %u", key_count); + + for (i = 0; i < key_count; i++) { + struct mgmt_ltk_info *key = &cp->keys[i]; + + if (!ltk_is_valid(key)) + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_LOAD_LONG_TERM_KEYS, + MGMT_STATUS_INVALID_PARAMS); + } + + hci_dev_lock(hdev); + + hci_smp_ltks_clear(hdev); + + for (i = 0; i < key_count; i++) { + struct mgmt_ltk_info *key = &cp->keys[i]; + u8 type, authenticated; + u8 addr_type = le_addr_type(key->addr.type); + + if (hci_is_blocked_key(hdev, + HCI_BLOCKED_KEY_TYPE_LTK, + key->val)) { + bt_dev_warn(hdev, "Skipping blocked LTK for %pMR", + &key->addr.bdaddr); + continue; + } + + switch (key->type) { + case MGMT_LTK_UNAUTHENTICATED: + authenticated = 0x00; + type = key->initiator ? SMP_LTK : SMP_LTK_RESPONDER; + break; + case MGMT_LTK_AUTHENTICATED: + authenticated = 0x01; + type = key->initiator ? 
SMP_LTK : SMP_LTK_RESPONDER; + break; + case MGMT_LTK_P256_UNAUTH: + authenticated = 0x00; + type = SMP_LTK_P256; + break; + case MGMT_LTK_P256_AUTH: + authenticated = 0x01; + type = SMP_LTK_P256; + break; + case MGMT_LTK_P256_DEBUG: + authenticated = 0x00; + type = SMP_LTK_P256_DEBUG; + fallthrough; + default: + continue; + } + + /* When using SMP over BR/EDR, the addr type should be set to BREDR */ + if (key->addr.type == BDADDR_BREDR) + addr_type = BDADDR_BREDR; + + hci_add_ltk(hdev, &key->addr.bdaddr, + addr_type, type, authenticated, + key->val, key->enc_size, key->ediv, key->rand); + } + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0, + NULL, 0); + + hci_dev_unlock(hdev); + + return err; +} + +static void get_conn_info_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct hci_conn *conn = cmd->user_data; + struct mgmt_cp_get_conn_info *cp = cmd->param; + struct mgmt_rp_get_conn_info rp; + u8 status; + + bt_dev_dbg(hdev, "err %d", err); + + memcpy(&rp.addr, &cp->addr, sizeof(rp.addr)); + + status = mgmt_status(err); + if (status == MGMT_STATUS_SUCCESS) { + rp.rssi = conn->rssi; + rp.tx_power = conn->tx_power; + rp.max_tx_power = conn->max_tx_power; + } else { + rp.rssi = HCI_RSSI_INVALID; + rp.tx_power = HCI_TX_POWER_INVALID; + rp.max_tx_power = HCI_TX_POWER_INVALID; + } + + mgmt_cmd_complete(cmd->sk, cmd->index, MGMT_OP_GET_CONN_INFO, status, + &rp, sizeof(rp)); + + mgmt_pending_free(cmd); +} + +static int get_conn_info_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_get_conn_info *cp = cmd->param; + struct hci_conn *conn; + int err; + __le16 handle; + + /* Make sure we are still connected */ + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); + + if (!conn || conn->state != BT_CONNECTED) + return MGMT_STATUS_NOT_CONNECTED; + + cmd->user_data = conn; + handle = cpu_to_le16(conn->handle); + + /* Refresh RSSI each time */ + err = hci_read_rssi_sync(hdev, handle); + + /* For LE links TX power does not change thus we don't need to + * query for it once value is known. 
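+ * (the condition below skips the read for LE links once + * conn->tx_power != HCI_TX_POWER_INVALID).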
+ */ + if (!err && (!bdaddr_type_is_le(cp->addr.type) || + conn->tx_power == HCI_TX_POWER_INVALID)) + err = hci_read_tx_power_sync(hdev, handle, 0x00); + + /* Max TX power needs to be read only once per connection */ + if (!err && conn->max_tx_power == HCI_TX_POWER_INVALID) + err = hci_read_tx_power_sync(hdev, handle, 0x01); + + return err; +} + +static int get_conn_info(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_get_conn_info *cp = data; + struct mgmt_rp_get_conn_info rp; + struct hci_conn *conn; + unsigned long conn_info_age; + int err = 0; + + bt_dev_dbg(hdev, "sock %p", sk); + + memset(&rp, 0, sizeof(rp)); + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; + + if (!bdaddr_type_is_valid(cp->addr.type)) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); + goto unlock; + } + + if (cp->addr.type == BDADDR_BREDR) + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + else + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr); + + if (!conn || conn->state != BT_CONNECTED) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_NOT_CONNECTED, &rp, + sizeof(rp)); + goto unlock; + } + + /* To avoid the client trying to guess when to poll again for + * information, we calculate the conn info age as a random value + * between the min/max set in hdev. + */ + conn_info_age = hdev->conn_info_min_age + + prandom_u32_max(hdev->conn_info_max_age - + hdev->conn_info_min_age); + + /* Query controller to refresh cached values if they are too old or were + * never read.
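+ * (with the default conn_info_min_age/conn_info_max_age of 1000/3000 ms + * this means at most one refresh every one to three seconds per link).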
+ */ + if (time_after(jiffies, conn->conn_info_timestamp + + msecs_to_jiffies(conn_info_age)) || + !conn->conn_info_timestamp) { + struct mgmt_pending_cmd *cmd; + + cmd = mgmt_pending_new(sk, MGMT_OP_GET_CONN_INFO, hdev, data, + len); + if (!cmd) { + err = -ENOMEM; + } else { + err = hci_cmd_sync_queue(hdev, get_conn_info_sync, + cmd, get_conn_info_complete); + } + + if (err < 0) { + mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_FAILED, &rp, sizeof(rp)); + + if (cmd) + mgmt_pending_free(cmd); + + goto unlock; + } + + conn->conn_info_timestamp = jiffies; + } else { + /* Cache is valid, just reply with values cached in hci_conn */ + rp.rssi = conn->rssi; + rp.tx_power = conn->tx_power; + rp.max_tx_power = conn->max_tx_power; + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CONN_INFO, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + } + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static void get_clock_info_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_get_clock_info *cp = cmd->param; + struct mgmt_rp_get_clock_info rp; + struct hci_conn *conn = cmd->user_data; + u8 status = mgmt_status(err); + + bt_dev_dbg(hdev, "err %d", err); + + memset(&rp, 0, sizeof(rp)); + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; + + if (err) + goto complete; + + rp.local_clock = cpu_to_le32(hdev->clock); + + if (conn) { + rp.piconet_clock = cpu_to_le32(conn->clock); + rp.accuracy = cpu_to_le16(conn->clock_accuracy); + } + +complete: + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, status, &rp, + sizeof(rp)); + + mgmt_pending_free(cmd); +} + +static int get_clock_info_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_get_clock_info *cp = cmd->param; + struct hci_cp_read_clock hci_cp; + struct hci_conn *conn; + + memset(&hci_cp, 0, sizeof(hci_cp)); + hci_read_clock_sync(hdev, &hci_cp); + + /* Make sure connection still exists */ + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &cp->addr.bdaddr); + if (!conn || conn->state != BT_CONNECTED) + return MGMT_STATUS_NOT_CONNECTED; + + cmd->user_data = conn; + hci_cp.handle = cpu_to_le16(conn->handle); + hci_cp.which = 0x01; /* Piconet clock */ + + return hci_read_clock_sync(hdev, &hci_cp); +} + +static int get_clock_info(struct sock *sk, struct hci_dev *hdev, void *data, + u16 len) +{ + struct mgmt_cp_get_clock_info *cp = data; + struct mgmt_rp_get_clock_info rp; + struct mgmt_pending_cmd *cmd; + struct hci_conn *conn; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + memset(&rp, 0, sizeof(rp)); + bacpy(&rp.addr.bdaddr, &cp->addr.bdaddr); + rp.addr.type = cp->addr.type; + + if (cp->addr.type != BDADDR_BREDR) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, + MGMT_STATUS_INVALID_PARAMS, + &rp, sizeof(rp)); + + hci_dev_lock(hdev); + + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, + MGMT_STATUS_NOT_POWERED, &rp, + sizeof(rp)); + goto unlock; + } + + if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, + &cp->addr.bdaddr); + if (!conn || conn->state != BT_CONNECTED) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_GET_CLOCK_INFO, + MGMT_STATUS_NOT_CONNECTED, + &rp, sizeof(rp)); + goto unlock; + } + } else { + conn = NULL; + } + + cmd = mgmt_pending_new(sk, MGMT_OP_GET_CLOCK_INFO, hdev, data, len); + if (!cmd) + err = -ENOMEM; + else + err = hci_cmd_sync_queue(hdev, get_clock_info_sync, cmd, + 
get_clock_info_complete); + + if (err < 0) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_CLOCK_INFO, + MGMT_STATUS_FAILED, &rp, sizeof(rp)); + + if (cmd) + mgmt_pending_free(cmd); + } + + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type) +{ + struct hci_conn *conn; + + conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr); + if (!conn) + return false; + + if (conn->dst_type != type) + return false; + + if (conn->state != BT_CONNECTED) + return false; + + return true; +} + +/* This function requires the caller holds hdev->lock */ +static int hci_conn_params_set(struct hci_dev *hdev, bdaddr_t *addr, + u8 addr_type, u8 auto_connect) +{ + struct hci_conn_params *params; + + params = hci_conn_params_add(hdev, addr, addr_type); + if (!params) + return -EIO; + + if (params->auto_connect == auto_connect) + return 0; + + hci_pend_le_list_del_init(params); + + switch (auto_connect) { + case HCI_AUTO_CONN_DISABLED: + case HCI_AUTO_CONN_LINK_LOSS: + /* If auto connect is being disabled when we're trying to + * connect to device, keep connecting. + */ + if (params->explicit_connect) + hci_pend_le_list_add(params, &hdev->pend_le_conns); + break; + case HCI_AUTO_CONN_REPORT: + if (params->explicit_connect) + hci_pend_le_list_add(params, &hdev->pend_le_conns); + else + hci_pend_le_list_add(params, &hdev->pend_le_reports); + break; + case HCI_AUTO_CONN_DIRECT: + case HCI_AUTO_CONN_ALWAYS: + if (!is_connected(hdev, addr, addr_type)) + hci_pend_le_list_add(params, &hdev->pend_le_conns); + break; + } + + params->auto_connect = auto_connect; + + bt_dev_dbg(hdev, "addr %pMR (type %u) auto_connect %u", + addr, addr_type, auto_connect); + + return 0; +} + +static void device_added(struct sock *sk, struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 type, u8 action) +{ + struct mgmt_ev_device_added ev; + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = type; + ev.action = action; + + mgmt_event(MGMT_EV_DEVICE_ADDED, hdev, &ev, sizeof(ev), sk); +} + +static int add_device_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_passive_scan_sync(hdev); +} + +static int add_device(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_add_device *cp = data; + u8 auto_conn, addr_type; + struct hci_conn_params *params; + int err; + u32 current_flags = 0; + u32 supported_flags; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!bdaddr_type_is_valid(cp->addr.type) || + !bacmp(&cp->addr.bdaddr, BDADDR_ANY)) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + + if (cp->action != 0x00 && cp->action != 0x01 && cp->action != 0x02) + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + + hci_dev_lock(hdev); + + if (cp->addr.type == BDADDR_BREDR) { + /* Only incoming connections action is supported for now */ + if (cp->action != 0x01) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + goto unlock; + } + + err = hci_bdaddr_list_add_with_flags(&hdev->accept_list, + &cp->addr.bdaddr, + cp->addr.type, 0); + if (err) + goto unlock; + + hci_update_scan(hdev); + + goto added; + } + + addr_type = le_addr_type(cp->addr.type); + + if (cp->action == 0x02) + auto_conn = HCI_AUTO_CONN_ALWAYS; + else if (cp->action == 0x01) + auto_conn = HCI_AUTO_CONN_DIRECT; + else + auto_conn = HCI_AUTO_CONN_REPORT; + + /* Kernel 
internally uses conn_params with resolvable private + * address, but Add Device allows only identity addresses. + * Make sure it is enforced before calling + * hci_conn_params_lookup. + */ + if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + goto unlock; + } + + /* If the connection parameters don't exist for this device, + * they will be created and configured with defaults. + */ + if (hci_conn_params_set(hdev, &cp->addr.bdaddr, addr_type, + auto_conn) < 0) { + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, + MGMT_STATUS_FAILED, &cp->addr, + sizeof(cp->addr)); + goto unlock; + } else { + params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, + addr_type); + if (params) + current_flags = params->flags; + } + + err = hci_cmd_sync_queue(hdev, add_device_sync, NULL, NULL); + if (err < 0) + goto unlock; + +added: + device_added(sk, hdev, &cp->addr.bdaddr, cp->addr.type, cp->action); + supported_flags = hdev->conn_flags; + device_flags_changed(NULL, hdev, &cp->addr.bdaddr, cp->addr.type, + supported_flags, current_flags); + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_DEVICE, + MGMT_STATUS_SUCCESS, &cp->addr, + sizeof(cp->addr)); + +unlock: + hci_dev_unlock(hdev); + return err; +} + +static void device_removed(struct sock *sk, struct hci_dev *hdev, + bdaddr_t *bdaddr, u8 type) +{ + struct mgmt_ev_device_removed ev; + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = type; + + mgmt_event(MGMT_EV_DEVICE_REMOVED, hdev, &ev, sizeof(ev), sk); +} + +static int remove_device_sync(struct hci_dev *hdev, void *data) +{ + return hci_update_passive_scan_sync(hdev); +} + +static int remove_device(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_cp_remove_device *cp = data; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (bacmp(&cp->addr.bdaddr, BDADDR_ANY)) { + struct hci_conn_params *params; + u8 addr_type; + + if (!bdaddr_type_is_valid(cp->addr.type)) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_REMOVE_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, sizeof(cp->addr)); + goto unlock; + } + + if (cp->addr.type == BDADDR_BREDR) { + err = hci_bdaddr_list_del(&hdev->accept_list, + &cp->addr.bdaddr, + cp->addr.type); + if (err) { + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_REMOVE_DEVICE, + MGMT_STATUS_INVALID_PARAMS, + &cp->addr, + sizeof(cp->addr)); + goto unlock; + } + + hci_update_scan(hdev); + + device_removed(sk, hdev, &cp->addr.bdaddr, + cp->addr.type); + goto complete; + } + + addr_type = le_addr_type(cp->addr.type); + + /* Kernel internally uses conn_params with resolvable private + * address, but Remove Device allows only identity addresses. + * Make sure it is enforced before calling + * hci_conn_params_lookup. 
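+ * An identity address is either a public address or a static random
+ * address; resolvable private addresses are rejected by the check
+ * below.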
+ */
+ if (!hci_is_identity_address(&cp->addr.bdaddr, addr_type)) {
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+ goto unlock;
+ }
+
+ params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr,
+ addr_type);
+ if (!params) {
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+ goto unlock;
+ }
+
+ if (params->auto_connect == HCI_AUTO_CONN_DISABLED ||
+ params->auto_connect == HCI_AUTO_CONN_EXPLICIT) {
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+ goto unlock;
+ }
+
+ hci_conn_params_free(params);
+
+ device_removed(sk, hdev, &cp->addr.bdaddr, cp->addr.type);
+ } else {
+ struct hci_conn_params *p, *tmp;
+ struct bdaddr_list *b, *btmp;
+
+ if (cp->addr.type) {
+ err = mgmt_cmd_complete(sk, hdev->id,
+ MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_INVALID_PARAMS,
+ &cp->addr, sizeof(cp->addr));
+ goto unlock;
+ }
+
+ list_for_each_entry_safe(b, btmp, &hdev->accept_list, list) {
+ device_removed(sk, hdev, &b->bdaddr, b->bdaddr_type);
+ list_del(&b->list);
+ kfree(b);
+ }
+
+ hci_update_scan(hdev);
+
+ list_for_each_entry_safe(p, tmp, &hdev->le_conn_params, list) {
+ if (p->auto_connect == HCI_AUTO_CONN_DISABLED)
+ continue;
+ device_removed(sk, hdev, &p->addr, p->addr_type);
+ if (p->explicit_connect) {
+ p->auto_connect = HCI_AUTO_CONN_EXPLICIT;
+ continue;
+ }
+ hci_conn_params_free(p);
+ }
+
+ bt_dev_dbg(hdev, "All LE connection parameters were removed");
+ }
+
+ hci_cmd_sync_queue(hdev, remove_device_sync, NULL, NULL);
+
+complete:
+ err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_REMOVE_DEVICE,
+ MGMT_STATUS_SUCCESS, &cp->addr,
+ sizeof(cp->addr));
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int load_conn_param(struct sock *sk, struct hci_dev *hdev, void *data,
+ u16 len)
+{
+ struct mgmt_cp_load_conn_param *cp = data;
+ const u16 max_param_count = ((U16_MAX - sizeof(*cp)) /
+ sizeof(struct mgmt_conn_param));
+ u16 param_count, expected_len;
+ int i;
+
+ if (!lmp_le_capable(hdev))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ param_count = __le16_to_cpu(cp->param_count);
+ if (param_count > max_param_count) {
+ bt_dev_err(hdev, "load_conn_param: too big param_count value %u",
+ param_count);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ expected_len = struct_size(cp, params, param_count);
+ if (expected_len != len) {
+ bt_dev_err(hdev, "load_conn_param: expected %u bytes, got %u bytes",
+ expected_len, len);
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM,
+ MGMT_STATUS_INVALID_PARAMS);
+ }
+
+ bt_dev_dbg(hdev, "param_count %u", param_count);
+
+ hci_dev_lock(hdev);
+
+ hci_conn_params_clear_disabled(hdev);
+
+ for (i = 0; i < param_count; i++) {
+ struct mgmt_conn_param *param = &cp->params[i];
+ struct hci_conn_params *hci_param;
+ u16 min, max, latency, timeout;
+ u8 addr_type;
+
+ bt_dev_dbg(hdev, "Adding %pMR (type %u)", &param->addr.bdaddr,
+ param->addr.type);
+
+ if (param->addr.type == BDADDR_LE_PUBLIC) {
+ addr_type = ADDR_LE_DEV_PUBLIC;
+ } else if (param->addr.type == BDADDR_LE_RANDOM) {
+ addr_type = ADDR_LE_DEV_RANDOM;
+ } else {
+ bt_dev_err(hdev, "ignoring invalid connection parameters");
+ continue;
+ }
+
+ min = le16_to_cpu(param->min_interval);
+ max = le16_to_cpu(param->max_interval);
+ latency = le16_to_cpu(param->latency);
+ timeout = le16_to_cpu(param->timeout);
+
+ bt_dev_dbg(hdev, "min 0x%04x max 0x%04x latency 0x%04x timeout 0x%04x",
+ min, max, latency, timeout);
+
+ if (hci_check_conn_params(min, max, latency, timeout) < 0) {
+ bt_dev_err(hdev, "ignoring invalid connection parameters");
+ continue;
+ }
+
+ hci_param = hci_conn_params_add(hdev, &param->addr.bdaddr,
+ addr_type);
+ if (!hci_param) {
+ bt_dev_err(hdev, "failed to add connection parameters");
+ continue;
+ }
+
+ hci_param->conn_min_interval = min;
+ hci_param->conn_max_interval = max;
+ hci_param->conn_latency = latency;
+ hci_param->supervision_timeout = timeout;
+ }
+
+ hci_dev_unlock(hdev);
+
+ return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_CONN_PARAM, 0,
+ NULL, 0);
+}
+
+static int set_external_config(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_set_external_config *cp = data;
+ bool changed;
+ int err;
+
+ bt_dev_dbg(hdev, "sock %p", sk);
+
+ if (hdev_is_powered(hdev))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+ MGMT_STATUS_REJECTED);
+
+ if (cp->config != 0x00 && cp->config != 0x01)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (!test_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_EXTERNAL_CONFIG,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ hci_dev_lock(hdev);
+
+ if (cp->config)
+ changed = !hci_dev_test_and_set_flag(hdev, HCI_EXT_CONFIGURED);
+ else
+ changed = hci_dev_test_and_clear_flag(hdev, HCI_EXT_CONFIGURED);
+
+ err = send_options_rsp(sk, MGMT_OP_SET_EXTERNAL_CONFIG, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (!changed)
+ goto unlock;
+
+ err = new_options(hdev, sk);
+
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) == is_configured(hdev)) {
+ mgmt_index_removed(hdev);
+
+ if (hci_dev_test_and_change_flag(hdev, HCI_UNCONFIGURED)) {
+ hci_dev_set_flag(hdev, HCI_CONFIG);
+ hci_dev_set_flag(hdev, HCI_AUTO_OFF);
+
+ queue_work(hdev->req_workqueue, &hdev->power_on);
+ } else {
+ set_bit(HCI_RAW, &hdev->flags);
+ mgmt_index_added(hdev);
+ }
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static int set_public_address(struct sock *sk, struct hci_dev *hdev,
+ void *data, u16 len)
+{
+ struct mgmt_cp_set_public_address *cp = data;
+ bool changed;
+ int err;
+
+ bt_dev_dbg(hdev, "sock %p", sk);
+
+ if (hdev_is_powered(hdev))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+ MGMT_STATUS_REJECTED);
+
+ if (!bacmp(&cp->bdaddr, BDADDR_ANY))
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+ MGMT_STATUS_INVALID_PARAMS);
+
+ if (!hdev->set_bdaddr)
+ return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_PUBLIC_ADDRESS,
+ MGMT_STATUS_NOT_SUPPORTED);
+
+ hci_dev_lock(hdev);
+
+ changed = !!bacmp(&hdev->public_addr, &cp->bdaddr);
+ bacpy(&hdev->public_addr, &cp->bdaddr);
+
+ err = send_options_rsp(sk, MGMT_OP_SET_PUBLIC_ADDRESS, hdev);
+ if (err < 0)
+ goto unlock;
+
+ if (!changed)
+ goto unlock;
+
+ if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
+ err = new_options(hdev, sk);
+
+ if (is_configured(hdev)) {
+ mgmt_index_removed(hdev);
+
+ hci_dev_clear_flag(hdev, HCI_UNCONFIGURED);
+
+ hci_dev_set_flag(hdev, HCI_CONFIG);
+ hci_dev_set_flag(hdev, HCI_AUTO_OFF);
+
+ queue_work(hdev->req_workqueue, &hdev->power_on);
+ }
+
+unlock:
+ hci_dev_unlock(hdev);
+ return err;
+}
+
+static void read_local_oob_ext_data_complete(struct hci_dev *hdev, void *data,
+ int err)
+{
+ const struct mgmt_cp_read_local_oob_ext_data
*mgmt_cp; + struct mgmt_rp_read_local_oob_ext_data *mgmt_rp; + u8 *h192, *r192, *h256, *r256; + struct mgmt_pending_cmd *cmd = data; + struct sk_buff *skb = cmd->skb; + u8 status = mgmt_status(err); + u16 eir_len; + + if (cmd != pending_find(MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev)) + return; + + if (!status) { + if (!skb) + status = MGMT_STATUS_FAILED; + else if (IS_ERR(skb)) + status = mgmt_status(PTR_ERR(skb)); + else + status = mgmt_status(skb->data[0]); + } + + bt_dev_dbg(hdev, "status %u", status); + + mgmt_cp = cmd->param; + + if (status) { + status = mgmt_status(status); + eir_len = 0; + + h192 = NULL; + r192 = NULL; + h256 = NULL; + r256 = NULL; + } else if (!bredr_sc_enabled(hdev)) { + struct hci_rp_read_local_oob_data *rp; + + if (skb->len != sizeof(*rp)) { + status = MGMT_STATUS_FAILED; + eir_len = 0; + } else { + status = MGMT_STATUS_SUCCESS; + rp = (void *)skb->data; + + eir_len = 5 + 18 + 18; + h192 = rp->hash; + r192 = rp->rand; + h256 = NULL; + r256 = NULL; + } + } else { + struct hci_rp_read_local_oob_ext_data *rp; + + if (skb->len != sizeof(*rp)) { + status = MGMT_STATUS_FAILED; + eir_len = 0; + } else { + status = MGMT_STATUS_SUCCESS; + rp = (void *)skb->data; + + if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { + eir_len = 5 + 18 + 18; + h192 = NULL; + r192 = NULL; + } else { + eir_len = 5 + 18 + 18 + 18 + 18; + h192 = rp->hash192; + r192 = rp->rand192; + } + + h256 = rp->hash256; + r256 = rp->rand256; + } + } + + mgmt_rp = kmalloc(sizeof(*mgmt_rp) + eir_len, GFP_KERNEL); + if (!mgmt_rp) + goto done; + + if (eir_len == 0) + goto send_rsp; + + eir_len = eir_append_data(mgmt_rp->eir, 0, EIR_CLASS_OF_DEV, + hdev->dev_class, 3); + + if (h192 && r192) { + eir_len = eir_append_data(mgmt_rp->eir, eir_len, + EIR_SSP_HASH_C192, h192, 16); + eir_len = eir_append_data(mgmt_rp->eir, eir_len, + EIR_SSP_RAND_R192, r192, 16); + } + + if (h256 && r256) { + eir_len = eir_append_data(mgmt_rp->eir, eir_len, + EIR_SSP_HASH_C256, h256, 16); + eir_len = eir_append_data(mgmt_rp->eir, eir_len, + EIR_SSP_RAND_R256, r256, 16); + } + +send_rsp: + mgmt_rp->type = mgmt_cp->type; + mgmt_rp->eir_len = cpu_to_le16(eir_len); + + err = mgmt_cmd_complete(cmd->sk, hdev->id, + MGMT_OP_READ_LOCAL_OOB_EXT_DATA, status, + mgmt_rp, sizeof(*mgmt_rp) + eir_len); + if (err < 0 || status) + goto done; + + hci_sock_set_flag(cmd->sk, HCI_MGMT_OOB_DATA_EVENTS); + + err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, + mgmt_rp, sizeof(*mgmt_rp) + eir_len, + HCI_MGMT_OOB_DATA_EVENTS, cmd->sk); +done: + if (skb && !IS_ERR(skb)) + kfree_skb(skb); + + kfree(mgmt_rp); + mgmt_pending_remove(cmd); +} + +static int read_local_ssp_oob_req(struct hci_dev *hdev, struct sock *sk, + struct mgmt_cp_read_local_oob_ext_data *cp) +{ + struct mgmt_pending_cmd *cmd; + int err; + + cmd = mgmt_pending_add(sk, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, hdev, + cp, sizeof(*cp)); + if (!cmd) + return -ENOMEM; + + err = hci_cmd_sync_queue(hdev, read_local_oob_data_sync, cmd, + read_local_oob_ext_data_complete); + + if (err < 0) { + mgmt_pending_remove(cmd); + return err; + } + + return 0; +} + +static int read_local_oob_ext_data(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_cp_read_local_oob_ext_data *cp = data; + struct mgmt_rp_read_local_oob_ext_data *rp; + size_t rp_len; + u16 eir_len; + u8 status, flags, role, addr[7], hash[16], rand[16]; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (hdev_is_powered(hdev)) { + switch (cp->type) { + case BIT(BDADDR_BREDR): + status = mgmt_bredr_support(hdev); + if 
(status) + eir_len = 0; + else + eir_len = 5; + break; + case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): + status = mgmt_le_support(hdev); + if (status) + eir_len = 0; + else + eir_len = 9 + 3 + 18 + 18 + 3; + break; + default: + status = MGMT_STATUS_INVALID_PARAMS; + eir_len = 0; + break; + } + } else { + status = MGMT_STATUS_NOT_POWERED; + eir_len = 0; + } + + rp_len = sizeof(*rp) + eir_len; + rp = kmalloc(rp_len, GFP_ATOMIC); + if (!rp) + return -ENOMEM; + + if (!status && !lmp_ssp_capable(hdev)) { + status = MGMT_STATUS_NOT_SUPPORTED; + eir_len = 0; + } + + if (status) + goto complete; + + hci_dev_lock(hdev); + + eir_len = 0; + switch (cp->type) { + case BIT(BDADDR_BREDR): + if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { + err = read_local_ssp_oob_req(hdev, sk, cp); + hci_dev_unlock(hdev); + if (!err) + goto done; + + status = MGMT_STATUS_FAILED; + goto complete; + } else { + eir_len = eir_append_data(rp->eir, eir_len, + EIR_CLASS_OF_DEV, + hdev->dev_class, 3); + } + break; + case (BIT(BDADDR_LE_PUBLIC) | BIT(BDADDR_LE_RANDOM)): + if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && + smp_generate_oob(hdev, hash, rand) < 0) { + hci_dev_unlock(hdev); + status = MGMT_STATUS_FAILED; + goto complete; + } + + /* This should return the active RPA, but since the RPA + * is only programmed on demand, it is really hard to fill + * this in at the moment. For now disallow retrieving + * local out-of-band data when privacy is in use. + * + * Returning the identity address will not help here since + * pairing happens before the identity resolving key is + * known and thus the connection establishment happens + * based on the RPA and not the identity address. + */ + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) { + hci_dev_unlock(hdev); + status = MGMT_STATUS_REJECTED; + goto complete; + } + + if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || + !bacmp(&hdev->bdaddr, BDADDR_ANY) || + (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && + bacmp(&hdev->static_addr, BDADDR_ANY))) { + memcpy(addr, &hdev->static_addr, 6); + addr[6] = 0x01; + } else { + memcpy(addr, &hdev->bdaddr, 6); + addr[6] = 0x00; + } + + eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_BDADDR, + addr, sizeof(addr)); + + if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) + role = 0x02; + else + role = 0x01; + + eir_len = eir_append_data(rp->eir, eir_len, EIR_LE_ROLE, + &role, sizeof(role)); + + if (hci_dev_test_flag(hdev, HCI_SC_ENABLED)) { + eir_len = eir_append_data(rp->eir, eir_len, + EIR_LE_SC_CONFIRM, + hash, sizeof(hash)); + + eir_len = eir_append_data(rp->eir, eir_len, + EIR_LE_SC_RANDOM, + rand, sizeof(rand)); + } + + flags = mgmt_get_adv_discov_flags(hdev); + + if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) + flags |= LE_AD_NO_BREDR; + + eir_len = eir_append_data(rp->eir, eir_len, EIR_FLAGS, + &flags, sizeof(flags)); + break; + } + + hci_dev_unlock(hdev); + + hci_sock_set_flag(sk, HCI_MGMT_OOB_DATA_EVENTS); + + status = MGMT_STATUS_SUCCESS; + +complete: + rp->type = cp->type; + rp->eir_len = cpu_to_le16(eir_len); + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_LOCAL_OOB_EXT_DATA, + status, rp, sizeof(*rp) + eir_len); + if (err < 0 || status) + goto done; + + err = mgmt_limited_event(MGMT_EV_LOCAL_OOB_DATA_UPDATED, hdev, + rp, sizeof(*rp) + eir_len, + HCI_MGMT_OOB_DATA_EVENTS, sk); + +done: + kfree(rp); + + return err; +} + +static u32 get_supported_adv_flags(struct hci_dev *hdev) +{ + u32 flags = 0; + + flags |= MGMT_ADV_FLAG_CONNECTABLE; + flags |= MGMT_ADV_FLAG_DISCOV; + flags |= MGMT_ADV_FLAG_LIMITED_DISCOV; + flags |= 
MGMT_ADV_FLAG_MANAGED_FLAGS; + flags |= MGMT_ADV_FLAG_APPEARANCE; + flags |= MGMT_ADV_FLAG_LOCAL_NAME; + flags |= MGMT_ADV_PARAM_DURATION; + flags |= MGMT_ADV_PARAM_TIMEOUT; + flags |= MGMT_ADV_PARAM_INTERVALS; + flags |= MGMT_ADV_PARAM_TX_POWER; + flags |= MGMT_ADV_PARAM_SCAN_RSP; + + /* In extended adv TX_POWER returned from Set Adv Param + * will be always valid. + */ + if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev)) + flags |= MGMT_ADV_FLAG_TX_POWER; + + if (ext_adv_capable(hdev)) { + flags |= MGMT_ADV_FLAG_SEC_1M; + flags |= MGMT_ADV_FLAG_HW_OFFLOAD; + flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER; + + if (hdev->le_features[1] & HCI_LE_PHY_2M) + flags |= MGMT_ADV_FLAG_SEC_2M; + + if (hdev->le_features[1] & HCI_LE_PHY_CODED) + flags |= MGMT_ADV_FLAG_SEC_CODED; + } + + return flags; +} + +static int read_adv_features(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_rp_read_adv_features *rp; + size_t rp_len; + int err; + struct adv_info *adv_instance; + u32 supported_flags; + u8 *instance; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, + MGMT_STATUS_REJECTED); + + hci_dev_lock(hdev); + + rp_len = sizeof(*rp) + hdev->adv_instance_cnt; + rp = kmalloc(rp_len, GFP_ATOMIC); + if (!rp) { + hci_dev_unlock(hdev); + return -ENOMEM; + } + + supported_flags = get_supported_adv_flags(hdev); + + rp->supported_flags = cpu_to_le32(supported_flags); + rp->max_adv_data_len = HCI_MAX_AD_LENGTH; + rp->max_scan_rsp_len = HCI_MAX_AD_LENGTH; + rp->max_instances = hdev->le_num_of_adv_sets; + rp->num_instances = hdev->adv_instance_cnt; + + instance = rp->instance; + list_for_each_entry(adv_instance, &hdev->adv_instances, list) { + /* Only instances 1-le_num_of_adv_sets are externally visible */ + if (adv_instance->instance <= hdev->adv_instance_cnt) { + *instance = adv_instance->instance; + instance++; + } else { + rp->num_instances--; + rp_len--; + } + } + + hci_dev_unlock(hdev); + + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_READ_ADV_FEATURES, + MGMT_STATUS_SUCCESS, rp, rp_len); + + kfree(rp); + + return err; +} + +static u8 calculate_name_len(struct hci_dev *hdev) +{ + u8 buf[HCI_MAX_SHORT_NAME_LENGTH + 3]; + + return eir_append_local_name(hdev, buf, 0); +} + +static u8 tlv_data_max_len(struct hci_dev *hdev, u32 adv_flags, + bool is_adv_data) +{ + u8 max_len = HCI_MAX_AD_LENGTH; + + if (is_adv_data) { + if (adv_flags & (MGMT_ADV_FLAG_DISCOV | + MGMT_ADV_FLAG_LIMITED_DISCOV | + MGMT_ADV_FLAG_MANAGED_FLAGS)) + max_len -= 3; + + if (adv_flags & MGMT_ADV_FLAG_TX_POWER) + max_len -= 3; + } else { + if (adv_flags & MGMT_ADV_FLAG_LOCAL_NAME) + max_len -= calculate_name_len(hdev); + + if (adv_flags & (MGMT_ADV_FLAG_APPEARANCE)) + max_len -= 4; + } + + return max_len; +} + +static bool flags_managed(u32 adv_flags) +{ + return adv_flags & (MGMT_ADV_FLAG_DISCOV | + MGMT_ADV_FLAG_LIMITED_DISCOV | + MGMT_ADV_FLAG_MANAGED_FLAGS); +} + +static bool tx_power_managed(u32 adv_flags) +{ + return adv_flags & MGMT_ADV_FLAG_TX_POWER; +} + +static bool name_managed(u32 adv_flags) +{ + return adv_flags & MGMT_ADV_FLAG_LOCAL_NAME; +} + +static bool appearance_managed(u32 adv_flags) +{ + return adv_flags & MGMT_ADV_FLAG_APPEARANCE; +} + +static bool tlv_data_is_valid(struct hci_dev *hdev, u32 adv_flags, u8 *data, + u8 len, bool is_adv_data) +{ + int i, cur_len; + u8 max_len; + + max_len = tlv_data_max_len(hdev, adv_flags, is_adv_data); + + if (len > max_len) + return false; + + /* Make sure 
that the data is correctly formatted. */ + for (i = 0; i < len; i += (cur_len + 1)) { + cur_len = data[i]; + + if (!cur_len) + continue; + + if (data[i + 1] == EIR_FLAGS && + (!is_adv_data || flags_managed(adv_flags))) + return false; + + if (data[i + 1] == EIR_TX_POWER && tx_power_managed(adv_flags)) + return false; + + if (data[i + 1] == EIR_NAME_COMPLETE && name_managed(adv_flags)) + return false; + + if (data[i + 1] == EIR_NAME_SHORT && name_managed(adv_flags)) + return false; + + if (data[i + 1] == EIR_APPEARANCE && + appearance_managed(adv_flags)) + return false; + + /* If the current field length would exceed the total data + * length, then it's invalid. + */ + if (i + cur_len >= len) + return false; + } + + return true; +} + +static bool requested_adv_flags_are_valid(struct hci_dev *hdev, u32 adv_flags) +{ + u32 supported_flags, phy_flags; + + /* The current implementation only supports a subset of the specified + * flags. Also need to check mutual exclusiveness of sec flags. + */ + supported_flags = get_supported_adv_flags(hdev); + phy_flags = adv_flags & MGMT_ADV_FLAG_SEC_MASK; + if (adv_flags & ~supported_flags || + ((phy_flags && (phy_flags ^ (phy_flags & -phy_flags))))) + return false; + + return true; +} + +static bool adv_busy(struct hci_dev *hdev) +{ + return pending_find(MGMT_OP_SET_LE, hdev); +} + +static void add_adv_complete(struct hci_dev *hdev, struct sock *sk, u8 instance, + int err) +{ + struct adv_info *adv, *n; + + bt_dev_dbg(hdev, "err %d", err); + + hci_dev_lock(hdev); + + list_for_each_entry_safe(adv, n, &hdev->adv_instances, list) { + u8 instance; + + if (!adv->pending) + continue; + + if (!err) { + adv->pending = false; + continue; + } + + instance = adv->instance; + + if (hdev->cur_adv_instance == instance) + cancel_adv_timeout(hdev); + + hci_remove_adv_instance(hdev, instance); + mgmt_advertising_removed(sk, hdev, instance); + } + + hci_dev_unlock(hdev); +} + +static void add_advertising_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_advertising *cp = cmd->param; + struct mgmt_rp_add_advertising rp; + + memset(&rp, 0, sizeof(rp)); + + rp.instance = cp->instance; + + if (err) + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err)); + else + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err), &rp, sizeof(rp)); + + add_adv_complete(hdev, cmd->sk, cp->instance, err); + + mgmt_pending_free(cmd); +} + +static int add_advertising_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_advertising *cp = cmd->param; + + return hci_schedule_adv_instance_sync(hdev, cp->instance, true); +} + +static int add_advertising(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_cp_add_advertising *cp = data; + struct mgmt_rp_add_advertising rp; + u32 flags; + u8 status; + u16 timeout, duration; + unsigned int prev_instance_cnt; + u8 schedule_instance = 0; + struct adv_info *adv, *next_instance; + int err; + struct mgmt_pending_cmd *cmd; + + bt_dev_dbg(hdev, "sock %p", sk); + + status = mgmt_le_support(hdev); + if (status) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + status); + + if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + MGMT_STATUS_INVALID_PARAMS); + + if (data_len != sizeof(*cp) + cp->adv_data_len + cp->scan_rsp_len) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + 
MGMT_STATUS_INVALID_PARAMS); + + flags = __le32_to_cpu(cp->flags); + timeout = __le16_to_cpu(cp->timeout); + duration = __le16_to_cpu(cp->duration); + + if (!requested_adv_flags_are_valid(hdev, flags)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + if (timeout && !hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + MGMT_STATUS_REJECTED); + goto unlock; + } + + if (adv_busy(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + MGMT_STATUS_BUSY); + goto unlock; + } + + if (!tlv_data_is_valid(hdev, flags, cp->data, cp->adv_data_len, true) || + !tlv_data_is_valid(hdev, flags, cp->data + cp->adv_data_len, + cp->scan_rsp_len, false)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + MGMT_STATUS_INVALID_PARAMS); + goto unlock; + } + + prev_instance_cnt = hdev->adv_instance_cnt; + + adv = hci_add_adv_instance(hdev, cp->instance, flags, + cp->adv_data_len, cp->data, + cp->scan_rsp_len, + cp->data + cp->adv_data_len, + timeout, duration, + HCI_ADV_TX_POWER_NO_PREFERENCE, + hdev->le_adv_min_interval, + hdev->le_adv_max_interval, 0); + if (IS_ERR(adv)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + MGMT_STATUS_FAILED); + goto unlock; + } + + /* Only trigger an advertising added event if a new instance was + * actually added. + */ + if (hdev->adv_instance_cnt > prev_instance_cnt) + mgmt_advertising_added(sk, hdev, cp->instance); + + if (hdev->cur_adv_instance == cp->instance) { + /* If the currently advertised instance is being changed then + * cancel the current advertising and schedule the next + * instance. If there is only one instance then the overridden + * advertising data will be visible right away. + */ + cancel_adv_timeout(hdev); + + next_instance = hci_get_next_instance(hdev, cp->instance); + if (next_instance) + schedule_instance = next_instance->instance; + } else if (!hdev->adv_instance_timeout) { + /* Immediately advertise the new instance if no other + * instance is currently being advertised. + */ + schedule_instance = cp->instance; + } + + /* If the HCI_ADVERTISING flag is set or the device isn't powered or + * there is no instance to be advertised then we have no HCI + * communication to make. Simply return. + */ + if (!hdev_is_powered(hdev) || + hci_dev_test_flag(hdev, HCI_ADVERTISING) || + !schedule_instance) { + rp.instance = cp->instance; + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_ADVERTISING, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + goto unlock; + } + + /* We're good to go, update advertising data, parameters, and start + * advertising. 
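+ * The HCI work itself runs from the command sync queue:
+ * add_advertising_sync() schedules the instance and the outcome is
+ * reported back via add_advertising_complete().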
+ */ + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_ADVERTISING, hdev, data, + data_len); + if (!cmd) { + err = -ENOMEM; + goto unlock; + } + + cp->instance = schedule_instance; + + err = hci_cmd_sync_queue(hdev, add_advertising_sync, cmd, + add_advertising_complete); + if (err < 0) + mgmt_pending_free(cmd); + +unlock: + hci_dev_unlock(hdev); + + return err; +} + +static void add_ext_adv_params_complete(struct hci_dev *hdev, void *data, + int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_params *cp = cmd->param; + struct mgmt_rp_add_ext_adv_params rp; + struct adv_info *adv; + u32 flags; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + adv = hci_find_adv_instance(hdev, cp->instance); + if (!adv) + goto unlock; + + rp.instance = cp->instance; + rp.tx_power = adv->tx_power; + + /* While we're at it, inform userspace of the available space for this + * advertisement, given the flags that will be used. + */ + flags = __le32_to_cpu(cp->flags); + rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); + rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); + + if (err) { + /* If this advertisement was previously advertising and we + * failed to update it, we signal that it has been removed and + * delete its structure + */ + if (!adv->pending) + mgmt_advertising_removed(cmd->sk, hdev, cp->instance); + + hci_remove_adv_instance(hdev, cp->instance); + + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err)); + } else { + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err), &rp, sizeof(rp)); + } + +unlock: + if (cmd) + mgmt_pending_free(cmd); + + hci_dev_unlock(hdev); +} + +static int add_ext_adv_params_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_params *cp = cmd->param; + + return hci_setup_ext_adv_instance_sync(hdev, cp->instance); +} + +static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_cp_add_ext_adv_params *cp = data; + struct mgmt_rp_add_ext_adv_params rp; + struct mgmt_pending_cmd *cmd = NULL; + struct adv_info *adv; + u32 flags, min_interval, max_interval; + u16 timeout, duration; + u8 status; + s8 tx_power; + int err; + + BT_DBG("%s", hdev->name); + + status = mgmt_le_support(hdev); + if (status) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + status); + + if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_INVALID_PARAMS); + + /* The purpose of breaking add_advertising into two separate MGMT calls + * for params and data is to allow more parameters to be added to this + * structure in the future. For this reason, we verify that we have the + * bare minimum structure we know of when the interface was defined. Any + * extra parameters we don't know about will be ignored in this request. 
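+ * This is also why only a minimum size is enforced below rather
+ * than an exact structure size.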
+ */ + if (data_len < MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_INVALID_PARAMS); + + flags = __le32_to_cpu(cp->flags); + + if (!requested_adv_flags_are_valid(hdev, flags)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_INVALID_PARAMS); + + hci_dev_lock(hdev); + + /* In new interface, we require that we are powered to register */ + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_REJECTED); + goto unlock; + } + + if (adv_busy(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_BUSY); + goto unlock; + } + + /* Parse defined parameters from request, use defaults otherwise */ + timeout = (flags & MGMT_ADV_PARAM_TIMEOUT) ? + __le16_to_cpu(cp->timeout) : 0; + + duration = (flags & MGMT_ADV_PARAM_DURATION) ? + __le16_to_cpu(cp->duration) : + hdev->def_multi_adv_rotation_duration; + + min_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ? + __le32_to_cpu(cp->min_interval) : + hdev->le_adv_min_interval; + + max_interval = (flags & MGMT_ADV_PARAM_INTERVALS) ? + __le32_to_cpu(cp->max_interval) : + hdev->le_adv_max_interval; + + tx_power = (flags & MGMT_ADV_PARAM_TX_POWER) ? + cp->tx_power : + HCI_ADV_TX_POWER_NO_PREFERENCE; + + /* Create advertising instance with no advertising or response data */ + adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL, + timeout, duration, tx_power, min_interval, + max_interval, 0); + + if (IS_ERR(adv)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_FAILED); + goto unlock; + } + + /* Submit request for advertising params if ext adv available */ + if (ext_adv_capable(hdev)) { + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_PARAMS, hdev, + data, data_len); + if (!cmd) { + err = -ENOMEM; + hci_remove_adv_instance(hdev, cp->instance); + goto unlock; + } + + err = hci_cmd_sync_queue(hdev, add_ext_adv_params_sync, cmd, + add_ext_adv_params_complete); + if (err < 0) + mgmt_pending_free(cmd); + } else { + rp.instance = cp->instance; + rp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; + rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); + rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); + err = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_ADD_EXT_ADV_PARAMS, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + } + +unlock: + hci_dev_unlock(hdev); + + return err; +} + +static void add_ext_adv_data_complete(struct hci_dev *hdev, void *data, int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_data *cp = cmd->param; + struct mgmt_rp_add_advertising rp; + + add_adv_complete(hdev, cmd->sk, cp->instance, err); + + memset(&rp, 0, sizeof(rp)); + + rp.instance = cp->instance; + + if (err) + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err)); + else + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err), &rp, sizeof(rp)); + + mgmt_pending_free(cmd); +} + +static int add_ext_adv_data_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_add_ext_adv_data *cp = cmd->param; + int err; + + if (ext_adv_capable(hdev)) { + err = hci_update_adv_data_sync(hdev, cp->instance); + if (err) + return err; + + err = hci_update_scan_rsp_data_sync(hdev, cp->instance); + if (err) + return err; + + return hci_enable_ext_advertising_sync(hdev, cp->instance); + } + + return hci_schedule_adv_instance_sync(hdev, cp->instance, true); +} + +static int 
add_ext_adv_data(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + struct mgmt_cp_add_ext_adv_data *cp = data; + struct mgmt_rp_add_ext_adv_data rp; + u8 schedule_instance = 0; + struct adv_info *next_instance; + struct adv_info *adv_instance; + int err = 0; + struct mgmt_pending_cmd *cmd; + + BT_DBG("%s", hdev->name); + + hci_dev_lock(hdev); + + adv_instance = hci_find_adv_instance(hdev, cp->instance); + + if (!adv_instance) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_INVALID_PARAMS); + goto unlock; + } + + /* In new interface, we require that we are powered to register */ + if (!hdev_is_powered(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_REJECTED); + goto clear_new_instance; + } + + if (adv_busy(hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_BUSY); + goto clear_new_instance; + } + + /* Validate new data */ + if (!tlv_data_is_valid(hdev, adv_instance->flags, cp->data, + cp->adv_data_len, true) || + !tlv_data_is_valid(hdev, adv_instance->flags, cp->data + + cp->adv_data_len, cp->scan_rsp_len, false)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_INVALID_PARAMS); + goto clear_new_instance; + } + + /* Set the data in the advertising instance */ + hci_set_adv_instance_data(hdev, cp->instance, cp->adv_data_len, + cp->data, cp->scan_rsp_len, + cp->data + cp->adv_data_len); + + /* If using software rotation, determine next instance to use */ + if (hdev->cur_adv_instance == cp->instance) { + /* If the currently advertised instance is being changed + * then cancel the current advertising and schedule the + * next instance. If there is only one instance then the + * overridden advertising data will be visible right + * away + */ + cancel_adv_timeout(hdev); + + next_instance = hci_get_next_instance(hdev, cp->instance); + if (next_instance) + schedule_instance = next_instance->instance; + } else if (!hdev->adv_instance_timeout) { + /* Immediately advertise the new instance if no other + * instance is currently being advertised. + */ + schedule_instance = cp->instance; + } + + /* If the HCI_ADVERTISING flag is set or there is no instance to + * be advertised then we have no HCI communication to make. + * Simply return. + */ + if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || !schedule_instance) { + if (adv_instance->pending) { + mgmt_advertising_added(sk, hdev, cp->instance); + adv_instance->pending = false; + } + rp.instance = cp->instance; + err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_DATA, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + goto unlock; + } + + cmd = mgmt_pending_new(sk, MGMT_OP_ADD_EXT_ADV_DATA, hdev, data, + data_len); + if (!cmd) { + err = -ENOMEM; + goto clear_new_instance; + } + + err = hci_cmd_sync_queue(hdev, add_ext_adv_data_sync, cmd, + add_ext_adv_data_complete); + if (err < 0) { + mgmt_pending_free(cmd); + goto clear_new_instance; + } + + /* We were successful in updating data, so trigger advertising_added + * event if this is an instance that wasn't previously advertising. 
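+ * (The pending flag marks an instance that was created but has not
+ * yet been announced with an Advertising Added event.)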
If + * a failure occurs in the requests we initiated, we will remove the + * instance again in add_advertising_complete + */ + if (adv_instance->pending) + mgmt_advertising_added(sk, hdev, cp->instance); + + goto unlock; + +clear_new_instance: + hci_remove_adv_instance(hdev, cp->instance); + +unlock: + hci_dev_unlock(hdev); + + return err; +} + +static void remove_advertising_complete(struct hci_dev *hdev, void *data, + int err) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_advertising *cp = cmd->param; + struct mgmt_rp_remove_advertising rp; + + bt_dev_dbg(hdev, "err %d", err); + + memset(&rp, 0, sizeof(rp)); + rp.instance = cp->instance; + + if (err) + mgmt_cmd_status(cmd->sk, cmd->index, cmd->opcode, + mgmt_status(err)); + else + mgmt_cmd_complete(cmd->sk, cmd->index, cmd->opcode, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); + + mgmt_pending_free(cmd); +} + +static int remove_advertising_sync(struct hci_dev *hdev, void *data) +{ + struct mgmt_pending_cmd *cmd = data; + struct mgmt_cp_remove_advertising *cp = cmd->param; + int err; + + err = hci_remove_advertising_sync(hdev, cmd->sk, cp->instance, true); + if (err) + return err; + + if (list_empty(&hdev->adv_instances)) + err = hci_disable_advertising_sync(hdev); + + return err; +} + +static int remove_advertising(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_cp_remove_advertising *cp = data; + struct mgmt_pending_cmd *cmd; + int err; + + bt_dev_dbg(hdev, "sock %p", sk); + + hci_dev_lock(hdev); + + if (cp->instance && !hci_find_adv_instance(hdev, cp->instance)) { + err = mgmt_cmd_status(sk, hdev->id, + MGMT_OP_REMOVE_ADVERTISING, + MGMT_STATUS_INVALID_PARAMS); + goto unlock; + } + + if (pending_find(MGMT_OP_SET_LE, hdev)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, + MGMT_STATUS_BUSY); + goto unlock; + } + + if (list_empty(&hdev->adv_instances)) { + err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_REMOVE_ADVERTISING, + MGMT_STATUS_INVALID_PARAMS); + goto unlock; + } + + cmd = mgmt_pending_new(sk, MGMT_OP_REMOVE_ADVERTISING, hdev, data, + data_len); + if (!cmd) { + err = -ENOMEM; + goto unlock; + } + + err = hci_cmd_sync_queue(hdev, remove_advertising_sync, cmd, + remove_advertising_complete); + if (err < 0) + mgmt_pending_free(cmd); + +unlock: + hci_dev_unlock(hdev); + + return err; +} + +static int get_adv_size_info(struct sock *sk, struct hci_dev *hdev, + void *data, u16 data_len) +{ + struct mgmt_cp_get_adv_size_info *cp = data; + struct mgmt_rp_get_adv_size_info rp; + u32 flags, supported_flags; + + bt_dev_dbg(hdev, "sock %p", sk); + + if (!lmp_le_capable(hdev)) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, + MGMT_STATUS_REJECTED); + + if (cp->instance < 1 || cp->instance > hdev->le_num_of_adv_sets) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, + MGMT_STATUS_INVALID_PARAMS); + + flags = __le32_to_cpu(cp->flags); + + /* The current implementation only supports a subset of the specified + * flags. 
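+ * Unknown flag bits are rejected with Invalid Parameters rather
+ * than silently ignored.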
+ */ + supported_flags = get_supported_adv_flags(hdev); + if (flags & ~supported_flags) + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, + MGMT_STATUS_INVALID_PARAMS); + + rp.instance = cp->instance; + rp.flags = cp->flags; + rp.max_adv_data_len = tlv_data_max_len(hdev, flags, true); + rp.max_scan_rsp_len = tlv_data_max_len(hdev, flags, false); + + return mgmt_cmd_complete(sk, hdev->id, MGMT_OP_GET_ADV_SIZE_INFO, + MGMT_STATUS_SUCCESS, &rp, sizeof(rp)); +} + +static const struct hci_mgmt_handler mgmt_handlers[] = { + { NULL }, /* 0x0000 (no command) */ + { read_version, MGMT_READ_VERSION_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { read_commands, MGMT_READ_COMMANDS_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { read_index_list, MGMT_READ_INDEX_LIST_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { read_controller_info, MGMT_READ_INFO_SIZE, + HCI_MGMT_UNTRUSTED }, + { set_powered, MGMT_SETTING_SIZE }, + { set_discoverable, MGMT_SET_DISCOVERABLE_SIZE }, + { set_connectable, MGMT_SETTING_SIZE }, + { set_fast_connectable, MGMT_SETTING_SIZE }, + { set_bondable, MGMT_SETTING_SIZE }, + { set_link_security, MGMT_SETTING_SIZE }, + { set_ssp, MGMT_SETTING_SIZE }, + { set_hs, MGMT_SETTING_SIZE }, + { set_le, MGMT_SETTING_SIZE }, + { set_dev_class, MGMT_SET_DEV_CLASS_SIZE }, + { set_local_name, MGMT_SET_LOCAL_NAME_SIZE }, + { add_uuid, MGMT_ADD_UUID_SIZE }, + { remove_uuid, MGMT_REMOVE_UUID_SIZE }, + { load_link_keys, MGMT_LOAD_LINK_KEYS_SIZE, + HCI_MGMT_VAR_LEN }, + { load_long_term_keys, MGMT_LOAD_LONG_TERM_KEYS_SIZE, + HCI_MGMT_VAR_LEN }, + { disconnect, MGMT_DISCONNECT_SIZE }, + { get_connections, MGMT_GET_CONNECTIONS_SIZE }, + { pin_code_reply, MGMT_PIN_CODE_REPLY_SIZE }, + { pin_code_neg_reply, MGMT_PIN_CODE_NEG_REPLY_SIZE }, + { set_io_capability, MGMT_SET_IO_CAPABILITY_SIZE }, + { pair_device, MGMT_PAIR_DEVICE_SIZE }, + { cancel_pair_device, MGMT_CANCEL_PAIR_DEVICE_SIZE }, + { unpair_device, MGMT_UNPAIR_DEVICE_SIZE }, + { user_confirm_reply, MGMT_USER_CONFIRM_REPLY_SIZE }, + { user_confirm_neg_reply, MGMT_USER_CONFIRM_NEG_REPLY_SIZE }, + { user_passkey_reply, MGMT_USER_PASSKEY_REPLY_SIZE }, + { user_passkey_neg_reply, MGMT_USER_PASSKEY_NEG_REPLY_SIZE }, + { read_local_oob_data, MGMT_READ_LOCAL_OOB_DATA_SIZE }, + { add_remote_oob_data, MGMT_ADD_REMOTE_OOB_DATA_SIZE, + HCI_MGMT_VAR_LEN }, + { remove_remote_oob_data, MGMT_REMOVE_REMOTE_OOB_DATA_SIZE }, + { start_discovery, MGMT_START_DISCOVERY_SIZE }, + { stop_discovery, MGMT_STOP_DISCOVERY_SIZE }, + { confirm_name, MGMT_CONFIRM_NAME_SIZE }, + { block_device, MGMT_BLOCK_DEVICE_SIZE }, + { unblock_device, MGMT_UNBLOCK_DEVICE_SIZE }, + { set_device_id, MGMT_SET_DEVICE_ID_SIZE }, + { set_advertising, MGMT_SETTING_SIZE }, + { set_bredr, MGMT_SETTING_SIZE }, + { set_static_address, MGMT_SET_STATIC_ADDRESS_SIZE }, + { set_scan_params, MGMT_SET_SCAN_PARAMS_SIZE }, + { set_secure_conn, MGMT_SETTING_SIZE }, + { set_debug_keys, MGMT_SETTING_SIZE }, + { set_privacy, MGMT_SET_PRIVACY_SIZE }, + { load_irks, MGMT_LOAD_IRKS_SIZE, + HCI_MGMT_VAR_LEN }, + { get_conn_info, MGMT_GET_CONN_INFO_SIZE }, + { get_clock_info, MGMT_GET_CLOCK_INFO_SIZE }, + { add_device, MGMT_ADD_DEVICE_SIZE }, + { remove_device, MGMT_REMOVE_DEVICE_SIZE }, + { load_conn_param, MGMT_LOAD_CONN_PARAM_SIZE, + HCI_MGMT_VAR_LEN }, + { read_unconf_index_list, MGMT_READ_UNCONF_INDEX_LIST_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { read_config_info, MGMT_READ_CONFIG_INFO_SIZE, + HCI_MGMT_UNCONFIGURED | + HCI_MGMT_UNTRUSTED }, + { 
set_external_config, MGMT_SET_EXTERNAL_CONFIG_SIZE, + HCI_MGMT_UNCONFIGURED }, + { set_public_address, MGMT_SET_PUBLIC_ADDRESS_SIZE, + HCI_MGMT_UNCONFIGURED }, + { start_service_discovery, MGMT_START_SERVICE_DISCOVERY_SIZE, + HCI_MGMT_VAR_LEN }, + { read_local_oob_ext_data, MGMT_READ_LOCAL_OOB_EXT_DATA_SIZE }, + { read_ext_index_list, MGMT_READ_EXT_INDEX_LIST_SIZE, + HCI_MGMT_NO_HDEV | + HCI_MGMT_UNTRUSTED }, + { read_adv_features, MGMT_READ_ADV_FEATURES_SIZE }, + { add_advertising, MGMT_ADD_ADVERTISING_SIZE, + HCI_MGMT_VAR_LEN }, + { remove_advertising, MGMT_REMOVE_ADVERTISING_SIZE }, + { get_adv_size_info, MGMT_GET_ADV_SIZE_INFO_SIZE }, + { start_limited_discovery, MGMT_START_DISCOVERY_SIZE }, + { read_ext_controller_info,MGMT_READ_EXT_INFO_SIZE, + HCI_MGMT_UNTRUSTED }, + { set_appearance, MGMT_SET_APPEARANCE_SIZE }, + { get_phy_configuration, MGMT_GET_PHY_CONFIGURATION_SIZE }, + { set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE }, + { set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE, + HCI_MGMT_VAR_LEN }, + { set_wideband_speech, MGMT_SETTING_SIZE }, + { read_controller_cap, MGMT_READ_CONTROLLER_CAP_SIZE, + HCI_MGMT_UNTRUSTED }, + { read_exp_features_info, MGMT_READ_EXP_FEATURES_INFO_SIZE, + HCI_MGMT_UNTRUSTED | + HCI_MGMT_HDEV_OPTIONAL }, + { set_exp_feature, MGMT_SET_EXP_FEATURE_SIZE, + HCI_MGMT_VAR_LEN | + HCI_MGMT_HDEV_OPTIONAL }, + { read_def_system_config, MGMT_READ_DEF_SYSTEM_CONFIG_SIZE, + HCI_MGMT_UNTRUSTED }, + { set_def_system_config, MGMT_SET_DEF_SYSTEM_CONFIG_SIZE, + HCI_MGMT_VAR_LEN }, + { read_def_runtime_config, MGMT_READ_DEF_RUNTIME_CONFIG_SIZE, + HCI_MGMT_UNTRUSTED }, + { set_def_runtime_config, MGMT_SET_DEF_RUNTIME_CONFIG_SIZE, + HCI_MGMT_VAR_LEN }, + { get_device_flags, MGMT_GET_DEVICE_FLAGS_SIZE }, + { set_device_flags, MGMT_SET_DEVICE_FLAGS_SIZE }, + { read_adv_mon_features, MGMT_READ_ADV_MONITOR_FEATURES_SIZE }, + { add_adv_patterns_monitor,MGMT_ADD_ADV_PATTERNS_MONITOR_SIZE, + HCI_MGMT_VAR_LEN }, + { remove_adv_monitor, MGMT_REMOVE_ADV_MONITOR_SIZE }, + { add_ext_adv_params, MGMT_ADD_EXT_ADV_PARAMS_MIN_SIZE, + HCI_MGMT_VAR_LEN }, + { add_ext_adv_data, MGMT_ADD_EXT_ADV_DATA_SIZE, + HCI_MGMT_VAR_LEN }, + { add_adv_patterns_monitor_rssi, + MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE, + HCI_MGMT_VAR_LEN }, + { set_mesh, MGMT_SET_MESH_RECEIVER_SIZE, + HCI_MGMT_VAR_LEN }, + { mesh_features, MGMT_MESH_READ_FEATURES_SIZE }, + { mesh_send, MGMT_MESH_SEND_SIZE, + HCI_MGMT_VAR_LEN }, + { mesh_send_cancel, MGMT_MESH_SEND_CANCEL_SIZE }, +}; + +void mgmt_index_added(struct hci_dev *hdev) +{ + struct mgmt_ev_ext_index ev; + + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + return; + + switch (hdev->dev_type) { + case HCI_PRIMARY: + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + mgmt_index_event(MGMT_EV_UNCONF_INDEX_ADDED, hdev, + NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); + ev.type = 0x01; + } else { + mgmt_index_event(MGMT_EV_INDEX_ADDED, hdev, NULL, 0, + HCI_MGMT_INDEX_EVENTS); + ev.type = 0x00; + } + break; + case HCI_AMP: + ev.type = 0x02; + break; + default: + return; + } + + ev.bus = hdev->bus; + + mgmt_index_event(MGMT_EV_EXT_INDEX_ADDED, hdev, &ev, sizeof(ev), + HCI_MGMT_EXT_INDEX_EVENTS); +} + +void mgmt_index_removed(struct hci_dev *hdev) +{ + struct mgmt_ev_ext_index ev; + u8 status = MGMT_STATUS_INVALID_INDEX; + + if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks)) + return; + + switch (hdev->dev_type) { + case HCI_PRIMARY: + mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); + + if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { + 
mgmt_index_event(MGMT_EV_UNCONF_INDEX_REMOVED, hdev, + NULL, 0, HCI_MGMT_UNCONF_INDEX_EVENTS); + ev.type = 0x01; + } else { + mgmt_index_event(MGMT_EV_INDEX_REMOVED, hdev, NULL, 0, + HCI_MGMT_INDEX_EVENTS); + ev.type = 0x00; + } + break; + case HCI_AMP: + ev.type = 0x02; + break; + default: + return; + } + + ev.bus = hdev->bus; + + mgmt_index_event(MGMT_EV_EXT_INDEX_REMOVED, hdev, &ev, sizeof(ev), + HCI_MGMT_EXT_INDEX_EVENTS); + + /* Cancel any remaining timed work */ + if (!hci_dev_test_flag(hdev, HCI_MGMT)) + return; + cancel_delayed_work_sync(&hdev->discov_off); + cancel_delayed_work_sync(&hdev->service_cache); + cancel_delayed_work_sync(&hdev->rpa_expired); +} + +void mgmt_power_on(struct hci_dev *hdev, int err) +{ + struct cmd_lookup match = { NULL, hdev }; + + bt_dev_dbg(hdev, "err %d", err); + + hci_dev_lock(hdev); + + if (!err) { + restart_le_actions(hdev); + hci_update_passive_scan(hdev); + } + + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); + + new_settings(hdev, match.sk); + + if (match.sk) + sock_put(match.sk); + + hci_dev_unlock(hdev); +} + +void __mgmt_power_off(struct hci_dev *hdev) +{ + struct cmd_lookup match = { NULL, hdev }; + u8 status, zero_cod[] = { 0, 0, 0 }; + + mgmt_pending_foreach(MGMT_OP_SET_POWERED, hdev, settings_rsp, &match); + + /* If the power off is because of hdev unregistration let + * use the appropriate INVALID_INDEX status. Otherwise use + * NOT_POWERED. We cover both scenarios here since later in + * mgmt_index_removed() any hci_conn callbacks will have already + * been triggered, potentially causing misleading DISCONNECTED + * status responses. + */ + if (hci_dev_test_flag(hdev, HCI_UNREGISTER)) + status = MGMT_STATUS_INVALID_INDEX; + else + status = MGMT_STATUS_NOT_POWERED; + + mgmt_pending_foreach(0, hdev, cmd_complete_rsp, &status); + + if (memcmp(hdev->dev_class, zero_cod, sizeof(zero_cod)) != 0) { + mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, + zero_cod, sizeof(zero_cod), + HCI_MGMT_DEV_CLASS_EVENTS, NULL); + ext_info_changed(hdev, NULL); + } + + new_settings(hdev, match.sk); + + if (match.sk) + sock_put(match.sk); +} + +void mgmt_set_powered_failed(struct hci_dev *hdev, int err) +{ + struct mgmt_pending_cmd *cmd; + u8 status; + + cmd = pending_find(MGMT_OP_SET_POWERED, hdev); + if (!cmd) + return; + + if (err == -ERFKILL) + status = MGMT_STATUS_RFKILLED; + else + status = MGMT_STATUS_FAILED; + + mgmt_cmd_status(cmd->sk, hdev->id, MGMT_OP_SET_POWERED, status); + + mgmt_pending_remove(cmd); +} + +void mgmt_new_link_key(struct hci_dev *hdev, struct link_key *key, + bool persistent) +{ + struct mgmt_ev_new_link_key ev; + + memset(&ev, 0, sizeof(ev)); + + ev.store_hint = persistent; + bacpy(&ev.key.addr.bdaddr, &key->bdaddr); + ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type); + ev.key.type = key->type; + memcpy(ev.key.val, key->val, HCI_LINK_KEY_SIZE); + ev.key.pin_len = key->pin_len; + + mgmt_event(MGMT_EV_NEW_LINK_KEY, hdev, &ev, sizeof(ev), NULL); +} + +static u8 mgmt_ltk_type(struct smp_ltk *ltk) +{ + switch (ltk->type) { + case SMP_LTK: + case SMP_LTK_RESPONDER: + if (ltk->authenticated) + return MGMT_LTK_AUTHENTICATED; + return MGMT_LTK_UNAUTHENTICATED; + case SMP_LTK_P256: + if (ltk->authenticated) + return MGMT_LTK_P256_AUTH; + return MGMT_LTK_P256_UNAUTH; + case SMP_LTK_P256_DEBUG: + return MGMT_LTK_P256_DEBUG; + } + + return MGMT_LTK_UNAUTHENTICATED; +} + +void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent) +{ + struct mgmt_ev_new_long_term_key ev; + + 
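+ /* Zero the whole event first: it is copied out to the management
+ * socket in its entirety, so no uninitialized stack bytes may leak.
+ */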
memset(&ev, 0, sizeof(ev)); + + /* Devices using resolvable or non-resolvable random addresses + * without providing an identity resolving key don't require + * storing long term keys. Their addresses will change the + * next time around. + * + * Only when a remote device provides an identity address + * do we make sure the long term key is stored. If the remote + * identity is known, the long term keys are internally + * mapped to the identity address. So allow static random + * and public addresses here. + */ + if (key->bdaddr_type == ADDR_LE_DEV_RANDOM && + (key->bdaddr.b[5] & 0xc0) != 0xc0) + ev.store_hint = 0x00; + else + ev.store_hint = persistent; + + bacpy(&ev.key.addr.bdaddr, &key->bdaddr); + ev.key.addr.type = link_to_bdaddr(key->link_type, key->bdaddr_type); + ev.key.type = mgmt_ltk_type(key); + ev.key.enc_size = key->enc_size; + ev.key.ediv = key->ediv; + ev.key.rand = key->rand; + + if (key->type == SMP_LTK) + ev.key.initiator = 1; + + /* Make sure we copy only the significant bytes based on the + * encryption key size, and set the rest of the value to zeroes. + */ + memcpy(ev.key.val, key->val, key->enc_size); + memset(ev.key.val + key->enc_size, 0, + sizeof(ev.key.val) - key->enc_size); + + mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent) +{ + struct mgmt_ev_new_irk ev; + + memset(&ev, 0, sizeof(ev)); + + ev.store_hint = persistent; + + bacpy(&ev.rpa, &irk->rpa); + bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr); + ev.irk.addr.type = link_to_bdaddr(irk->link_type, irk->addr_type); + memcpy(ev.irk.val, irk->val, sizeof(irk->val)); + + mgmt_event(MGMT_EV_NEW_IRK, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk, + bool persistent) +{ + struct mgmt_ev_new_csrk ev; + + memset(&ev, 0, sizeof(ev)); + + /* Devices using resolvable or non-resolvable random addresses + * without providing an identity resolving key don't require + * storing signature resolving keys. Their addresses will change + * the next time around. + * + * Only when a remote device provides an identity address + * do we make sure the signature resolving key is stored. So allow + * static random and public addresses here.
+ */ + if (csrk->bdaddr_type == ADDR_LE_DEV_RANDOM && + (csrk->bdaddr.b[5] & 0xc0) != 0xc0) + ev.store_hint = 0x00; + else + ev.store_hint = persistent; + + bacpy(&ev.key.addr.bdaddr, &csrk->bdaddr); + ev.key.addr.type = link_to_bdaddr(csrk->link_type, csrk->bdaddr_type); + ev.key.type = csrk->type; + memcpy(ev.key.val, csrk->val, sizeof(csrk->val)); + + mgmt_event(MGMT_EV_NEW_CSRK, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 bdaddr_type, u8 store_hint, u16 min_interval, + u16 max_interval, u16 latency, u16 timeout) +{ + struct mgmt_ev_new_conn_param ev; + + if (!hci_is_identity_address(bdaddr, bdaddr_type)) + return; + + memset(&ev, 0, sizeof(ev)); + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = link_to_bdaddr(LE_LINK, bdaddr_type); + ev.store_hint = store_hint; + ev.min_interval = cpu_to_le16(min_interval); + ev.max_interval = cpu_to_le16(max_interval); + ev.latency = cpu_to_le16(latency); + ev.timeout = cpu_to_le16(timeout); + + mgmt_event(MGMT_EV_NEW_CONN_PARAM, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_device_connected(struct hci_dev *hdev, struct hci_conn *conn, + u8 *name, u8 name_len) +{ + struct sk_buff *skb; + struct mgmt_ev_device_connected *ev; + u16 eir_len = 0; + u32 flags = 0; + + /* allocate buff for LE or BR/EDR adv */ + if (conn->le_adv_data_len > 0) + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED, + sizeof(*ev) + conn->le_adv_data_len); + else + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_CONNECTED, + sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0) + + eir_precalc_len(sizeof(conn->dev_class))); + + ev = skb_put(skb, sizeof(*ev)); + bacpy(&ev->addr.bdaddr, &conn->dst); + ev->addr.type = link_to_bdaddr(conn->type, conn->dst_type); + + if (conn->out) + flags |= MGMT_DEV_FOUND_INITIATED_CONN; + + ev->flags = __cpu_to_le32(flags); + + /* We must ensure that the EIR Data fields are ordered and + * unique. Keep it simple for now and avoid the problem by not + * adding any BR/EDR data to the LE adv. 
+ */ + if (conn->le_adv_data_len > 0) { + skb_put_data(skb, conn->le_adv_data, conn->le_adv_data_len); + eir_len = conn->le_adv_data_len; + } else { + if (name) + eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len); + + if (memcmp(conn->dev_class, "\0\0\0", sizeof(conn->dev_class))) + eir_len += eir_skb_put_data(skb, EIR_CLASS_OF_DEV, + conn->dev_class, sizeof(conn->dev_class)); + } + + ev->eir_len = cpu_to_le16(eir_len); + + mgmt_event_skb(skb, NULL); +} + +static void disconnect_rsp(struct mgmt_pending_cmd *cmd, void *data) +{ + struct sock **sk = data; + + cmd->cmd_complete(cmd, 0); + + *sk = cmd->sk; + sock_hold(*sk); + + mgmt_pending_remove(cmd); +} + +static void unpair_device_rsp(struct mgmt_pending_cmd *cmd, void *data) +{ + struct hci_dev *hdev = data; + struct mgmt_cp_unpair_device *cp = cmd->param; + + device_unpaired(hdev, &cp->addr.bdaddr, cp->addr.type, cmd->sk); + + cmd->cmd_complete(cmd, 0); + mgmt_pending_remove(cmd); +} + +bool mgmt_powering_down(struct hci_dev *hdev) +{ + struct mgmt_pending_cmd *cmd; + struct mgmt_mode *cp; + + cmd = pending_find(MGMT_OP_SET_POWERED, hdev); + if (!cmd) + return false; + + cp = cmd->param; + if (!cp->val) + return true; + + return false; +} + +void mgmt_device_disconnected(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 reason, + bool mgmt_connected) +{ + struct mgmt_ev_device_disconnected ev; + struct sock *sk = NULL; + + /* The connection is still in hci_conn_hash so test for 1 + * instead of 0 to know if this is the last one. + */ + if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); + } + + if (!mgmt_connected) + return; + + if (link_type != ACL_LINK && link_type != LE_LINK) + return; + + mgmt_pending_foreach(MGMT_OP_DISCONNECT, hdev, disconnect_rsp, &sk); + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = link_to_bdaddr(link_type, addr_type); + ev.reason = reason; + + /* Report disconnects due to suspend */ + if (hdev->suspended) + ev.reason = MGMT_DEV_DISCONN_LOCAL_HOST_SUSPEND; + + mgmt_event(MGMT_EV_DEVICE_DISCONNECTED, hdev, &ev, sizeof(ev), sk); + + if (sk) + sock_put(sk); + + mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, + hdev); +} + +void mgmt_disconnect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status) +{ + u8 bdaddr_type = link_to_bdaddr(link_type, addr_type); + struct mgmt_cp_disconnect *cp; + struct mgmt_pending_cmd *cmd; + + mgmt_pending_foreach(MGMT_OP_UNPAIR_DEVICE, hdev, unpair_device_rsp, + hdev); + + cmd = pending_find(MGMT_OP_DISCONNECT, hdev); + if (!cmd) + return; + + cp = cmd->param; + + if (bacmp(bdaddr, &cp->addr.bdaddr)) + return; + + if (cp->addr.type != bdaddr_type) + return; + + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); +} + +void mgmt_connect_failed(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type, u8 status) +{ + struct mgmt_ev_connect_failed ev; + + /* The connection is still in hci_conn_hash so test for 1 + * instead of 0 to know if this is the last one. 
+ */ + if (mgmt_powering_down(hdev) && hci_conn_count(hdev) == 1) { + cancel_delayed_work(&hdev->power_off); + queue_work(hdev->req_workqueue, &hdev->power_off.work); + } + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = link_to_bdaddr(link_type, addr_type); + ev.status = mgmt_status(status); + + mgmt_event(MGMT_EV_CONNECT_FAILED, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_pin_code_request(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 secure) +{ + struct mgmt_ev_pin_code_request ev; + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = BDADDR_BREDR; + ev.secure = secure; + + mgmt_event(MGMT_EV_PIN_CODE_REQUEST, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_pin_code_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status) +{ + struct mgmt_pending_cmd *cmd; + + cmd = pending_find(MGMT_OP_PIN_CODE_REPLY, hdev); + if (!cmd) + return; + + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); +} + +void mgmt_pin_code_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 status) +{ + struct mgmt_pending_cmd *cmd; + + cmd = pending_find(MGMT_OP_PIN_CODE_NEG_REPLY, hdev); + if (!cmd) + return; + + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); +} + +int mgmt_user_confirm_request(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u32 value, + u8 confirm_hint) +{ + struct mgmt_ev_user_confirm_request ev; + + bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = link_to_bdaddr(link_type, addr_type); + ev.confirm_hint = confirm_hint; + ev.value = cpu_to_le32(value); + + return mgmt_event(MGMT_EV_USER_CONFIRM_REQUEST, hdev, &ev, sizeof(ev), + NULL); +} + +int mgmt_user_passkey_request(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type) +{ + struct mgmt_ev_user_passkey_request ev; + + bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = link_to_bdaddr(link_type, addr_type); + + return mgmt_event(MGMT_EV_USER_PASSKEY_REQUEST, hdev, &ev, sizeof(ev), + NULL); +} + +static int user_pairing_resp_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status, + u8 opcode) +{ + struct mgmt_pending_cmd *cmd; + + cmd = pending_find(opcode, hdev); + if (!cmd) + return -ENOENT; + + cmd->cmd_complete(cmd, mgmt_status(status)); + mgmt_pending_remove(cmd); + + return 0; +} + +int mgmt_user_confirm_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status) +{ + return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, + status, MGMT_OP_USER_CONFIRM_REPLY); +} + +int mgmt_user_confirm_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status) +{ + return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, + status, + MGMT_OP_USER_CONFIRM_NEG_REPLY); +} + +int mgmt_user_passkey_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status) +{ + return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, + status, MGMT_OP_USER_PASSKEY_REPLY); +} + +int mgmt_user_passkey_neg_reply_complete(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u8 status) +{ + return user_pairing_resp_complete(hdev, bdaddr, link_type, addr_type, + status, + MGMT_OP_USER_PASSKEY_NEG_REPLY); +} + +int mgmt_user_passkey_notify(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 link_type, u8 addr_type, u32 passkey, + u8 entered) +{ + struct mgmt_ev_passkey_notify ev; + + 
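+ /* Hypothetical consumer sketch, for illustration only ("payload" is + * assumed to point at the event body): passkeys are six digit + * decimals (000000-999999) and ev->entered reports how many digits + * the remote side has typed so far: + * + * const struct mgmt_ev_passkey_notify *ev = payload; + * printf("passkey %06u (%u entered)\n", + * le32toh(ev->passkey), ev->entered); + */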
bt_dev_dbg(hdev, "bdaddr %pMR", bdaddr); + + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = link_to_bdaddr(link_type, addr_type); + ev.passkey = __cpu_to_le32(passkey); + ev.entered = entered; + + return mgmt_event(MGMT_EV_PASSKEY_NOTIFY, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_auth_failed(struct hci_conn *conn, u8 hci_status) +{ + struct mgmt_ev_auth_failed ev; + struct mgmt_pending_cmd *cmd; + u8 status = mgmt_status(hci_status); + + bacpy(&ev.addr.bdaddr, &conn->dst); + ev.addr.type = link_to_bdaddr(conn->type, conn->dst_type); + ev.status = status; + + cmd = find_pairing(conn); + + mgmt_event(MGMT_EV_AUTH_FAILED, conn->hdev, &ev, sizeof(ev), + cmd ? cmd->sk : NULL); + + if (cmd) { + cmd->cmd_complete(cmd, status); + mgmt_pending_remove(cmd); + } +} + +void mgmt_auth_enable_complete(struct hci_dev *hdev, u8 status) +{ + struct cmd_lookup match = { NULL, hdev }; + bool changed; + + if (status) { + u8 mgmt_err = mgmt_status(status); + mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, + cmd_status_rsp, &mgmt_err); + return; + } + + if (test_bit(HCI_AUTH, &hdev->flags)) + changed = !hci_dev_test_and_set_flag(hdev, HCI_LINK_SECURITY); + else + changed = hci_dev_test_and_clear_flag(hdev, HCI_LINK_SECURITY); + + mgmt_pending_foreach(MGMT_OP_SET_LINK_SECURITY, hdev, settings_rsp, + &match); + + if (changed) + new_settings(hdev, match.sk); + + if (match.sk) + sock_put(match.sk); +} + +static void sk_lookup(struct mgmt_pending_cmd *cmd, void *data) +{ + struct cmd_lookup *match = data; + + if (match->sk == NULL) { + match->sk = cmd->sk; + sock_hold(match->sk); + } +} + +void mgmt_set_class_of_dev_complete(struct hci_dev *hdev, u8 *dev_class, + u8 status) +{ + struct cmd_lookup match = { NULL, hdev, mgmt_status(status) }; + + mgmt_pending_foreach(MGMT_OP_SET_DEV_CLASS, hdev, sk_lookup, &match); + mgmt_pending_foreach(MGMT_OP_ADD_UUID, hdev, sk_lookup, &match); + mgmt_pending_foreach(MGMT_OP_REMOVE_UUID, hdev, sk_lookup, &match); + + if (!status) { + mgmt_limited_event(MGMT_EV_CLASS_OF_DEV_CHANGED, hdev, dev_class, + 3, HCI_MGMT_DEV_CLASS_EVENTS, NULL); + ext_info_changed(hdev, NULL); + } + + if (match.sk) + sock_put(match.sk); +} + +void mgmt_set_local_name_complete(struct hci_dev *hdev, u8 *name, u8 status) +{ + struct mgmt_cp_set_local_name ev; + struct mgmt_pending_cmd *cmd; + + if (status) + return; + + memset(&ev, 0, sizeof(ev)); + memcpy(ev.name, name, HCI_MAX_NAME_LENGTH); + memcpy(ev.short_name, hdev->short_name, HCI_MAX_SHORT_NAME_LENGTH); + + cmd = pending_find(MGMT_OP_SET_LOCAL_NAME, hdev); + if (!cmd) { + memcpy(hdev->dev_name, name, sizeof(hdev->dev_name)); + + /* If this is a HCI command related to powering on the + * HCI dev don't send any mgmt signals. + */ + if (pending_find(MGMT_OP_SET_POWERED, hdev)) + return; + } + + mgmt_limited_event(MGMT_EV_LOCAL_NAME_CHANGED, hdev, &ev, sizeof(ev), + HCI_MGMT_LOCAL_NAME_EVENTS, cmd ? cmd->sk : NULL); + ext_info_changed(hdev, cmd ? 
cmd->sk : NULL); +} + +static inline bool has_uuid(u8 *uuid, u16 uuid_count, u8 (*uuids)[16]) +{ + int i; + + for (i = 0; i < uuid_count; i++) { + if (!memcmp(uuid, uuids[i], 16)) + return true; + } + + return false; +} + +static bool eir_has_uuids(u8 *eir, u16 eir_len, u16 uuid_count, u8 (*uuids)[16]) +{ + u16 parsed = 0; + + while (parsed < eir_len) { + u8 field_len = eir[0]; + u8 uuid[16]; + int i; + + if (field_len == 0) + break; + + if (eir_len - parsed < field_len + 1) + break; + + switch (eir[1]) { + case EIR_UUID16_ALL: + case EIR_UUID16_SOME: + for (i = 0; i + 3 <= field_len; i += 2) { + memcpy(uuid, bluetooth_base_uuid, 16); + uuid[13] = eir[i + 3]; + uuid[12] = eir[i + 2]; + if (has_uuid(uuid, uuid_count, uuids)) + return true; + } + break; + case EIR_UUID32_ALL: + case EIR_UUID32_SOME: + for (i = 0; i + 5 <= field_len; i += 4) { + memcpy(uuid, bluetooth_base_uuid, 16); + uuid[15] = eir[i + 5]; + uuid[14] = eir[i + 4]; + uuid[13] = eir[i + 3]; + uuid[12] = eir[i + 2]; + if (has_uuid(uuid, uuid_count, uuids)) + return true; + } + break; + case EIR_UUID128_ALL: + case EIR_UUID128_SOME: + for (i = 0; i + 17 <= field_len; i += 16) { + memcpy(uuid, eir + i + 2, 16); + if (has_uuid(uuid, uuid_count, uuids)) + return true; + } + break; + } + + parsed += field_len + 1; + eir += field_len + 1; + } + + return false; +} + +static void restart_le_scan(struct hci_dev *hdev) +{ + /* If controller is not scanning we are done. */ + if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) + return; + + if (time_after(jiffies + DISCOV_LE_RESTART_DELAY, + hdev->discovery.scan_start + + hdev->discovery.scan_duration)) + return; + + queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_restart, + DISCOV_LE_RESTART_DELAY); +} + +static bool is_filter_match(struct hci_dev *hdev, s8 rssi, u8 *eir, + u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) +{ + /* If a RSSI threshold has been specified, and + * HCI_QUIRK_STRICT_DUPLICATE_FILTER is not set, then all results with + * a RSSI smaller than the RSSI threshold will be dropped. If the quirk + * is set, let it through for further processing, as we might need to + * restart the scan. + * + * For BR/EDR devices (pre 1.2) providing no RSSI during inquiry, + * the results are also dropped. + */ + if (hdev->discovery.rssi != HCI_RSSI_INVALID && + (rssi == HCI_RSSI_INVALID || + (rssi < hdev->discovery.rssi && + !test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)))) + return false; + + if (hdev->discovery.uuid_count != 0) { + /* If a list of UUIDs is provided in filter, results with no + * matching UUID should be dropped. + */ + if (!eir_has_uuids(eir, eir_len, hdev->discovery.uuid_count, + hdev->discovery.uuids) && + !eir_has_uuids(scan_rsp, scan_rsp_len, + hdev->discovery.uuid_count, + hdev->discovery.uuids)) + return false; + } + + /* If duplicate filtering does not report RSSI changes, then restart + * scanning to ensure updated result with updated RSSI values. + */ + if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks)) { + restart_le_scan(hdev); + + /* Validate RSSI value against the RSSI threshold once more. 
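+ * With a strict duplicate filter the controller would otherwise never + * re-report this device, so the restart above guarantees a future, + * possibly stronger, reading even though a weak one is dropped here.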
*/ + if (hdev->discovery.rssi != HCI_RSSI_INVALID && + rssi < hdev->discovery.rssi) + return false; + } + + return true; +} + +void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle, + bdaddr_t *bdaddr, u8 addr_type) +{ + struct mgmt_ev_adv_monitor_device_lost ev; + + ev.monitor_handle = cpu_to_le16(handle); + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = addr_type; + + mgmt_event(MGMT_EV_ADV_MONITOR_DEVICE_LOST, hdev, &ev, sizeof(ev), + NULL); +} + +static void mgmt_send_adv_monitor_device_found(struct hci_dev *hdev, + struct sk_buff *skb, + struct sock *skip_sk, + u16 handle) +{ + struct sk_buff *advmon_skb; + size_t advmon_skb_len; + __le16 *monitor_handle; + + if (!skb) + return; + + advmon_skb_len = (sizeof(struct mgmt_ev_adv_monitor_device_found) - + sizeof(struct mgmt_ev_device_found)) + skb->len; + advmon_skb = mgmt_alloc_skb(hdev, MGMT_EV_ADV_MONITOR_DEVICE_FOUND, + advmon_skb_len); + if (!advmon_skb) + return; + + /* ADV_MONITOR_DEVICE_FOUND is similar to DEVICE_FOUND event except + * that it also has 'monitor_handle'. Make a copy of DEVICE_FOUND and + * store monitor_handle of the matched monitor. + */ + monitor_handle = skb_put(advmon_skb, sizeof(*monitor_handle)); + *monitor_handle = cpu_to_le16(handle); + skb_put_data(advmon_skb, skb->data, skb->len); + + mgmt_event_skb(advmon_skb, skip_sk); +} + +static void mgmt_adv_monitor_device_found(struct hci_dev *hdev, + bdaddr_t *bdaddr, bool report_device, + struct sk_buff *skb, + struct sock *skip_sk) +{ + struct monitored_device *dev, *tmp; + bool matched = false; + bool notified = false; + + /* We have received the Advertisement Report because: + * 1. the kernel has initiated active discovery + * 2. if not, we have pend_le_reports > 0 in which case we are doing + * passive scanning + * 3. if none of the above is true, we have one or more active + * Advertisement Monitor + * + * For case 1 and 2, report all advertisements via MGMT_EV_DEVICE_FOUND + * and report ONLY one advertisement per device for the matched Monitor + * via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event. + * + * For case 3, since we are not active scanning and all advertisements + * received are due to a matched Advertisement Monitor, report all + * advertisements ONLY via MGMT_EV_ADV_MONITOR_DEVICE_FOUND event. + */ + if (report_device && !hdev->advmon_pend_notify) { + mgmt_event_skb(skb, skip_sk); + return; + } + + hdev->advmon_pend_notify = false; + + list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) { + if (!bacmp(&dev->bdaddr, bdaddr)) { + matched = true; + + if (!dev->notified) { + mgmt_send_adv_monitor_device_found(hdev, skb, + skip_sk, + dev->handle); + notified = true; + dev->notified = true; + } + } + + if (!dev->notified) + hdev->advmon_pend_notify = true; + } + + if (!report_device && + ((matched && !notified) || !msft_monitor_supported(hdev))) { + /* Handle 0 indicates that we are not active scanning and this + * is a subsequent advertisement report for an already matched + * Advertisement Monitor or the controller offloading support + * is not available. 
+ */ + mgmt_send_adv_monitor_device_found(hdev, skb, skip_sk, 0); + } + + if (report_device) + mgmt_event_skb(skb, skip_sk); + else + kfree_skb(skb); +} + +static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type, s8 rssi, u32 flags, u8 *eir, + u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, + u64 instant) +{ + struct sk_buff *skb; + struct mgmt_ev_mesh_device_found *ev; + int i, j; + + if (!hdev->mesh_ad_types[0]) + goto accepted; + + /* Scan for requested AD types */ + if (eir_len > 0) { + for (i = 0; i + 1 < eir_len; i += eir[i] + 1) { + for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) { + if (!hdev->mesh_ad_types[j]) + break; + + if (hdev->mesh_ad_types[j] == eir[i + 1]) + goto accepted; + } + } + } + + if (scan_rsp_len > 0) { + for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) { + for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) { + if (!hdev->mesh_ad_types[j]) + break; + + if (hdev->mesh_ad_types[j] == scan_rsp[i + 1]) + goto accepted; + } + } + } + + return; + +accepted: + skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND, + sizeof(*ev) + eir_len + scan_rsp_len); + if (!skb) + return; + + ev = skb_put(skb, sizeof(*ev)); + + bacpy(&ev->addr.bdaddr, bdaddr); + ev->addr.type = link_to_bdaddr(LE_LINK, addr_type); + ev->rssi = rssi; + ev->flags = cpu_to_le32(flags); + ev->instant = cpu_to_le64(instant); + + if (eir_len > 0) + /* Copy EIR or advertising data into event */ + skb_put_data(skb, eir, eir_len); + + if (scan_rsp_len > 0) + /* Append scan response data to event */ + skb_put_data(skb, scan_rsp, scan_rsp_len); + + ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); + + mgmt_event_skb(skb, NULL); +} + +void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, + u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, + u64 instant) +{ + struct sk_buff *skb; + struct mgmt_ev_device_found *ev; + bool report_device = hci_discovery_active(hdev); + + if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK) + mesh_device_found(hdev, bdaddr, addr_type, rssi, flags, + eir, eir_len, scan_rsp, scan_rsp_len, + instant); + + /* Don't send events for a non-kernel initiated discovery. With + * LE one exception is if we have pend_le_reports > 0 in which + * case we're doing passive scanning and want these events. + */ + if (!hci_discovery_active(hdev)) { + if (link_type == ACL_LINK) + return; + if (link_type == LE_LINK && !list_empty(&hdev->pend_le_reports)) + report_device = true; + else if (!hci_is_adv_monitoring(hdev)) + return; + } + + if (hdev->discovery.result_filtering) { + /* We are using service discovery */ + if (!is_filter_match(hdev, rssi, eir, eir_len, scan_rsp, + scan_rsp_len)) + return; + } + + if (hdev->discovery.limited) { + /* Check for limited discoverable bit */ + if (dev_class) { + if (!(dev_class[1] & 0x20)) + return; + } else { + u8 *flags = eir_get_data(eir, eir_len, EIR_FLAGS, NULL); + if (!flags || !(flags[0] & LE_AD_LIMITED)) + return; + } + } + + /* Allocate skb. The 5 extra bytes are for the potential CoD field */ + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, + sizeof(*ev) + eir_len + scan_rsp_len + 5); + if (!skb) + return; + + ev = skb_put(skb, sizeof(*ev)); + + /* In case of device discovery with BR/EDR devices (pre 1.2), the + * RSSI value was reported as 0 when not available. This behavior + * is kept when using device discovery. This is required for full + * backwards compatibility with the API. 
+ * + * However when using service discovery, the value 127 will be + * returned when the RSSI is not available. + */ + if (rssi == HCI_RSSI_INVALID && !hdev->discovery.report_invalid_rssi && + link_type == ACL_LINK) + rssi = 0; + + bacpy(&ev->addr.bdaddr, bdaddr); + ev->addr.type = link_to_bdaddr(link_type, addr_type); + ev->rssi = rssi; + ev->flags = cpu_to_le32(flags); + + if (eir_len > 0) + /* Copy EIR or advertising data into event */ + skb_put_data(skb, eir, eir_len); + + if (dev_class && !eir_get_data(eir, eir_len, EIR_CLASS_OF_DEV, NULL)) { + u8 eir_cod[5]; + + eir_len += eir_append_data(eir_cod, 0, EIR_CLASS_OF_DEV, + dev_class, 3); + skb_put_data(skb, eir_cod, sizeof(eir_cod)); + } + + if (scan_rsp_len > 0) + /* Append scan response data to event */ + skb_put_data(skb, scan_rsp, scan_rsp_len); + + ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); + + mgmt_adv_monitor_device_found(hdev, bdaddr, report_device, skb, NULL); +} + +void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type, + u8 addr_type, s8 rssi, u8 *name, u8 name_len) +{ + struct sk_buff *skb; + struct mgmt_ev_device_found *ev; + u16 eir_len = 0; + u32 flags = 0; + + skb = mgmt_alloc_skb(hdev, MGMT_EV_DEVICE_FOUND, + sizeof(*ev) + (name ? eir_precalc_len(name_len) : 0)); + + ev = skb_put(skb, sizeof(*ev)); + bacpy(&ev->addr.bdaddr, bdaddr); + ev->addr.type = link_to_bdaddr(link_type, addr_type); + ev->rssi = rssi; + + if (name) + eir_len += eir_skb_put_data(skb, EIR_NAME_COMPLETE, name, name_len); + else + flags = MGMT_DEV_FOUND_NAME_REQUEST_FAILED; + + ev->eir_len = cpu_to_le16(eir_len); + ev->flags = cpu_to_le32(flags); + + mgmt_event_skb(skb, NULL); +} + +void mgmt_discovering(struct hci_dev *hdev, u8 discovering) +{ + struct mgmt_ev_discovering ev; + + bt_dev_dbg(hdev, "discovering %u", discovering); + + memset(&ev, 0, sizeof(ev)); + ev.type = hdev->discovery.type; + ev.discovering = discovering; + + mgmt_event(MGMT_EV_DISCOVERING, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_suspending(struct hci_dev *hdev, u8 state) +{ + struct mgmt_ev_controller_suspend ev; + + ev.suspend_state = state; + mgmt_event(MGMT_EV_CONTROLLER_SUSPEND, hdev, &ev, sizeof(ev), NULL); +} + +void mgmt_resuming(struct hci_dev *hdev, u8 reason, bdaddr_t *bdaddr, + u8 addr_type) +{ + struct mgmt_ev_controller_resume ev; + + ev.wake_reason = reason; + if (bdaddr) { + bacpy(&ev.addr.bdaddr, bdaddr); + ev.addr.type = addr_type; + } else { + memset(&ev.addr, 0, sizeof(ev.addr)); + } + + mgmt_event(MGMT_EV_CONTROLLER_RESUME, hdev, &ev, sizeof(ev), NULL); +} + +static struct hci_mgmt_chan chan = { + .channel = HCI_CHANNEL_CONTROL, + .handler_count = ARRAY_SIZE(mgmt_handlers), + .handlers = mgmt_handlers, + .hdev_init = mgmt_init_hdev, +}; + +int mgmt_init(void) +{ + return hci_mgmt_chan_register(&chan); +} + +void mgmt_exit(void) +{ + hci_mgmt_chan_unregister(&chan); +} + +void mgmt_cleanup(struct sock *sk) +{ + struct mgmt_mesh_tx *mesh_tx; + struct hci_dev *hdev; + + read_lock(&hci_dev_list_lock); + + list_for_each_entry(hdev, &hci_dev_list, list) { + do { + mesh_tx = mgmt_mesh_next(hdev, sk); + + if (mesh_tx) + mesh_send_complete(hdev, mesh_tx, true); + } while (mesh_tx); + } + + read_unlock(&hci_dev_list_lock); +} diff --git a/net/bluetooth/mgmt_config.c b/net/bluetooth/mgmt_config.c new file mode 100644 index 000000000..6ef701c27 --- /dev/null +++ b/net/bluetooth/mgmt_config.c @@ -0,0 +1,346 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright (C) 2020 Google Corporation + */ + +#include 
<net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/mgmt.h> + +#include "mgmt_util.h" +#include "mgmt_config.h" + +#define HDEV_PARAM_U16(_param_name_) \ + struct {\ + struct mgmt_tlv entry; \ + __le16 value; \ + } __packed _param_name_ + +#define HDEV_PARAM_U8(_param_name_) \ + struct {\ + struct mgmt_tlv entry; \ + __u8 value; \ + } __packed _param_name_ + +#define TLV_SET_U16(_param_code_, _param_name_) \ + { \ + { cpu_to_le16(_param_code_), sizeof(__u16) }, \ + cpu_to_le16(hdev->_param_name_) \ + } + +#define TLV_SET_U8(_param_code_, _param_name_) \ + { \ + { cpu_to_le16(_param_code_), sizeof(__u8) }, \ + hdev->_param_name_ \ + } + +#define TLV_SET_U16_JIFFIES_TO_MSECS(_param_code_, _param_name_) \ + { \ + { cpu_to_le16(_param_code_), sizeof(__u16) }, \ + cpu_to_le16(jiffies_to_msecs(hdev->_param_name_)) \ + } + +int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + int ret; + struct mgmt_rp_read_def_system_config { + /* Please see mgmt-api.txt for documentation of these values */ + HDEV_PARAM_U16(def_page_scan_type); + HDEV_PARAM_U16(def_page_scan_int); + HDEV_PARAM_U16(def_page_scan_window); + HDEV_PARAM_U16(def_inq_scan_type); + HDEV_PARAM_U16(def_inq_scan_int); + HDEV_PARAM_U16(def_inq_scan_window); + HDEV_PARAM_U16(def_br_lsto); + HDEV_PARAM_U16(def_page_timeout); + HDEV_PARAM_U16(sniff_min_interval); + HDEV_PARAM_U16(sniff_max_interval); + HDEV_PARAM_U16(le_adv_min_interval); + HDEV_PARAM_U16(le_adv_max_interval); + HDEV_PARAM_U16(def_multi_adv_rotation_duration); + HDEV_PARAM_U16(le_scan_interval); + HDEV_PARAM_U16(le_scan_window); + HDEV_PARAM_U16(le_scan_int_suspend); + HDEV_PARAM_U16(le_scan_window_suspend); + HDEV_PARAM_U16(le_scan_int_discovery); + HDEV_PARAM_U16(le_scan_window_discovery); + HDEV_PARAM_U16(le_scan_int_adv_monitor); + HDEV_PARAM_U16(le_scan_window_adv_monitor); + HDEV_PARAM_U16(le_scan_int_connect); + HDEV_PARAM_U16(le_scan_window_connect); + HDEV_PARAM_U16(le_conn_min_interval); + HDEV_PARAM_U16(le_conn_max_interval); + HDEV_PARAM_U16(le_conn_latency); + HDEV_PARAM_U16(le_supv_timeout); + HDEV_PARAM_U16(def_le_autoconnect_timeout); + HDEV_PARAM_U16(advmon_allowlist_duration); + HDEV_PARAM_U16(advmon_no_filter_duration); + HDEV_PARAM_U8(enable_advmon_interleave_scan); + } __packed rp = { + TLV_SET_U16(0x0000, def_page_scan_type), + TLV_SET_U16(0x0001, def_page_scan_int), + TLV_SET_U16(0x0002, def_page_scan_window), + TLV_SET_U16(0x0003, def_inq_scan_type), + TLV_SET_U16(0x0004, def_inq_scan_int), + TLV_SET_U16(0x0005, def_inq_scan_window), + TLV_SET_U16(0x0006, def_br_lsto), + TLV_SET_U16(0x0007, def_page_timeout), + TLV_SET_U16(0x0008, sniff_min_interval), + TLV_SET_U16(0x0009, sniff_max_interval), + TLV_SET_U16(0x000a, le_adv_min_interval), + TLV_SET_U16(0x000b, le_adv_max_interval), + TLV_SET_U16(0x000c, def_multi_adv_rotation_duration), + TLV_SET_U16(0x000d, le_scan_interval), + TLV_SET_U16(0x000e, le_scan_window), + TLV_SET_U16(0x000f, le_scan_int_suspend), + TLV_SET_U16(0x0010, le_scan_window_suspend), + TLV_SET_U16(0x0011, le_scan_int_discovery), + TLV_SET_U16(0x0012, le_scan_window_discovery), + TLV_SET_U16(0x0013, le_scan_int_adv_monitor), + TLV_SET_U16(0x0014, le_scan_window_adv_monitor), + TLV_SET_U16(0x0015, le_scan_int_connect), + TLV_SET_U16(0x0016, le_scan_window_connect), + TLV_SET_U16(0x0017, le_conn_min_interval), + TLV_SET_U16(0x0018, le_conn_max_interval), + TLV_SET_U16(0x0019, le_conn_latency), + TLV_SET_U16(0x001a, le_supv_timeout), + 
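+ /* Worked example of the wire encoding (illustration, not upstream + * text): each entry serializes as a little-endian mgmt_tlv of + * { le16 type, u8 length, value }, so parameter 0x0017 + * (le_conn_min_interval) holding 0x0018 appears on the wire as the + * five bytes 17 00 02 18 00. The entry that follows is the one + * exception: it converts jiffies to milliseconds first. + */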
TLV_SET_U16_JIFFIES_TO_MSECS(0x001b, + def_le_autoconnect_timeout), + TLV_SET_U16(0x001d, advmon_allowlist_duration), + TLV_SET_U16(0x001e, advmon_no_filter_duration), + TLV_SET_U8(0x001f, enable_advmon_interleave_scan), + }; + + bt_dev_dbg(hdev, "sock %p", sk); + + ret = mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_DEF_SYSTEM_CONFIG, + 0, &rp, sizeof(rp)); + return ret; +} + +#define TO_TLV(x) ((struct mgmt_tlv *)(x)) +#define TLV_GET_LE16(tlv) le16_to_cpu(*((__le16 *)(TO_TLV(tlv)->value))) +#define TLV_GET_U8(tlv) (*((__u8 *)(TO_TLV(tlv)->value))) + +int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + u16 buffer_left = data_len; + u8 *buffer = data; + + if (buffer_left < sizeof(struct mgmt_tlv)) { + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_INVALID_PARAMS); + } + + /* First pass to validate the tlv */ + while (buffer_left >= sizeof(struct mgmt_tlv)) { + const u8 len = TO_TLV(buffer)->length; + size_t exp_type_len; + const u16 exp_len = sizeof(struct mgmt_tlv) + + len; + const u16 type = le16_to_cpu(TO_TLV(buffer)->type); + + if (buffer_left < exp_len) { + bt_dev_warn(hdev, "invalid len left %u, exp >= %u", + buffer_left, exp_len); + + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_INVALID_PARAMS); + } + + /* Please see mgmt-api.txt for documentation of these values */ + switch (type) { + case 0x0000: + case 0x0001: + case 0x0002: + case 0x0003: + case 0x0004: + case 0x0005: + case 0x0006: + case 0x0007: + case 0x0008: + case 0x0009: + case 0x000a: + case 0x000b: + case 0x000c: + case 0x000d: + case 0x000e: + case 0x000f: + case 0x0010: + case 0x0011: + case 0x0012: + case 0x0013: + case 0x0014: + case 0x0015: + case 0x0016: + case 0x0017: + case 0x0018: + case 0x0019: + case 0x001a: + case 0x001b: + case 0x001d: + case 0x001e: + exp_type_len = sizeof(u16); + break; + case 0x001f: + exp_type_len = sizeof(u8); + break; + default: + exp_type_len = 0; + bt_dev_warn(hdev, "unsupported parameter %u", type); + break; + } + + if (exp_type_len && len != exp_type_len) { + bt_dev_warn(hdev, "invalid length %d, exp %zu for type %u", + len, exp_type_len, type); + + return mgmt_cmd_status(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_INVALID_PARAMS); + } + + buffer_left -= exp_len; + buffer += exp_len; + } + + buffer_left = data_len; + buffer = data; + while (buffer_left >= sizeof(struct mgmt_tlv)) { + const u8 len = TO_TLV(buffer)->length; + const u16 exp_len = sizeof(struct mgmt_tlv) + + len; + const u16 type = le16_to_cpu(TO_TLV(buffer)->type); + + switch (type) { + case 0x0000: + hdev->def_page_scan_type = TLV_GET_LE16(buffer); + break; + case 0x0001: + hdev->def_page_scan_int = TLV_GET_LE16(buffer); + break; + case 0x0002: + hdev->def_page_scan_window = TLV_GET_LE16(buffer); + break; + case 0x0003: + hdev->def_inq_scan_type = TLV_GET_LE16(buffer); + break; + case 0x0004: + hdev->def_inq_scan_int = TLV_GET_LE16(buffer); + break; + case 0x0005: + hdev->def_inq_scan_window = TLV_GET_LE16(buffer); + break; + case 0x0006: + hdev->def_br_lsto = TLV_GET_LE16(buffer); + break; + case 0x0007: + hdev->def_page_timeout = TLV_GET_LE16(buffer); + break; + case 0x0008: + hdev->sniff_min_interval = TLV_GET_LE16(buffer); + break; + case 0x0009: + hdev->sniff_max_interval = TLV_GET_LE16(buffer); + break; + case 0x000a: + hdev->le_adv_min_interval = TLV_GET_LE16(buffer); + break; + case 0x000b: + hdev->le_adv_max_interval = TLV_GET_LE16(buffer); + break; + case 0x000c: + 
hdev->def_multi_adv_rotation_duration = + TLV_GET_LE16(buffer); + break; + case 0x000d: + hdev->le_scan_interval = TLV_GET_LE16(buffer); + break; + case 0x000e: + hdev->le_scan_window = TLV_GET_LE16(buffer); + break; + case 0x000f: + hdev->le_scan_int_suspend = TLV_GET_LE16(buffer); + break; + case 0x0010: + hdev->le_scan_window_suspend = TLV_GET_LE16(buffer); + break; + case 0x0011: + hdev->le_scan_int_discovery = TLV_GET_LE16(buffer); + break; + case 0x00012: + hdev->le_scan_window_discovery = TLV_GET_LE16(buffer); + break; + case 0x00013: + hdev->le_scan_int_adv_monitor = TLV_GET_LE16(buffer); + break; + case 0x00014: + hdev->le_scan_window_adv_monitor = TLV_GET_LE16(buffer); + break; + case 0x00015: + hdev->le_scan_int_connect = TLV_GET_LE16(buffer); + break; + case 0x00016: + hdev->le_scan_window_connect = TLV_GET_LE16(buffer); + break; + case 0x00017: + hdev->le_conn_min_interval = TLV_GET_LE16(buffer); + break; + case 0x00018: + hdev->le_conn_max_interval = TLV_GET_LE16(buffer); + break; + case 0x00019: + hdev->le_conn_latency = TLV_GET_LE16(buffer); + break; + case 0x0001a: + hdev->le_supv_timeout = TLV_GET_LE16(buffer); + break; + case 0x0001b: + hdev->def_le_autoconnect_timeout = + msecs_to_jiffies(TLV_GET_LE16(buffer)); + break; + case 0x0001d: + hdev->advmon_allowlist_duration = TLV_GET_LE16(buffer); + break; + case 0x0001e: + hdev->advmon_no_filter_duration = TLV_GET_LE16(buffer); + break; + case 0x0001f: + hdev->enable_advmon_interleave_scan = TLV_GET_U8(buffer); + break; + default: + bt_dev_warn(hdev, "unsupported parameter %u", type); + break; + } + + buffer_left -= exp_len; + buffer += exp_len; + } + + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_SET_DEF_SYSTEM_CONFIG, 0, NULL, 0); +} + +int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + bt_dev_dbg(hdev, "sock %p", sk); + + return mgmt_cmd_complete(sk, hdev->id, + MGMT_OP_READ_DEF_RUNTIME_CONFIG, 0, NULL, 0); +} + +int set_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len) +{ + bt_dev_dbg(hdev, "sock %p", sk); + + return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DEF_SYSTEM_CONFIG, + MGMT_STATUS_INVALID_PARAMS); +} diff --git a/net/bluetooth/mgmt_config.h b/net/bluetooth/mgmt_config.h new file mode 100644 index 000000000..a4965f107 --- /dev/null +++ b/net/bluetooth/mgmt_config.h @@ -0,0 +1,17 @@ +// SPDX-License-Identifier: GPL-2.0-only + +/* + * Copyright (C) 2020 Google Corporation + */ + +int read_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); + +int set_def_system_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); + +int read_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); + +int set_def_runtime_config(struct sock *sk, struct hci_dev *hdev, void *data, + u16 data_len); diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c new file mode 100644 index 000000000..0115f783b --- /dev/null +++ b/net/bluetooth/mgmt_util.c @@ -0,0 +1,390 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + + Copyright (C) 2015 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD 
PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <asm/unaligned.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/hci_mon.h> +#include <net/bluetooth/mgmt.h> + +#include "mgmt_util.h" + +static struct sk_buff *create_monitor_ctrl_event(__le16 index, u32 cookie, + u16 opcode, u16 len, void *buf) +{ + struct hci_mon_hdr *hdr; + struct sk_buff *skb; + + skb = bt_skb_alloc(6 + len, GFP_ATOMIC); + if (!skb) + return NULL; + + put_unaligned_le32(cookie, skb_put(skb, 4)); + put_unaligned_le16(opcode, skb_put(skb, 2)); + + if (buf) + skb_put_data(skb, buf, len); + + __net_timestamp(skb); + + hdr = skb_push(skb, HCI_MON_HDR_SIZE); + hdr->opcode = cpu_to_le16(HCI_MON_CTRL_EVENT); + hdr->index = index; + hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE); + + return skb; +} + +struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode, + unsigned int size) +{ + struct sk_buff *skb; + + skb = alloc_skb(sizeof(struct mgmt_hdr) + size, GFP_KERNEL); + if (!skb) + return skb; + + skb_reserve(skb, sizeof(struct mgmt_hdr)); + bt_cb(skb)->mgmt.hdev = hdev; + bt_cb(skb)->mgmt.opcode = opcode; + + return skb; +} + +int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag, + struct sock *skip_sk) +{ + struct hci_dev *hdev; + struct mgmt_hdr *hdr; + int len; + + if (!skb) + return -EINVAL; + + len = skb->len; + hdev = bt_cb(skb)->mgmt.hdev; + + /* Time stamp */ + __net_timestamp(skb); + + /* Send just the data, without headers, to the monitor */ + if (channel == HCI_CHANNEL_CONTROL) + hci_send_monitor_ctrl_event(hdev, bt_cb(skb)->mgmt.opcode, + skb->data, skb->len, + skb_get_ktime(skb), flag, skip_sk); + + hdr = skb_push(skb, sizeof(*hdr)); + hdr->opcode = cpu_to_le16(bt_cb(skb)->mgmt.opcode); + if (hdev) + hdr->index = cpu_to_le16(hdev->id); + else + hdr->index = cpu_to_le16(MGMT_INDEX_NONE); + hdr->len = cpu_to_le16(len); + + hci_send_to_channel(channel, skb, flag, skip_sk); + + kfree_skb(skb); + return 0; +} + +int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, + void *data, u16 data_len, int flag, struct sock *skip_sk) +{ + struct sk_buff *skb; + + skb = mgmt_alloc_skb(hdev, event, data_len); + if (!skb) + return -ENOMEM; + + if (data) + skb_put_data(skb, data, data_len); + + return mgmt_send_event_skb(channel, skb, flag, skip_sk); +} + +int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status) +{ + struct sk_buff *skb, *mskb; + struct mgmt_hdr *hdr; + struct mgmt_ev_cmd_status *ev; + int err; + + BT_DBG("sock %p, index %u, cmd %u, status %u", sk, index, cmd, status); + + skb = alloc_skb(sizeof(*hdr) + sizeof(*ev), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put(skb, sizeof(*hdr)); + + hdr->opcode = cpu_to_le16(MGMT_EV_CMD_STATUS); + hdr->index = cpu_to_le16(index); + hdr->len = cpu_to_le16(sizeof(*ev)); + + ev = skb_put(skb, sizeof(*ev)); + ev->status = status; + ev->opcode = cpu_to_le16(cmd); + + mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk), + 
MGMT_EV_CMD_STATUS, sizeof(*ev), ev); + if (mskb) + skb->tstamp = mskb->tstamp; + else + __net_timestamp(skb); + + err = sock_queue_rcv_skb(sk, skb); + if (err < 0) + kfree_skb(skb); + + if (mskb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(mskb); + } + + return err; +} + +int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, + void *rp, size_t rp_len) +{ + struct sk_buff *skb, *mskb; + struct mgmt_hdr *hdr; + struct mgmt_ev_cmd_complete *ev; + int err; + + BT_DBG("sock %p", sk); + + skb = alloc_skb(sizeof(*hdr) + sizeof(*ev) + rp_len, GFP_KERNEL); + if (!skb) + return -ENOMEM; + + hdr = skb_put(skb, sizeof(*hdr)); + + hdr->opcode = cpu_to_le16(MGMT_EV_CMD_COMPLETE); + hdr->index = cpu_to_le16(index); + hdr->len = cpu_to_le16(sizeof(*ev) + rp_len); + + ev = skb_put(skb, sizeof(*ev) + rp_len); + ev->opcode = cpu_to_le16(cmd); + ev->status = status; + + if (rp) + memcpy(ev->data, rp, rp_len); + + mskb = create_monitor_ctrl_event(hdr->index, hci_sock_get_cookie(sk), + MGMT_EV_CMD_COMPLETE, + sizeof(*ev) + rp_len, ev); + if (mskb) + skb->tstamp = mskb->tstamp; + else + __net_timestamp(skb); + + err = sock_queue_rcv_skb(sk, skb); + if (err < 0) + kfree_skb(skb); + + if (mskb) { + hci_send_to_channel(HCI_CHANNEL_MONITOR, mskb, + HCI_SOCK_TRUSTED, NULL); + kfree_skb(mskb); + } + + return err; +} + +struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, + struct hci_dev *hdev) +{ + struct mgmt_pending_cmd *cmd; + + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + if (hci_sock_get_channel(cmd->sk) != channel) + continue; + if (cmd->opcode == opcode) + return cmd; + } + + return NULL; +} + +struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, + u16 opcode, + struct hci_dev *hdev, + const void *data) +{ + struct mgmt_pending_cmd *cmd; + + list_for_each_entry(cmd, &hdev->mgmt_pending, list) { + if (cmd->user_data != data) + continue; + if (cmd->opcode == opcode) + return cmd; + } + + return NULL; +} + +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, + void (*cb)(struct mgmt_pending_cmd *cmd, void *data), + void *data) +{ + struct mgmt_pending_cmd *cmd, *tmp; + + list_for_each_entry_safe(cmd, tmp, &hdev->mgmt_pending, list) { + if (opcode > 0 && cmd->opcode != opcode) + continue; + + cb(cmd, data); + } +} + +struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_pending_cmd *cmd; + + cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); + if (!cmd) + return NULL; + + cmd->opcode = opcode; + cmd->index = hdev->id; + + cmd->param = kmemdup(data, len, GFP_KERNEL); + if (!cmd->param) { + kfree(cmd); + return NULL; + } + + cmd->param_len = len; + + cmd->sk = sk; + sock_hold(sk); + + return cmd; +} + +struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_pending_cmd *cmd; + + cmd = mgmt_pending_new(sk, opcode, hdev, data, len); + if (!cmd) + return NULL; + + list_add_tail(&cmd->list, &hdev->mgmt_pending); + + return cmd; +} + +void mgmt_pending_free(struct mgmt_pending_cmd *cmd) +{ + sock_put(cmd->sk); + kfree(cmd->param); + kfree(cmd); +} + +void mgmt_pending_remove(struct mgmt_pending_cmd *cmd) +{ + list_del(&cmd->list); + mgmt_pending_free(cmd); +} + +void mgmt_mesh_foreach(struct hci_dev *hdev, + void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data), + void *data, struct sock *sk) +{ + struct mgmt_mesh_tx *mesh_tx, *tmp; + + 
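+ /* Walk the pending mesh transmissions: with sk == NULL every entry + * is visited, otherwise only the entries queued by that socket (each + * mesh_tx holds a reference to the socket that created it, taken in + * mgmt_mesh_add() below). + */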
list_for_each_entry_safe(mesh_tx, tmp, &hdev->mgmt_pending, list) { + if (!sk || mesh_tx->sk == sk) + cb(mesh_tx, data); + } +} + +struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk) +{ + struct mgmt_mesh_tx *mesh_tx; + + if (list_empty(&hdev->mesh_pending)) + return NULL; + + list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) { + if (!sk || mesh_tx->sk == sk) + return mesh_tx; + } + + return NULL; +} + +struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle) +{ + struct mgmt_mesh_tx *mesh_tx; + + if (list_empty(&hdev->mesh_pending)) + return NULL; + + list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) { + if (mesh_tx->handle == handle) + return mesh_tx; + } + + return NULL; +} + +struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len) +{ + struct mgmt_mesh_tx *mesh_tx; + + mesh_tx = kzalloc(sizeof(*mesh_tx), GFP_KERNEL); + if (!mesh_tx) + return NULL; + + hdev->mesh_send_ref++; + if (!hdev->mesh_send_ref) + hdev->mesh_send_ref++; + + mesh_tx->handle = hdev->mesh_send_ref; + mesh_tx->index = hdev->id; + memcpy(mesh_tx->param, data, len); + mesh_tx->param_len = len; + mesh_tx->sk = sk; + sock_hold(sk); + + list_add_tail(&mesh_tx->list, &hdev->mesh_pending); + + return mesh_tx; +} + +void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx) +{ + list_del(&mesh_tx->list); + sock_put(mesh_tx->sk); + kfree(mesh_tx); +} diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h new file mode 100644 index 000000000..bdf978605 --- /dev/null +++ b/net/bluetooth/mgmt_util.h @@ -0,0 +1,79 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2015 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED.
+*/ + +struct mgmt_mesh_tx { + struct list_head list; + int index; + size_t param_len; + struct sock *sk; + u8 handle; + u8 instance; + u8 param[sizeof(struct mgmt_cp_mesh_send) + 31]; +}; + +struct mgmt_pending_cmd { + struct list_head list; + u16 opcode; + int index; + void *param; + size_t param_len; + struct sock *sk; + struct sk_buff *skb; + void *user_data; + int (*cmd_complete)(struct mgmt_pending_cmd *cmd, u8 status); +}; + +struct sk_buff *mgmt_alloc_skb(struct hci_dev *hdev, u16 opcode, + unsigned int size); +int mgmt_send_event_skb(unsigned short channel, struct sk_buff *skb, int flag, + struct sock *skip_sk); +int mgmt_send_event(u16 event, struct hci_dev *hdev, unsigned short channel, + void *data, u16 data_len, int flag, struct sock *skip_sk); +int mgmt_cmd_status(struct sock *sk, u16 index, u16 cmd, u8 status); +int mgmt_cmd_complete(struct sock *sk, u16 index, u16 cmd, u8 status, + void *rp, size_t rp_len); + +struct mgmt_pending_cmd *mgmt_pending_find(unsigned short channel, u16 opcode, + struct hci_dev *hdev); +struct mgmt_pending_cmd *mgmt_pending_find_data(unsigned short channel, + u16 opcode, + struct hci_dev *hdev, + const void *data); +void mgmt_pending_foreach(u16 opcode, struct hci_dev *hdev, + void (*cb)(struct mgmt_pending_cmd *cmd, void *data), + void *data); +struct mgmt_pending_cmd *mgmt_pending_add(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len); +struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode, + struct hci_dev *hdev, + void *data, u16 len); +void mgmt_pending_free(struct mgmt_pending_cmd *cmd); +void mgmt_pending_remove(struct mgmt_pending_cmd *cmd); +void mgmt_mesh_foreach(struct hci_dev *hdev, + void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data), + void *data, struct sock *sk); +struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle); +struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk); +struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev, + void *data, u16 len); +void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx); diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c new file mode 100644 index 000000000..bee6a4c65 --- /dev/null +++ b/net/bluetooth/msft.c @@ -0,0 +1,837 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google Corporation + */ + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/mgmt.h> + +#include "hci_request.h" +#include "mgmt_util.h" +#include "msft.h" + +#define MSFT_RSSI_THRESHOLD_VALUE_MIN -127 +#define MSFT_RSSI_THRESHOLD_VALUE_MAX 20 +#define MSFT_RSSI_LOW_TIMEOUT_MAX 0x3C + +#define MSFT_OP_READ_SUPPORTED_FEATURES 0x00 +struct msft_cp_read_supported_features { + __u8 sub_opcode; +} __packed; + +struct msft_rp_read_supported_features { + __u8 status; + __u8 sub_opcode; + __le64 features; + __u8 evt_prefix_len; + __u8 evt_prefix[]; +} __packed; + +#define MSFT_OP_LE_MONITOR_ADVERTISEMENT 0x03 +#define MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN 0x01 +struct msft_le_monitor_advertisement_pattern { + __u8 length; + __u8 data_type; + __u8 start_byte; + __u8 pattern[]; +}; + +struct msft_le_monitor_advertisement_pattern_data { + __u8 count; + __u8 data[]; +}; + +struct msft_cp_le_monitor_advertisement { + __u8 sub_opcode; + __s8 rssi_high; + __s8 rssi_low; + __u8 rssi_low_interval; + __u8 rssi_sampling_period; + __u8 cond_type; + __u8 data[]; +} __packed; + +struct msft_rp_le_monitor_advertisement { + __u8 status; + __u8 sub_opcode; + __u8 handle; +} 
__packed; + +#define MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT 0x04 +struct msft_cp_le_cancel_monitor_advertisement { + __u8 sub_opcode; + __u8 handle; +} __packed; + +struct msft_rp_le_cancel_monitor_advertisement { + __u8 status; + __u8 sub_opcode; +} __packed; + +#define MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE 0x05 +struct msft_cp_le_set_advertisement_filter_enable { + __u8 sub_opcode; + __u8 enable; +} __packed; + +struct msft_rp_le_set_advertisement_filter_enable { + __u8 status; + __u8 sub_opcode; +} __packed; + +#define MSFT_EV_LE_MONITOR_DEVICE 0x02 +struct msft_ev_le_monitor_device { + __u8 addr_type; + bdaddr_t bdaddr; + __u8 monitor_handle; + __u8 monitor_state; +} __packed; + +struct msft_monitor_advertisement_handle_data { + __u8 msft_handle; + __u16 mgmt_handle; + struct list_head list; +}; + +struct msft_data { + __u64 features; + __u8 evt_prefix_len; + __u8 *evt_prefix; + struct list_head handle_map; + __u8 resuming; + __u8 suspending; + __u8 filter_enabled; +}; + +bool msft_monitor_supported(struct hci_dev *hdev) +{ + return !!(msft_get_features(hdev) & MSFT_FEATURE_MASK_LE_ADV_MONITOR); +} + +static bool read_supported_features(struct hci_dev *hdev, + struct msft_data *msft) +{ + struct msft_cp_read_supported_features cp; + struct msft_rp_read_supported_features *rp; + struct sk_buff *skb; + + cp.sub_opcode = MSFT_OP_READ_SUPPORTED_FEATURES; + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR_OR_NULL(skb)) { + if (!skb) + skb = ERR_PTR(-EIO); + + bt_dev_err(hdev, "Failed to read MSFT supported features (%ld)", + PTR_ERR(skb)); + return false; + } + + if (skb->len < sizeof(*rp)) { + bt_dev_err(hdev, "MSFT supported features length mismatch"); + goto failed; + } + + rp = (struct msft_rp_read_supported_features *)skb->data; + + if (rp->sub_opcode != MSFT_OP_READ_SUPPORTED_FEATURES) + goto failed; + + if (rp->evt_prefix_len > 0) { + msft->evt_prefix = kmemdup(rp->evt_prefix, rp->evt_prefix_len, + GFP_KERNEL); + if (!msft->evt_prefix) + goto failed; + } + + msft->evt_prefix_len = rp->evt_prefix_len; + msft->features = __le64_to_cpu(rp->features); + + if (msft->features & MSFT_FEATURE_MASK_CURVE_VALIDITY) + hdev->msft_curve_validity = true; + + kfree_skb(skb); + return true; + +failed: + kfree_skb(skb); + return false; +} + +/* is_mgmt = true matches the handle exposed to userspace via mgmt. + * is_mgmt = false matches the handle used by the msft controller. + * This function requires the caller holds hdev->lock + */ +static struct msft_monitor_advertisement_handle_data *msft_find_handle_data + (struct hci_dev *hdev, u16 handle, bool is_mgmt) +{ + struct msft_monitor_advertisement_handle_data *entry; + struct msft_data *msft = hdev->msft_data; + + list_for_each_entry(entry, &msft->handle_map, list) { + if (is_mgmt && entry->mgmt_handle == handle) + return entry; + if (!is_mgmt && entry->msft_handle == handle) + return entry; + } + + return NULL; +} + +/* This function requires the caller holds hdev->lock */ +static int msft_monitor_device_del(struct hci_dev *hdev, __u16 mgmt_handle, + bdaddr_t *bdaddr, __u8 addr_type, + bool notify) +{ + struct monitored_device *dev, *tmp; + int count = 0; + + list_for_each_entry_safe(dev, tmp, &hdev->monitored_devices, list) { + /* mgmt_handle == 0 indicates remove all devices, whereas, + * bdaddr == NULL indicates remove all devices matching the + * mgmt_handle. 
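+ * Both forms appear in this file: msft_monitor_device_del(hdev, 0, + * NULL, 0, true) flushes every tracked device (resume and power-off + * paths), while a specific mgmt_handle with a NULL bdaddr clears only + * the devices matched by that monitor.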
+ */ + if ((!mgmt_handle || dev->handle == mgmt_handle) && + (!bdaddr || (!bacmp(bdaddr, &dev->bdaddr) && + addr_type == dev->addr_type))) { + if (notify && dev->notified) { + mgmt_adv_monitor_device_lost(hdev, dev->handle, + &dev->bdaddr, + dev->addr_type); + } + + list_del(&dev->list); + kfree(dev); + count++; + } + } + + return count; +} + +static int msft_le_monitor_advertisement_cb(struct hci_dev *hdev, u16 opcode, + struct adv_monitor *monitor, + struct sk_buff *skb) +{ + struct msft_rp_le_monitor_advertisement *rp; + struct msft_monitor_advertisement_handle_data *handle_data; + struct msft_data *msft = hdev->msft_data; + int status = 0; + + hci_dev_lock(hdev); + + rp = (struct msft_rp_le_monitor_advertisement *)skb->data; + if (skb->len < sizeof(*rp)) { + status = HCI_ERROR_UNSPECIFIED; + goto unlock; + } + + status = rp->status; + if (status) + goto unlock; + + handle_data = kmalloc(sizeof(*handle_data), GFP_KERNEL); + if (!handle_data) { + status = HCI_ERROR_UNSPECIFIED; + goto unlock; + } + + handle_data->mgmt_handle = monitor->handle; + handle_data->msft_handle = rp->handle; + INIT_LIST_HEAD(&handle_data->list); + list_add(&handle_data->list, &msft->handle_map); + + monitor->state = ADV_MONITOR_STATE_OFFLOADED; + +unlock: + if (status) + hci_free_adv_monitor(hdev, monitor); + + hci_dev_unlock(hdev); + + return status; +} + +static int msft_le_cancel_monitor_advertisement_cb(struct hci_dev *hdev, + u16 opcode, + struct adv_monitor *monitor, + struct sk_buff *skb) +{ + struct msft_rp_le_cancel_monitor_advertisement *rp; + struct msft_monitor_advertisement_handle_data *handle_data; + struct msft_data *msft = hdev->msft_data; + int status = 0; + + rp = (struct msft_rp_le_cancel_monitor_advertisement *)skb->data; + if (skb->len < sizeof(*rp)) { + status = HCI_ERROR_UNSPECIFIED; + goto done; + } + + status = rp->status; + if (status) + goto done; + + hci_dev_lock(hdev); + + handle_data = msft_find_handle_data(hdev, monitor->handle, true); + + if (handle_data) { + if (monitor->state == ADV_MONITOR_STATE_OFFLOADED) + monitor->state = ADV_MONITOR_STATE_REGISTERED; + + /* Do not free the monitor if it is being removed due to + * suspend. It will be re-monitored on resume. 
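+ * (msft_suspend_sync() sets msft->suspending before removing the + * monitors and msft_resume_sync() re-adds them via + * reregister_monitor(), so the adv_monitors_idr entry must survive.)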
+ */ + if (!msft->suspending) { + hci_free_adv_monitor(hdev, monitor); + + /* Clear any monitored devices by this Adv Monitor */ + msft_monitor_device_del(hdev, handle_data->mgmt_handle, + NULL, 0, false); + } + + list_del(&handle_data->list); + kfree(handle_data); + } + + hci_dev_unlock(hdev); + +done: + return status; +} + +/* This function requires the caller holds hci_req_sync_lock */ +static int msft_remove_monitor_sync(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + struct msft_cp_le_cancel_monitor_advertisement cp; + struct msft_monitor_advertisement_handle_data *handle_data; + struct sk_buff *skb; + + handle_data = msft_find_handle_data(hdev, monitor->handle, true); + + /* If no matched handle, just remove without telling controller */ + if (!handle_data) + return -ENOENT; + + cp.sub_opcode = MSFT_OP_LE_CANCEL_MONITOR_ADVERTISEMENT; + cp.handle = handle_data->msft_handle; + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + if (IS_ERR_OR_NULL(skb)) { + if (!skb) + return -EIO; + return PTR_ERR(skb); + } + + return msft_le_cancel_monitor_advertisement_cb(hdev, hdev->msft_opcode, + monitor, skb); +} + +/* This function requires the caller holds hci_req_sync_lock */ +int msft_suspend_sync(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + struct adv_monitor *monitor; + int handle = 0; + + if (!msft || !msft_monitor_supported(hdev)) + return 0; + + msft->suspending = true; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); + if (!monitor) + break; + + msft_remove_monitor_sync(hdev, monitor); + + handle++; + } + + /* All monitors have been removed */ + msft->suspending = false; + + return 0; +} + +static bool msft_monitor_rssi_valid(struct adv_monitor *monitor) +{ + struct adv_rssi_thresholds *r = &monitor->rssi; + + if (r->high_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || + r->high_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX || + r->low_threshold < MSFT_RSSI_THRESHOLD_VALUE_MIN || + r->low_threshold > MSFT_RSSI_THRESHOLD_VALUE_MAX) + return false; + + /* High_threshold_timeout is not supported, + * once high_threshold is reached, events are immediately reported. 
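+ * Illustrative bounds: thresholds must lie in -127..20, so a monitor + * asking for high/low thresholds of -40/-60 with a low-threshold + * timeout of 0x05 is accepted, while any low timeout above 0x3C + * (MSFT_RSSI_LOW_TIMEOUT_MAX) or any non-zero high timeout is + * rejected.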
+ */ + if (r->high_threshold_timeout != 0) + return false; + + if (r->low_threshold_timeout > MSFT_RSSI_LOW_TIMEOUT_MAX) + return false; + + /* Sampling period from 0x00 to 0xFF are all allowed */ + return true; +} + +static bool msft_monitor_pattern_valid(struct adv_monitor *monitor) +{ + return msft_monitor_rssi_valid(monitor); + /* No additional check needed for pattern-based monitor */ +} + +static int msft_add_monitor_sync(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + struct msft_cp_le_monitor_advertisement *cp; + struct msft_le_monitor_advertisement_pattern_data *pattern_data; + struct msft_le_monitor_advertisement_pattern *pattern; + struct adv_pattern *entry; + size_t total_size = sizeof(*cp) + sizeof(*pattern_data); + ptrdiff_t offset = 0; + u8 pattern_count = 0; + struct sk_buff *skb; + + if (!msft_monitor_pattern_valid(monitor)) + return -EINVAL; + + list_for_each_entry(entry, &monitor->patterns, list) { + pattern_count++; + total_size += sizeof(*pattern) + entry->length; + } + + cp = kmalloc(total_size, GFP_KERNEL); + if (!cp) + return -ENOMEM; + + cp->sub_opcode = MSFT_OP_LE_MONITOR_ADVERTISEMENT; + cp->rssi_high = monitor->rssi.high_threshold; + cp->rssi_low = monitor->rssi.low_threshold; + cp->rssi_low_interval = (u8)monitor->rssi.low_threshold_timeout; + cp->rssi_sampling_period = monitor->rssi.sampling_period; + + cp->cond_type = MSFT_MONITOR_ADVERTISEMENT_TYPE_PATTERN; + + pattern_data = (void *)cp->data; + pattern_data->count = pattern_count; + + list_for_each_entry(entry, &monitor->patterns, list) { + pattern = (void *)(pattern_data->data + offset); + /* the length also includes data_type and offset */ + pattern->length = entry->length + 2; + pattern->data_type = entry->ad_type; + pattern->start_byte = entry->offset; + memcpy(pattern->pattern, entry->value, entry->length); + offset += sizeof(*pattern) + entry->length; + } + + skb = __hci_cmd_sync(hdev, hdev->msft_opcode, total_size, cp, + HCI_CMD_TIMEOUT); + kfree(cp); + + if (IS_ERR_OR_NULL(skb)) { + if (!skb) + return -EIO; + return PTR_ERR(skb); + } + + return msft_le_monitor_advertisement_cb(hdev, hdev->msft_opcode, + monitor, skb); +} + +/* This function requires the caller holds hci_req_sync_lock */ +static void reregister_monitor(struct hci_dev *hdev) +{ + struct adv_monitor *monitor; + struct msft_data *msft = hdev->msft_data; + int handle = 0; + + if (!msft) + return; + + msft->resuming = true; + + while (1) { + monitor = idr_get_next(&hdev->adv_monitors_idr, &handle); + if (!monitor) + break; + + msft_add_monitor_sync(hdev, monitor); + + handle++; + } + + /* All monitors have been reregistered */ + msft->resuming = false; +} + +/* This function requires the caller holds hci_req_sync_lock */ +int msft_resume_sync(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + if (!msft || !msft_monitor_supported(hdev)) + return 0; + + hci_dev_lock(hdev); + + /* Clear already tracked devices on resume. Once the monitors are + * reregistered, devices in range will be found again after resume. 
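+ * Handle 0 with a NULL bdaddr matches every monitored device, and + * notify is set so Device Lost events reach the MGMT layer.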
+ */ + hdev->advmon_pend_notify = false; + msft_monitor_device_del(hdev, 0, NULL, 0, true); + + hci_dev_unlock(hdev); + + reregister_monitor(hdev); + + return 0; +} + +/* This function requires the caller holds hci_req_sync_lock */ +void msft_do_open(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + if (hdev->msft_opcode == HCI_OP_NOP) + return; + + if (!msft) { + bt_dev_err(hdev, "MSFT extension not registered"); + return; + } + + bt_dev_dbg(hdev, "Initialize MSFT extension"); + + /* Reset existing MSFT data before re-reading */ + kfree(msft->evt_prefix); + msft->evt_prefix = NULL; + msft->evt_prefix_len = 0; + msft->features = 0; + + if (!read_supported_features(hdev, msft)) { + hdev->msft_data = NULL; + kfree(msft); + return; + } + + if (msft_monitor_supported(hdev)) { + msft->resuming = true; + msft_set_filter_enable(hdev, true); + /* Monitors get removed on power off, so we need to explicitly + * tell the controller to re-monitor. + */ + reregister_monitor(hdev); + } +} + +void msft_do_close(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + struct msft_monitor_advertisement_handle_data *handle_data, *tmp; + struct adv_monitor *monitor; + + if (!msft) + return; + + bt_dev_dbg(hdev, "Cleanup of MSFT extension"); + + /* The controller will silently remove all monitors on power off. + * Therefore, remove handle_data mapping and reset monitor state. + */ + list_for_each_entry_safe(handle_data, tmp, &msft->handle_map, list) { + monitor = idr_find(&hdev->adv_monitors_idr, + handle_data->mgmt_handle); + + if (monitor && monitor->state == ADV_MONITOR_STATE_OFFLOADED) + monitor->state = ADV_MONITOR_STATE_REGISTERED; + + list_del(&handle_data->list); + kfree(handle_data); + } + + hci_dev_lock(hdev); + + /* Clear any devices that are being monitored and notify device lost */ + hdev->advmon_pend_notify = false; + msft_monitor_device_del(hdev, 0, NULL, 0, true); + + hci_dev_unlock(hdev); +} + +void msft_register(struct hci_dev *hdev) +{ + struct msft_data *msft = NULL; + + bt_dev_dbg(hdev, "Register MSFT extension"); + + msft = kzalloc(sizeof(*msft), GFP_KERNEL); + if (!msft) { + bt_dev_err(hdev, "Failed to register MSFT extension"); + return; + } + + INIT_LIST_HEAD(&msft->handle_map); + hdev->msft_data = msft; +} + +void msft_unregister(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + if (!msft) + return; + + bt_dev_dbg(hdev, "Unregister MSFT extension"); + + hdev->msft_data = NULL; + + kfree(msft->evt_prefix); + kfree(msft); +} + +/* This function requires the caller holds hdev->lock */ +static void msft_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, + __u8 addr_type, __u16 mgmt_handle) +{ + struct monitored_device *dev; + + dev = kmalloc(sizeof(*dev), GFP_KERNEL); + if (!dev) { + bt_dev_err(hdev, "MSFT vendor event %u: no memory", + MSFT_EV_LE_MONITOR_DEVICE); + return; + } + + bacpy(&dev->bdaddr, bdaddr); + dev->addr_type = addr_type; + dev->handle = mgmt_handle; + dev->notified = false; + + INIT_LIST_HEAD(&dev->list); + list_add(&dev->list, &hdev->monitored_devices); + hdev->advmon_pend_notify = true; +} + +/* This function requires the caller holds hdev->lock */ +static void msft_device_lost(struct hci_dev *hdev, bdaddr_t *bdaddr, + __u8 addr_type, __u16 mgmt_handle) +{ + if (!msft_monitor_device_del(hdev, mgmt_handle, bdaddr, addr_type, + true)) { + bt_dev_err(hdev, "MSFT vendor event %u: dev %pMR not in list", + MSFT_EV_LE_MONITOR_DEVICE, bdaddr); + } +} + +static void *msft_skb_pull(struct hci_dev *hdev, struct 
sk_buff *skb, + u8 ev, size_t len) +{ + void *data; + + data = skb_pull_data(skb, len); + if (!data) + bt_dev_err(hdev, "Malformed MSFT vendor event: 0x%02x", ev); + + return data; +} + +/* This function requires the caller holds hdev->lock */ +static void msft_monitor_device_evt(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct msft_ev_le_monitor_device *ev; + struct msft_monitor_advertisement_handle_data *handle_data; + u8 addr_type; + + ev = msft_skb_pull(hdev, skb, MSFT_EV_LE_MONITOR_DEVICE, sizeof(*ev)); + if (!ev) + return; + + bt_dev_dbg(hdev, + "MSFT vendor event 0x%02x: handle 0x%04x state %d addr %pMR", + MSFT_EV_LE_MONITOR_DEVICE, ev->monitor_handle, + ev->monitor_state, &ev->bdaddr); + + handle_data = msft_find_handle_data(hdev, ev->monitor_handle, false); + if (!handle_data) + return; + + switch (ev->addr_type) { + case ADDR_LE_DEV_PUBLIC: + addr_type = BDADDR_LE_PUBLIC; + break; + + case ADDR_LE_DEV_RANDOM: + addr_type = BDADDR_LE_RANDOM; + break; + + default: + bt_dev_err(hdev, + "MSFT vendor event 0x%02x: unknown addr type 0x%02x", + MSFT_EV_LE_MONITOR_DEVICE, ev->addr_type); + return; + } + + if (ev->monitor_state) + msft_device_found(hdev, &ev->bdaddr, addr_type, + handle_data->mgmt_handle); + else + msft_device_lost(hdev, &ev->bdaddr, addr_type, + handle_data->mgmt_handle); +} + +void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb) +{ + struct msft_data *msft = hdev->msft_data; + u8 *evt_prefix; + u8 *evt; + + if (!msft) + return; + + /* When the extension has defined an event prefix, check that it + * matches, and otherwise just return. + */ + if (msft->evt_prefix_len > 0) { + evt_prefix = msft_skb_pull(hdev, skb, 0, msft->evt_prefix_len); + if (!evt_prefix) + return; + + if (memcmp(evt_prefix, msft->evt_prefix, msft->evt_prefix_len)) + return; + } + + /* Every event starts at least with an event code and the rest of + * the data is variable and depends on the event code. + */ + if (skb->len < 1) + return; + + evt = msft_skb_pull(hdev, skb, 0, sizeof(*evt)); + if (!evt) + return; + + hci_dev_lock(hdev); + + switch (*evt) { + case MSFT_EV_LE_MONITOR_DEVICE: + msft_monitor_device_evt(hdev, skb); + break; + + default: + bt_dev_dbg(hdev, "MSFT vendor event 0x%02x", *evt); + break; + } + + hci_dev_unlock(hdev); +} + +__u64 msft_get_features(struct hci_dev *hdev) +{ + struct msft_data *msft = hdev->msft_data; + + return msft ? msft->features : 0; +} + +static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, + u8 status, u16 opcode, + struct sk_buff *skb) +{ + struct msft_cp_le_set_advertisement_filter_enable *cp; + struct msft_rp_le_set_advertisement_filter_enable *rp; + struct msft_data *msft = hdev->msft_data; + + rp = (struct msft_rp_le_set_advertisement_filter_enable *)skb->data; + if (skb->len < sizeof(*rp)) + return; + + /* Error 0x0C would be returned if the filter enabled status is + * already set to whatever we were trying to set. + * Although the default state should be disabled, some controller set + * the initial value to enabled. Because there is no way to know the + * actual initial value before sending this command, here we also treat + * error 0x0C as success. + */ + if (status != 0x00 && status != 0x0C) + return; + + hci_dev_lock(hdev); + + cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); + msft->filter_enabled = cp->enable; + + if (status == 0x0C) + bt_dev_warn(hdev, "MSFT filter_enable is already %s", + cp->enable ? 
"on" : "off"); + + hci_dev_unlock(hdev); +} + +/* This function requires the caller holds hci_req_sync_lock */ +int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + struct msft_data *msft = hdev->msft_data; + + if (!msft) + return -EOPNOTSUPP; + + if (msft->resuming || msft->suspending) + return -EBUSY; + + return msft_add_monitor_sync(hdev, monitor); +} + +/* This function requires the caller holds hci_req_sync_lock */ +int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) +{ + struct msft_data *msft = hdev->msft_data; + + if (!msft) + return -EOPNOTSUPP; + + if (msft->resuming || msft->suspending) + return -EBUSY; + + return msft_remove_monitor_sync(hdev, monitor); +} + +void msft_req_add_set_filter_enable(struct hci_request *req, bool enable) +{ + struct hci_dev *hdev = req->hdev; + struct msft_cp_le_set_advertisement_filter_enable cp; + + cp.sub_opcode = MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE; + cp.enable = enable; + + hci_req_add(req, hdev->msft_opcode, sizeof(cp), &cp); +} + +int msft_set_filter_enable(struct hci_dev *hdev, bool enable) +{ + struct hci_request req; + struct msft_data *msft = hdev->msft_data; + int err; + + if (!msft) + return -EOPNOTSUPP; + + hci_req_init(&req, hdev); + msft_req_add_set_filter_enable(&req, enable); + err = hci_req_run_skb(&req, msft_le_set_advertisement_filter_enable_cb); + + return err; +} + +bool msft_curve_validity(struct hci_dev *hdev) +{ + return hdev->msft_curve_validity; +} diff --git a/net/bluetooth/msft.h b/net/bluetooth/msft.h new file mode 100644 index 000000000..2a63205b3 --- /dev/null +++ b/net/bluetooth/msft.h @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 Google Corporation + */ + +#define MSFT_FEATURE_MASK_BREDR_RSSI_MONITOR BIT(0) +#define MSFT_FEATURE_MASK_LE_CONN_RSSI_MONITOR BIT(1) +#define MSFT_FEATURE_MASK_LE_ADV_RSSI_MONITOR BIT(2) +#define MSFT_FEATURE_MASK_LE_ADV_MONITOR BIT(3) +#define MSFT_FEATURE_MASK_CURVE_VALIDITY BIT(4) +#define MSFT_FEATURE_MASK_CONCURRENT_ADV_MONITOR BIT(5) + +#if IS_ENABLED(CONFIG_BT_MSFTEXT) + +bool msft_monitor_supported(struct hci_dev *hdev); +void msft_register(struct hci_dev *hdev); +void msft_unregister(struct hci_dev *hdev); +void msft_do_open(struct hci_dev *hdev); +void msft_do_close(struct hci_dev *hdev); +void msft_vendor_evt(struct hci_dev *hdev, void *data, struct sk_buff *skb); +__u64 msft_get_features(struct hci_dev *hdev); +int msft_add_monitor_pattern(struct hci_dev *hdev, struct adv_monitor *monitor); +int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor); +void msft_req_add_set_filter_enable(struct hci_request *req, bool enable); +int msft_set_filter_enable(struct hci_dev *hdev, bool enable); +int msft_suspend_sync(struct hci_dev *hdev); +int msft_resume_sync(struct hci_dev *hdev); +bool msft_curve_validity(struct hci_dev *hdev); + +#else + +static inline bool msft_monitor_supported(struct hci_dev *hdev) +{ + return false; +} + +static inline void msft_register(struct hci_dev *hdev) {} +static inline void msft_unregister(struct hci_dev *hdev) {} +static inline void msft_do_open(struct hci_dev *hdev) {} +static inline void msft_do_close(struct hci_dev *hdev) {} +static inline void msft_vendor_evt(struct hci_dev *hdev, void *data, + struct sk_buff *skb) {} +static inline __u64 msft_get_features(struct hci_dev *hdev) { return 0; } +static inline int msft_add_monitor_pattern(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + return -EOPNOTSUPP; +} + +static inline 
int msft_remove_monitor(struct hci_dev *hdev, + struct adv_monitor *monitor) +{ + return -EOPNOTSUPP; +} + +static inline void msft_req_add_set_filter_enable(struct hci_request *req, + bool enable) {} +static inline int msft_set_filter_enable(struct hci_dev *hdev, bool enable) +{ + return -EOPNOTSUPP; +} + +static inline int msft_suspend_sync(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int msft_resume_sync(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline bool msft_curve_validity(struct hci_dev *hdev) +{ + return false; +} + +#endif diff --git a/net/bluetooth/rfcomm/Kconfig b/net/bluetooth/rfcomm/Kconfig new file mode 100644 index 000000000..9b9953ebf --- /dev/null +++ b/net/bluetooth/rfcomm/Kconfig @@ -0,0 +1,19 @@ +# SPDX-License-Identifier: GPL-2.0-only +config BT_RFCOMM + tristate "RFCOMM protocol support" + depends on BT_BREDR + help + RFCOMM provides connection oriented stream transport. RFCOMM + support is required for Dialup Networking, OBEX and other Bluetooth + applications. + + Say Y here to compile RFCOMM support into the kernel or say M to + compile it as module (rfcomm). + +config BT_RFCOMM_TTY + bool "RFCOMM TTY support" + depends on BT_RFCOMM + depends on TTY + help + This option enables TTY emulation support for RFCOMM channels. + diff --git a/net/bluetooth/rfcomm/Makefile b/net/bluetooth/rfcomm/Makefile new file mode 100644 index 000000000..593e5c48c --- /dev/null +++ b/net/bluetooth/rfcomm/Makefile @@ -0,0 +1,9 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Makefile for the Linux Bluetooth RFCOMM layer. +# + +obj-$(CONFIG_BT_RFCOMM) += rfcomm.o + +rfcomm-y := core.o sock.o +rfcomm-$(CONFIG_BT_RFCOMM_TTY) += tty.o diff --git a/net/bluetooth/rfcomm/core.c b/net/bluetooth/rfcomm/core.c new file mode 100644 index 000000000..8d6fce900 --- /dev/null +++ b/net/bluetooth/rfcomm/core.c @@ -0,0 +1,2281 @@ +/* + RFCOMM implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> + Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* + * Bluetooth RFCOMM core. 
+ */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/kthread.h> +#include <asm/unaligned.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> +#include <net/bluetooth/rfcomm.h> + +#define VERSION "1.11" + +static bool disable_cfc; +static bool l2cap_ertm; +static int channel_mtu = -1; + +static struct task_struct *rfcomm_thread; + +static DEFINE_MUTEX(rfcomm_mutex); +#define rfcomm_lock() mutex_lock(&rfcomm_mutex) +#define rfcomm_unlock() mutex_unlock(&rfcomm_mutex) + + +static LIST_HEAD(session_list); + +static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len); +static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci); +static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci); +static int rfcomm_queue_disc(struct rfcomm_dlc *d); +static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type); +static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d); +static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig); +static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len); +static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits); +static void rfcomm_make_uih(struct sk_buff *skb, u8 addr); + +static void rfcomm_process_connect(struct rfcomm_session *s); + +static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, + bdaddr_t *dst, + u8 sec_level, + int *err); +static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst); +static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s); + +/* ---- RFCOMM frame parsing macros ---- */ +#define __get_dlci(b) ((b & 0xfc) >> 2) +#define __get_type(b) ((b & 0xef)) + +#define __test_ea(b) ((b & 0x01)) +#define __test_cr(b) (!!(b & 0x02)) +#define __test_pf(b) (!!(b & 0x10)) + +#define __session_dir(s) ((s)->initiator ? 
0x00 : 0x01) + +#define __addr(cr, dlci) (((dlci & 0x3f) << 2) | (cr << 1) | 0x01) +#define __ctrl(type, pf) (((type & 0xef) | (pf << 4))) +#define __dlci(dir, chn) (((chn & 0x1f) << 1) | dir) +#define __srv_channel(dlci) (dlci >> 1) + +#define __len8(len) (((len) << 1) | 1) +#define __len16(len) ((len) << 1) + +/* MCC macros */ +#define __mcc_type(cr, type) (((type << 2) | (cr << 1) | 0x01)) +#define __get_mcc_type(b) ((b & 0xfc) >> 2) +#define __get_mcc_len(b) ((b & 0xfe) >> 1) + +/* RPN macros */ +#define __rpn_line_settings(data, stop, parity) ((data & 0x3) | ((stop & 0x1) << 2) | ((parity & 0x7) << 3)) +#define __get_rpn_data_bits(line) ((line) & 0x3) +#define __get_rpn_stop_bits(line) (((line) >> 2) & 0x1) +#define __get_rpn_parity(line) (((line) >> 3) & 0x7) + +static DECLARE_WAIT_QUEUE_HEAD(rfcomm_wq); + +static void rfcomm_schedule(void) +{ + wake_up_all(&rfcomm_wq); +} + +/* ---- RFCOMM FCS computation ---- */ + +/* reversed, 8-bit, poly=0x07 */ +static unsigned char rfcomm_crc_table[256] = { + 0x00, 0x91, 0xe3, 0x72, 0x07, 0x96, 0xe4, 0x75, + 0x0e, 0x9f, 0xed, 0x7c, 0x09, 0x98, 0xea, 0x7b, + 0x1c, 0x8d, 0xff, 0x6e, 0x1b, 0x8a, 0xf8, 0x69, + 0x12, 0x83, 0xf1, 0x60, 0x15, 0x84, 0xf6, 0x67, + + 0x38, 0xa9, 0xdb, 0x4a, 0x3f, 0xae, 0xdc, 0x4d, + 0x36, 0xa7, 0xd5, 0x44, 0x31, 0xa0, 0xd2, 0x43, + 0x24, 0xb5, 0xc7, 0x56, 0x23, 0xb2, 0xc0, 0x51, + 0x2a, 0xbb, 0xc9, 0x58, 0x2d, 0xbc, 0xce, 0x5f, + + 0x70, 0xe1, 0x93, 0x02, 0x77, 0xe6, 0x94, 0x05, + 0x7e, 0xef, 0x9d, 0x0c, 0x79, 0xe8, 0x9a, 0x0b, + 0x6c, 0xfd, 0x8f, 0x1e, 0x6b, 0xfa, 0x88, 0x19, + 0x62, 0xf3, 0x81, 0x10, 0x65, 0xf4, 0x86, 0x17, + + 0x48, 0xd9, 0xab, 0x3a, 0x4f, 0xde, 0xac, 0x3d, + 0x46, 0xd7, 0xa5, 0x34, 0x41, 0xd0, 0xa2, 0x33, + 0x54, 0xc5, 0xb7, 0x26, 0x53, 0xc2, 0xb0, 0x21, + 0x5a, 0xcb, 0xb9, 0x28, 0x5d, 0xcc, 0xbe, 0x2f, + + 0xe0, 0x71, 0x03, 0x92, 0xe7, 0x76, 0x04, 0x95, + 0xee, 0x7f, 0x0d, 0x9c, 0xe9, 0x78, 0x0a, 0x9b, + 0xfc, 0x6d, 0x1f, 0x8e, 0xfb, 0x6a, 0x18, 0x89, + 0xf2, 0x63, 0x11, 0x80, 0xf5, 0x64, 0x16, 0x87, + + 0xd8, 0x49, 0x3b, 0xaa, 0xdf, 0x4e, 0x3c, 0xad, + 0xd6, 0x47, 0x35, 0xa4, 0xd1, 0x40, 0x32, 0xa3, + 0xc4, 0x55, 0x27, 0xb6, 0xc3, 0x52, 0x20, 0xb1, + 0xca, 0x5b, 0x29, 0xb8, 0xcd, 0x5c, 0x2e, 0xbf, + + 0x90, 0x01, 0x73, 0xe2, 0x97, 0x06, 0x74, 0xe5, + 0x9e, 0x0f, 0x7d, 0xec, 0x99, 0x08, 0x7a, 0xeb, + 0x8c, 0x1d, 0x6f, 0xfe, 0x8b, 0x1a, 0x68, 0xf9, + 0x82, 0x13, 0x61, 0xf0, 0x85, 0x14, 0x66, 0xf7, + + 0xa8, 0x39, 0x4b, 0xda, 0xaf, 0x3e, 0x4c, 0xdd, + 0xa6, 0x37, 0x45, 0xd4, 0xa1, 0x30, 0x42, 0xd3, + 0xb4, 0x25, 0x57, 0xc6, 0xb3, 0x22, 0x50, 0xc1, + 0xba, 0x2b, 0x59, 0xc8, 0xbd, 0x2c, 0x5e, 0xcf +}; + +/* CRC on 2 bytes */ +#define __crc(data) (rfcomm_crc_table[rfcomm_crc_table[0xff ^ data[0]] ^ data[1]]) + +/* FCS on 2 bytes */ +static inline u8 __fcs(u8 *data) +{ + return 0xff - __crc(data); +} + +/* FCS on 3 bytes */ +static inline u8 __fcs2(u8 *data) +{ + return 0xff - rfcomm_crc_table[__crc(data) ^ data[2]]; +} + +/* Check FCS */ +static inline int __check_fcs(u8 *data, int type, u8 fcs) +{ + u8 f = __crc(data); + + if (type != RFCOMM_UIH) + f = rfcomm_crc_table[f ^ data[2]]; + + return rfcomm_crc_table[f ^ fcs] != 0xcf; +} + +/* ---- L2CAP callbacks ---- */ +static void rfcomm_l2state_change(struct sock *sk) +{ + BT_DBG("%p state %d", sk, sk->sk_state); + rfcomm_schedule(); +} + +static void rfcomm_l2data_ready(struct sock *sk) +{ + BT_DBG("%p", sk); + rfcomm_schedule(); +} + +static int rfcomm_l2sock_create(struct socket **sock) +{ + int err; + + BT_DBG(""); + + err = 
sock_create_kern(&init_net, PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_L2CAP, sock); + if (!err) { + struct sock *sk = (*sock)->sk; + sk->sk_data_ready = rfcomm_l2data_ready; + sk->sk_state_change = rfcomm_l2state_change; + } + return err; +} + +static int rfcomm_check_security(struct rfcomm_dlc *d) +{ + struct sock *sk = d->session->sock->sk; + struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; + + __u8 auth_type; + + switch (d->sec_level) { + case BT_SECURITY_HIGH: + case BT_SECURITY_FIPS: + auth_type = HCI_AT_GENERAL_BONDING_MITM; + break; + case BT_SECURITY_MEDIUM: + auth_type = HCI_AT_GENERAL_BONDING; + break; + default: + auth_type = HCI_AT_NO_BONDING; + break; + } + + return hci_conn_security(conn->hcon, d->sec_level, auth_type, + d->out); +} + +static void rfcomm_session_timeout(struct timer_list *t) +{ + struct rfcomm_session *s = from_timer(s, t, timer); + + BT_DBG("session %p state %ld", s, s->state); + + set_bit(RFCOMM_TIMED_OUT, &s->flags); + rfcomm_schedule(); +} + +static void rfcomm_session_set_timer(struct rfcomm_session *s, long timeout) +{ + BT_DBG("session %p state %ld timeout %ld", s, s->state, timeout); + + mod_timer(&s->timer, jiffies + timeout); +} + +static void rfcomm_session_clear_timer(struct rfcomm_session *s) +{ + BT_DBG("session %p state %ld", s, s->state); + + del_timer_sync(&s->timer); +} + +/* ---- RFCOMM DLCs ---- */ +static void rfcomm_dlc_timeout(struct timer_list *t) +{ + struct rfcomm_dlc *d = from_timer(d, t, timer); + + BT_DBG("dlc %p state %ld", d, d->state); + + set_bit(RFCOMM_TIMED_OUT, &d->flags); + rfcomm_dlc_put(d); + rfcomm_schedule(); +} + +static void rfcomm_dlc_set_timer(struct rfcomm_dlc *d, long timeout) +{ + BT_DBG("dlc %p state %ld timeout %ld", d, d->state, timeout); + + if (!mod_timer(&d->timer, jiffies + timeout)) + rfcomm_dlc_hold(d); +} + +static void rfcomm_dlc_clear_timer(struct rfcomm_dlc *d) +{ + BT_DBG("dlc %p state %ld", d, d->state); + + if (del_timer(&d->timer)) + rfcomm_dlc_put(d); +} + +static void rfcomm_dlc_clear_state(struct rfcomm_dlc *d) +{ + BT_DBG("%p", d); + + d->state = BT_OPEN; + d->flags = 0; + d->mscex = 0; + d->sec_level = BT_SECURITY_LOW; + d->mtu = RFCOMM_DEFAULT_MTU; + d->v24_sig = RFCOMM_V24_RTC | RFCOMM_V24_RTR | RFCOMM_V24_DV; + + d->cfc = RFCOMM_CFC_DISABLED; + d->rx_credits = RFCOMM_DEFAULT_CREDITS; +} + +struct rfcomm_dlc *rfcomm_dlc_alloc(gfp_t prio) +{ + struct rfcomm_dlc *d = kzalloc(sizeof(*d), prio); + + if (!d) + return NULL; + + timer_setup(&d->timer, rfcomm_dlc_timeout, 0); + + skb_queue_head_init(&d->tx_queue); + mutex_init(&d->lock); + refcount_set(&d->refcnt, 1); + + rfcomm_dlc_clear_state(d); + + BT_DBG("%p", d); + + return d; +} + +void rfcomm_dlc_free(struct rfcomm_dlc *d) +{ + BT_DBG("%p", d); + + skb_queue_purge(&d->tx_queue); + kfree(d); +} + +static void rfcomm_dlc_link(struct rfcomm_session *s, struct rfcomm_dlc *d) +{ + BT_DBG("dlc %p session %p", d, s); + + rfcomm_session_clear_timer(s); + rfcomm_dlc_hold(d); + list_add(&d->list, &s->dlcs); + d->session = s; +} + +static void rfcomm_dlc_unlink(struct rfcomm_dlc *d) +{ + struct rfcomm_session *s = d->session; + + BT_DBG("dlc %p refcnt %d session %p", d, refcount_read(&d->refcnt), s); + + list_del(&d->list); + d->session = NULL; + rfcomm_dlc_put(d); + + if (list_empty(&s->dlcs)) + rfcomm_session_set_timer(s, RFCOMM_IDLE_TIMEOUT); +} + +static struct rfcomm_dlc *rfcomm_dlc_get(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_dlc *d; + + list_for_each_entry(d, &s->dlcs, list) + if (d->dlci == dlci) + return d; + + return NULL; +} + 
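+/* Valid RFCOMM server channels are 1-30; returns non-zero when the + * requested channel is out of range. + */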
+static int rfcomm_check_channel(u8 channel) +{ + return channel < 1 || channel > 30; +} + +static int __rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel) +{ + struct rfcomm_session *s; + int err = 0; + u8 dlci; + + BT_DBG("dlc %p state %ld %pMR -> %pMR channel %d", + d, d->state, src, dst, channel); + + if (rfcomm_check_channel(channel)) + return -EINVAL; + + if (d->state != BT_OPEN && d->state != BT_CLOSED) + return 0; + + s = rfcomm_session_get(src, dst); + if (!s) { + s = rfcomm_session_create(src, dst, d->sec_level, &err); + if (!s) + return err; + } + + dlci = __dlci(__session_dir(s), channel); + + /* Check if DLCI already exists */ + if (rfcomm_dlc_get(s, dlci)) + return -EBUSY; + + rfcomm_dlc_clear_state(d); + + d->dlci = dlci; + d->addr = __addr(s->initiator, dlci); + d->priority = 7; + + d->state = BT_CONFIG; + rfcomm_dlc_link(s, d); + + d->out = 1; + + d->mtu = s->mtu; + d->cfc = (s->cfc == RFCOMM_CFC_UNKNOWN) ? 0 : s->cfc; + + if (s->state == BT_CONNECTED) { + if (rfcomm_check_security(d)) + rfcomm_send_pn(s, 1, d); + else + set_bit(RFCOMM_AUTH_PENDING, &d->flags); + } + + rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); + + return 0; +} + +int rfcomm_dlc_open(struct rfcomm_dlc *d, bdaddr_t *src, bdaddr_t *dst, u8 channel) +{ + int r; + + rfcomm_lock(); + + r = __rfcomm_dlc_open(d, src, dst, channel); + + rfcomm_unlock(); + return r; +} + +static void __rfcomm_dlc_disconn(struct rfcomm_dlc *d) +{ + struct rfcomm_session *s = d->session; + + d->state = BT_DISCONN; + if (skb_queue_empty(&d->tx_queue)) { + rfcomm_send_disc(s, d->dlci); + rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT); + } else { + rfcomm_queue_disc(d); + rfcomm_dlc_set_timer(d, RFCOMM_DISC_TIMEOUT * 2); + } +} + +static int __rfcomm_dlc_close(struct rfcomm_dlc *d, int err) +{ + struct rfcomm_session *s = d->session; + if (!s) + return 0; + + BT_DBG("dlc %p state %ld dlci %d err %d session %p", + d, d->state, d->dlci, err, s); + + switch (d->state) { + case BT_CONNECT: + case BT_CONFIG: + case BT_OPEN: + case BT_CONNECT2: + if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { + set_bit(RFCOMM_AUTH_REJECT, &d->flags); + rfcomm_schedule(); + return 0; + } + } + + switch (d->state) { + case BT_CONNECT: + case BT_CONNECTED: + __rfcomm_dlc_disconn(d); + break; + + case BT_CONFIG: + if (s->state != BT_BOUND) { + __rfcomm_dlc_disconn(d); + break; + } + /* if closing a dlc in a session that hasn't been started, + * just close and unlink the dlc + */ + fallthrough; + + default: + rfcomm_dlc_clear_timer(d); + + rfcomm_dlc_lock(d); + d->state = BT_CLOSED; + d->state_change(d, err); + rfcomm_dlc_unlock(d); + + skb_queue_purge(&d->tx_queue); + rfcomm_dlc_unlink(d); + } + + return 0; +} + +int rfcomm_dlc_close(struct rfcomm_dlc *d, int err) +{ + int r = 0; + struct rfcomm_dlc *d_list; + struct rfcomm_session *s, *s_list; + + BT_DBG("dlc %p state %ld dlci %d err %d", d, d->state, d->dlci, err); + + rfcomm_lock(); + + s = d->session; + if (!s) + goto no_session; + + /* after waiting on the mutex check the session still exists + * then check the dlc still exists + */ + list_for_each_entry(s_list, &session_list, list) { + if (s_list == s) { + list_for_each_entry(d_list, &s->dlcs, list) { + if (d_list == d) { + r = __rfcomm_dlc_close(d, err); + break; + } + } + break; + } + } + +no_session: + rfcomm_unlock(); + return r; +} + +struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel) +{ + struct rfcomm_session *s; + struct rfcomm_dlc *dlc = NULL; + u8 dlci; + + if 
(rfcomm_check_channel(channel)) + return ERR_PTR(-EINVAL); + + rfcomm_lock(); + s = rfcomm_session_get(src, dst); + if (s) { + dlci = __dlci(__session_dir(s), channel); + dlc = rfcomm_dlc_get(s, dlci); + } + rfcomm_unlock(); + return dlc; +} + +static int rfcomm_dlc_send_frag(struct rfcomm_dlc *d, struct sk_buff *frag) +{ + int len = frag->len; + + BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); + + if (len > d->mtu) + return -EINVAL; + + rfcomm_make_uih(frag, d->addr); + __skb_queue_tail(&d->tx_queue, frag); + + return len; +} + +int rfcomm_dlc_send(struct rfcomm_dlc *d, struct sk_buff *skb) +{ + unsigned long flags; + struct sk_buff *frag, *next; + int len; + + if (d->state != BT_CONNECTED) + return -ENOTCONN; + + frag = skb_shinfo(skb)->frag_list; + skb_shinfo(skb)->frag_list = NULL; + + /* Queue all fragments atomically. */ + spin_lock_irqsave(&d->tx_queue.lock, flags); + + len = rfcomm_dlc_send_frag(d, skb); + if (len < 0 || !frag) + goto unlock; + + for (; frag; frag = next) { + int ret; + + next = frag->next; + + ret = rfcomm_dlc_send_frag(d, frag); + if (ret < 0) { + dev_kfree_skb_irq(frag); + goto unlock; + } + + len += ret; + } + +unlock: + spin_unlock_irqrestore(&d->tx_queue.lock, flags); + + if (len > 0 && !test_bit(RFCOMM_TX_THROTTLED, &d->flags)) + rfcomm_schedule(); + return len; +} + +void rfcomm_dlc_send_noerror(struct rfcomm_dlc *d, struct sk_buff *skb) +{ + int len = skb->len; + + BT_DBG("dlc %p mtu %d len %d", d, d->mtu, len); + + rfcomm_make_uih(skb, d->addr); + skb_queue_tail(&d->tx_queue, skb); + + if (d->state == BT_CONNECTED && + !test_bit(RFCOMM_TX_THROTTLED, &d->flags)) + rfcomm_schedule(); +} + +void __rfcomm_dlc_throttle(struct rfcomm_dlc *d) +{ + BT_DBG("dlc %p state %ld", d, d->state); + + if (!d->cfc) { + d->v24_sig |= RFCOMM_V24_FC; + set_bit(RFCOMM_MSC_PENDING, &d->flags); + } + rfcomm_schedule(); +} + +void __rfcomm_dlc_unthrottle(struct rfcomm_dlc *d) +{ + BT_DBG("dlc %p state %ld", d, d->state); + + if (!d->cfc) { + d->v24_sig &= ~RFCOMM_V24_FC; + set_bit(RFCOMM_MSC_PENDING, &d->flags); + } + rfcomm_schedule(); +} + +/* + Set/get modem status functions use _local_ status i.e. what we report + to the other side. + Remote status is provided by dlc->modem_status() callback. + */ +int rfcomm_dlc_set_modem_status(struct rfcomm_dlc *d, u8 v24_sig) +{ + BT_DBG("dlc %p state %ld v24_sig 0x%x", + d, d->state, v24_sig); + + if (test_bit(RFCOMM_RX_THROTTLED, &d->flags)) + v24_sig |= RFCOMM_V24_FC; + else + v24_sig &= ~RFCOMM_V24_FC; + + d->v24_sig = v24_sig; + + if (!test_and_set_bit(RFCOMM_MSC_PENDING, &d->flags)) + rfcomm_schedule(); + + return 0; +} + +int rfcomm_dlc_get_modem_status(struct rfcomm_dlc *d, u8 *v24_sig) +{ + BT_DBG("dlc %p state %ld v24_sig 0x%x", + d, d->state, d->v24_sig); + + *v24_sig = d->v24_sig; + return 0; +} + +/* ---- RFCOMM sessions ---- */ +static struct rfcomm_session *rfcomm_session_add(struct socket *sock, int state) +{ + struct rfcomm_session *s = kzalloc(sizeof(*s), GFP_KERNEL); + + if (!s) + return NULL; + + BT_DBG("session %p sock %p", s, sock); + + timer_setup(&s->timer, rfcomm_session_timeout, 0); + + INIT_LIST_HEAD(&s->dlcs); + s->state = state; + s->sock = sock; + + s->mtu = RFCOMM_DEFAULT_MTU; + s->cfc = disable_cfc ? RFCOMM_CFC_DISABLED : RFCOMM_CFC_UNKNOWN; + + /* Do not increment module usage count for listening sessions. + * Otherwise we won't be able to unload the module. 
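+ * The matching module_put() is done in rfcomm_session_del() for every + * session that was not created in the BT_LISTEN state.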
*/ + if (state != BT_LISTEN) + if (!try_module_get(THIS_MODULE)) { + kfree(s); + return NULL; + } + + list_add(&s->list, &session_list); + + return s; +} + +static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s) +{ + int state = s->state; + + BT_DBG("session %p state %ld", s, s->state); + + list_del(&s->list); + + rfcomm_session_clear_timer(s); + sock_release(s->sock); + kfree(s); + + if (state != BT_LISTEN) + module_put(THIS_MODULE); + + return NULL; +} + +static struct rfcomm_session *rfcomm_session_get(bdaddr_t *src, bdaddr_t *dst) +{ + struct rfcomm_session *s, *n; + struct l2cap_chan *chan; + list_for_each_entry_safe(s, n, &session_list, list) { + chan = l2cap_pi(s->sock->sk)->chan; + + if ((!bacmp(src, BDADDR_ANY) || !bacmp(&chan->src, src)) && + !bacmp(&chan->dst, dst)) + return s; + } + return NULL; +} + +static struct rfcomm_session *rfcomm_session_close(struct rfcomm_session *s, + int err) +{ + struct rfcomm_dlc *d, *n; + + s->state = BT_CLOSED; + + BT_DBG("session %p state %ld err %d", s, s->state, err); + + /* Close all dlcs */ + list_for_each_entry_safe(d, n, &s->dlcs, list) { + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, err); + } + + rfcomm_session_clear_timer(s); + return rfcomm_session_del(s); +} + +static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src, + bdaddr_t *dst, + u8 sec_level, + int *err) +{ + struct rfcomm_session *s = NULL; + struct sockaddr_l2 addr; + struct socket *sock; + struct sock *sk; + + BT_DBG("%pMR -> %pMR", src, dst); + + *err = rfcomm_l2sock_create(&sock); + if (*err < 0) + return NULL; + + bacpy(&addr.l2_bdaddr, src); + addr.l2_family = AF_BLUETOOTH; + addr.l2_psm = 0; + addr.l2_cid = 0; + addr.l2_bdaddr_type = BDADDR_BREDR; + *err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); + if (*err < 0) + goto failed; + + /* Set L2CAP options */ + sk = sock->sk; + lock_sock(sk); + /* Set MTU to 0 so L2CAP can auto select the MTU */ + l2cap_pi(sk)->chan->imtu = 0; + l2cap_pi(sk)->chan->sec_level = sec_level; + if (l2cap_ertm) + l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM; + release_sock(sk); + + s = rfcomm_session_add(sock, BT_BOUND); + if (!s) { + *err = -ENOMEM; + goto failed; + } + + s->initiator = 1; + + bacpy(&addr.l2_bdaddr, dst); + addr.l2_family = AF_BLUETOOTH; + addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM); + addr.l2_cid = 0; + addr.l2_bdaddr_type = BDADDR_BREDR; + *err = kernel_connect(sock, (struct sockaddr *) &addr, sizeof(addr), O_NONBLOCK); + if (*err == 0 || *err == -EINPROGRESS) + return s; + + return rfcomm_session_del(s); + +failed: + sock_release(sock); + return NULL; +} + +void rfcomm_session_getaddr(struct rfcomm_session *s, bdaddr_t *src, bdaddr_t *dst) +{ + struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan; + if (src) + bacpy(src, &chan->src); + if (dst) + bacpy(dst, &chan->dst); +} + +/* ---- RFCOMM frame sending ---- */ +static int rfcomm_send_frame(struct rfcomm_session *s, u8 *data, int len) +{ + struct kvec iv = { data, len }; + struct msghdr msg; + + BT_DBG("session %p len %d", s, len); + + memset(&msg, 0, sizeof(msg)); + + return kernel_sendmsg(s->sock, &msg, &iv, 1, len); +} + +static int rfcomm_send_cmd(struct rfcomm_session *s, struct rfcomm_cmd *cmd) +{ + BT_DBG("%p cmd %u", s, cmd->ctrl); + + return rfcomm_send_frame(s, (void *) cmd, sizeof(*cmd)); +} + +static int rfcomm_send_sabm(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_cmd cmd; + + BT_DBG("%p dlci %d", s, dlci); + + cmd.addr = __addr(s->initiator, dlci); + cmd.ctrl = __ctrl(RFCOMM_SABM, 1); + cmd.len = 
__len8(0); + cmd.fcs = __fcs2((u8 *) &cmd); + + return rfcomm_send_cmd(s, &cmd); +} + +static int rfcomm_send_ua(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_cmd cmd; + + BT_DBG("%p dlci %d", s, dlci); + + cmd.addr = __addr(!s->initiator, dlci); + cmd.ctrl = __ctrl(RFCOMM_UA, 1); + cmd.len = __len8(0); + cmd.fcs = __fcs2((u8 *) &cmd); + + return rfcomm_send_cmd(s, &cmd); +} + +static int rfcomm_send_disc(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_cmd cmd; + + BT_DBG("%p dlci %d", s, dlci); + + cmd.addr = __addr(s->initiator, dlci); + cmd.ctrl = __ctrl(RFCOMM_DISC, 1); + cmd.len = __len8(0); + cmd.fcs = __fcs2((u8 *) &cmd); + + return rfcomm_send_cmd(s, &cmd); +} + +static int rfcomm_queue_disc(struct rfcomm_dlc *d) +{ + struct rfcomm_cmd *cmd; + struct sk_buff *skb; + + BT_DBG("dlc %p dlci %d", d, d->dlci); + + skb = alloc_skb(sizeof(*cmd), GFP_KERNEL); + if (!skb) + return -ENOMEM; + + cmd = __skb_put(skb, sizeof(*cmd)); + cmd->addr = d->addr; + cmd->ctrl = __ctrl(RFCOMM_DISC, 1); + cmd->len = __len8(0); + cmd->fcs = __fcs2((u8 *) cmd); + + skb_queue_tail(&d->tx_queue, skb); + rfcomm_schedule(); + return 0; +} + +static int rfcomm_send_dm(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_cmd cmd; + + BT_DBG("%p dlci %d", s, dlci); + + cmd.addr = __addr(!s->initiator, dlci); + cmd.ctrl = __ctrl(RFCOMM_DM, 1); + cmd.len = __len8(0); + cmd.fcs = __fcs2((u8 *) &cmd); + + return rfcomm_send_cmd(s, &cmd); +} + +static int rfcomm_send_nsc(struct rfcomm_session *s, int cr, u8 type) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d type %d", s, cr, type); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + 1); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(0, RFCOMM_NSC); + mcc->len = __len8(1); + + /* Type that we didn't like */ + *ptr = __mcc_type(cr, type); ptr++; + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_pn(struct rfcomm_session *s, int cr, struct rfcomm_dlc *d) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + struct rfcomm_pn *pn; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d dlci %d mtu %d", s, cr, d->dlci, d->mtu); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + sizeof(*pn)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_PN); + mcc->len = __len8(sizeof(*pn)); + + pn = (void *) ptr; ptr += sizeof(*pn); + pn->dlci = d->dlci; + pn->priority = d->priority; + pn->ack_timer = 0; + pn->max_retrans = 0; + + if (s->cfc) { + pn->flow_ctrl = cr ? 
0xf0 : 0xe0; + pn->credits = RFCOMM_DEFAULT_CREDITS; + } else { + pn->flow_ctrl = 0; + pn->credits = 0; + } + + if (cr && channel_mtu >= 0) + pn->mtu = cpu_to_le16(channel_mtu); + else + pn->mtu = cpu_to_le16(d->mtu); + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +int rfcomm_send_rpn(struct rfcomm_session *s, int cr, u8 dlci, + u8 bit_rate, u8 data_bits, u8 stop_bits, + u8 parity, u8 flow_ctrl_settings, + u8 xon_char, u8 xoff_char, u16 param_mask) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + struct rfcomm_rpn *rpn; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d dlci %d bit_r 0x%x data_b 0x%x stop_b 0x%x parity 0x%x" + " flwc_s 0x%x xon_c 0x%x xoff_c 0x%x p_mask 0x%x", + s, cr, dlci, bit_rate, data_bits, stop_bits, parity, + flow_ctrl_settings, xon_char, xoff_char, param_mask); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + sizeof(*rpn)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_RPN); + mcc->len = __len8(sizeof(*rpn)); + + rpn = (void *) ptr; ptr += sizeof(*rpn); + rpn->dlci = __addr(1, dlci); + rpn->bit_rate = bit_rate; + rpn->line_settings = __rpn_line_settings(data_bits, stop_bits, parity); + rpn->flow_ctrl = flow_ctrl_settings; + rpn->xon_char = xon_char; + rpn->xoff_char = xoff_char; + rpn->param_mask = cpu_to_le16(param_mask); + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_rls(struct rfcomm_session *s, int cr, u8 dlci, u8 status) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + struct rfcomm_rls *rls; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d status 0x%x", s, cr, status); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + sizeof(*rls)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_RLS); + mcc->len = __len8(sizeof(*rls)); + + rls = (void *) ptr; ptr += sizeof(*rls); + rls->dlci = __addr(1, dlci); + rls->status = status; + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_msc(struct rfcomm_session *s, int cr, u8 dlci, u8 v24_sig) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + struct rfcomm_msc *msc; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d v24 0x%x", s, cr, v24_sig); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc) + sizeof(*msc)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_MSC); + mcc->len = __len8(sizeof(*msc)); + + msc = (void *) ptr; ptr += sizeof(*msc); + msc->dlci = __addr(1, dlci); + msc->v24_sig = v24_sig | 0x01; + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_fcoff(struct rfcomm_session *s, int cr) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d", s, cr); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_FCOFF); + mcc->len = __len8(0); + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_fcon(struct 
rfcomm_session *s, int cr) +{ + struct rfcomm_hdr *hdr; + struct rfcomm_mcc *mcc; + u8 buf[16], *ptr = buf; + + BT_DBG("%p cr %d", s, cr); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = __addr(s->initiator, 0); + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + hdr->len = __len8(sizeof(*mcc)); + + mcc = (void *) ptr; ptr += sizeof(*mcc); + mcc->type = __mcc_type(cr, RFCOMM_FCON); + mcc->len = __len8(0); + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static int rfcomm_send_test(struct rfcomm_session *s, int cr, u8 *pattern, int len) +{ + struct socket *sock = s->sock; + struct kvec iv[3]; + struct msghdr msg; + unsigned char hdr[5], crc[1]; + + if (len > 125) + return -EINVAL; + + BT_DBG("%p cr %d", s, cr); + + hdr[0] = __addr(s->initiator, 0); + hdr[1] = __ctrl(RFCOMM_UIH, 0); + hdr[2] = 0x01 | ((len + 2) << 1); + hdr[3] = 0x01 | ((cr & 0x01) << 1) | (RFCOMM_TEST << 2); + hdr[4] = 0x01 | (len << 1); + + crc[0] = __fcs(hdr); + + iv[0].iov_base = hdr; + iv[0].iov_len = 5; + iv[1].iov_base = pattern; + iv[1].iov_len = len; + iv[2].iov_base = crc; + iv[2].iov_len = 1; + + memset(&msg, 0, sizeof(msg)); + + return kernel_sendmsg(sock, &msg, iv, 3, 6 + len); +} + +static int rfcomm_send_credits(struct rfcomm_session *s, u8 addr, u8 credits) +{ + struct rfcomm_hdr *hdr; + u8 buf[16], *ptr = buf; + + BT_DBG("%p addr %d credits %d", s, addr, credits); + + hdr = (void *) ptr; ptr += sizeof(*hdr); + hdr->addr = addr; + hdr->ctrl = __ctrl(RFCOMM_UIH, 1); + hdr->len = __len8(0); + + *ptr = credits; ptr++; + + *ptr = __fcs(buf); ptr++; + + return rfcomm_send_frame(s, buf, ptr - buf); +} + +static void rfcomm_make_uih(struct sk_buff *skb, u8 addr) +{ + struct rfcomm_hdr *hdr; + int len = skb->len; + u8 *crc; + + if (len > 127) { + hdr = skb_push(skb, 4); + put_unaligned(cpu_to_le16(__len16(len)), (__le16 *) &hdr->len); + } else { + hdr = skb_push(skb, 3); + hdr->len = __len8(len); + } + hdr->addr = addr; + hdr->ctrl = __ctrl(RFCOMM_UIH, 0); + + crc = skb_put(skb, 1); + *crc = __fcs((void *) hdr); +} + +/* ---- RFCOMM frame reception ---- */ +static struct rfcomm_session *rfcomm_recv_ua(struct rfcomm_session *s, u8 dlci) +{ + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (dlci) { + /* Data channel */ + struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); + if (!d) { + rfcomm_send_dm(s, dlci); + return s; + } + + switch (d->state) { + case BT_CONNECT: + rfcomm_dlc_clear_timer(d); + + rfcomm_dlc_lock(d); + d->state = BT_CONNECTED; + d->state_change(d, 0); + rfcomm_dlc_unlock(d); + + rfcomm_send_msc(s, 1, dlci, d->v24_sig); + break; + + case BT_DISCONN: + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, 0); + + if (list_empty(&s->dlcs)) { + s->state = BT_DISCONN; + rfcomm_send_disc(s, 0); + rfcomm_session_clear_timer(s); + } + + break; + } + } else { + /* Control channel */ + switch (s->state) { + case BT_CONNECT: + s->state = BT_CONNECTED; + rfcomm_process_connect(s); + break; + + case BT_DISCONN: + s = rfcomm_session_close(s, ECONNRESET); + break; + } + } + return s; +} + +static struct rfcomm_session *rfcomm_recv_dm(struct rfcomm_session *s, u8 dlci) +{ + int err = 0; + + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (dlci) { + /* Data DLC */ + struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); + if (d) { + if (d->state == BT_CONNECT || d->state == BT_CONFIG) + err = ECONNREFUSED; + else + err = ECONNRESET; + + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, err); + } + } else { + if (s->state == BT_CONNECT) + err = ECONNREFUSED; + else + err 
= ECONNRESET; + + s = rfcomm_session_close(s, err); + } + return s; +} + +static struct rfcomm_session *rfcomm_recv_disc(struct rfcomm_session *s, + u8 dlci) +{ + int err = 0; + + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (dlci) { + struct rfcomm_dlc *d = rfcomm_dlc_get(s, dlci); + if (d) { + rfcomm_send_ua(s, dlci); + + if (d->state == BT_CONNECT || d->state == BT_CONFIG) + err = ECONNREFUSED; + else + err = ECONNRESET; + + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, err); + } else + rfcomm_send_dm(s, dlci); + + } else { + rfcomm_send_ua(s, 0); + + if (s->state == BT_CONNECT) + err = ECONNREFUSED; + else + err = ECONNRESET; + + s = rfcomm_session_close(s, err); + } + return s; +} + +void rfcomm_dlc_accept(struct rfcomm_dlc *d) +{ + struct sock *sk = d->session->sock->sk; + struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn; + + BT_DBG("dlc %p", d); + + rfcomm_send_ua(d->session, d->dlci); + + rfcomm_dlc_clear_timer(d); + + rfcomm_dlc_lock(d); + d->state = BT_CONNECTED; + d->state_change(d, 0); + rfcomm_dlc_unlock(d); + + if (d->role_switch) + hci_conn_switch_role(conn->hcon, 0x00); + + rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); +} + +static void rfcomm_check_accept(struct rfcomm_dlc *d) +{ + if (rfcomm_check_security(d)) { + if (d->defer_setup) { + set_bit(RFCOMM_DEFER_SETUP, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + + rfcomm_dlc_lock(d); + d->state = BT_CONNECT2; + d->state_change(d, 0); + rfcomm_dlc_unlock(d); + } else + rfcomm_dlc_accept(d); + } else { + set_bit(RFCOMM_AUTH_PENDING, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + } +} + +static int rfcomm_recv_sabm(struct rfcomm_session *s, u8 dlci) +{ + struct rfcomm_dlc *d; + u8 channel; + + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (!dlci) { + rfcomm_send_ua(s, 0); + + if (s->state == BT_OPEN) { + s->state = BT_CONNECTED; + rfcomm_process_connect(s); + } + return 0; + } + + /* Check if DLC exists */ + d = rfcomm_dlc_get(s, dlci); + if (d) { + if (d->state == BT_OPEN) { + /* DLC was previously opened by PN request */ + rfcomm_check_accept(d); + } + return 0; + } + + /* Notify socket layer about incoming connection */ + channel = __srv_channel(dlci); + if (rfcomm_connect_ind(s, channel, &d)) { + d->dlci = dlci; + d->addr = __addr(s->initiator, dlci); + rfcomm_dlc_link(s, d); + + rfcomm_check_accept(d); + } else { + rfcomm_send_dm(s, dlci); + } + + return 0; +} + +static int rfcomm_apply_pn(struct rfcomm_dlc *d, int cr, struct rfcomm_pn *pn) +{ + struct rfcomm_session *s = d->session; + + BT_DBG("dlc %p state %ld dlci %d mtu %d fc 0x%x credits %d", + d, d->state, d->dlci, pn->mtu, pn->flow_ctrl, pn->credits); + + if ((pn->flow_ctrl == 0xf0 && s->cfc != RFCOMM_CFC_DISABLED) || + pn->flow_ctrl == 0xe0) { + d->cfc = RFCOMM_CFC_ENABLED; + d->tx_credits = pn->credits; + } else { + d->cfc = RFCOMM_CFC_DISABLED; + set_bit(RFCOMM_TX_THROTTLED, &d->flags); + } + + if (s->cfc == RFCOMM_CFC_UNKNOWN) + s->cfc = d->cfc; + + d->priority = pn->priority; + + d->mtu = __le16_to_cpu(pn->mtu); + + if (cr && d->mtu > s->mtu) + d->mtu = s->mtu; + + return 0; +} + +static int rfcomm_recv_pn(struct rfcomm_session *s, int cr, struct sk_buff *skb) +{ + struct rfcomm_pn *pn = (void *) skb->data; + struct rfcomm_dlc *d; + u8 dlci = pn->dlci; + + BT_DBG("session %p state %ld dlci %d", s, s->state, dlci); + + if (!dlci) + return 0; + + d = rfcomm_dlc_get(s, dlci); + if (d) { + if (cr) { + /* PN request */ + rfcomm_apply_pn(d, cr, pn); + rfcomm_send_pn(s, 0, d); + } else { 
+ /* PN response */ + switch (d->state) { + case BT_CONFIG: + rfcomm_apply_pn(d, cr, pn); + + d->state = BT_CONNECT; + rfcomm_send_sabm(s, d->dlci); + break; + } + } + } else { + u8 channel = __srv_channel(dlci); + + if (!cr) + return 0; + + /* PN request for non existing DLC. + * Assume incoming connection. */ + if (rfcomm_connect_ind(s, channel, &d)) { + d->dlci = dlci; + d->addr = __addr(s->initiator, dlci); + rfcomm_dlc_link(s, d); + + rfcomm_apply_pn(d, cr, pn); + + d->state = BT_OPEN; + rfcomm_send_pn(s, 0, d); + } else { + rfcomm_send_dm(s, dlci); + } + } + return 0; +} + +static int rfcomm_recv_rpn(struct rfcomm_session *s, int cr, int len, struct sk_buff *skb) +{ + struct rfcomm_rpn *rpn = (void *) skb->data; + u8 dlci = __get_dlci(rpn->dlci); + + u8 bit_rate = 0; + u8 data_bits = 0; + u8 stop_bits = 0; + u8 parity = 0; + u8 flow_ctrl = 0; + u8 xon_char = 0; + u8 xoff_char = 0; + u16 rpn_mask = RFCOMM_RPN_PM_ALL; + + BT_DBG("dlci %d cr %d len 0x%x bitr 0x%x line 0x%x flow 0x%x xonc 0x%x xoffc 0x%x pm 0x%x", + dlci, cr, len, rpn->bit_rate, rpn->line_settings, rpn->flow_ctrl, + rpn->xon_char, rpn->xoff_char, rpn->param_mask); + + if (!cr) + return 0; + + if (len == 1) { + /* This is a request, return default (according to ETSI TS 07.10) settings */ + bit_rate = RFCOMM_RPN_BR_9600; + data_bits = RFCOMM_RPN_DATA_8; + stop_bits = RFCOMM_RPN_STOP_1; + parity = RFCOMM_RPN_PARITY_NONE; + flow_ctrl = RFCOMM_RPN_FLOW_NONE; + xon_char = RFCOMM_RPN_XON_CHAR; + xoff_char = RFCOMM_RPN_XOFF_CHAR; + goto rpn_out; + } + + /* Check for sane values, ignore/accept bit_rate, 8 bits, 1 stop bit, + * no parity, no flow control lines, normal XON/XOFF chars */ + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_BITRATE)) { + bit_rate = rpn->bit_rate; + if (bit_rate > RFCOMM_RPN_BR_230400) { + BT_DBG("RPN bit rate mismatch 0x%x", bit_rate); + bit_rate = RFCOMM_RPN_BR_9600; + rpn_mask ^= RFCOMM_RPN_PM_BITRATE; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_DATA)) { + data_bits = __get_rpn_data_bits(rpn->line_settings); + if (data_bits != RFCOMM_RPN_DATA_8) { + BT_DBG("RPN data bits mismatch 0x%x", data_bits); + data_bits = RFCOMM_RPN_DATA_8; + rpn_mask ^= RFCOMM_RPN_PM_DATA; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_STOP)) { + stop_bits = __get_rpn_stop_bits(rpn->line_settings); + if (stop_bits != RFCOMM_RPN_STOP_1) { + BT_DBG("RPN stop bits mismatch 0x%x", stop_bits); + stop_bits = RFCOMM_RPN_STOP_1; + rpn_mask ^= RFCOMM_RPN_PM_STOP; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_PARITY)) { + parity = __get_rpn_parity(rpn->line_settings); + if (parity != RFCOMM_RPN_PARITY_NONE) { + BT_DBG("RPN parity mismatch 0x%x", parity); + parity = RFCOMM_RPN_PARITY_NONE; + rpn_mask ^= RFCOMM_RPN_PM_PARITY; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_FLOW)) { + flow_ctrl = rpn->flow_ctrl; + if (flow_ctrl != RFCOMM_RPN_FLOW_NONE) { + BT_DBG("RPN flow ctrl mismatch 0x%x", flow_ctrl); + flow_ctrl = RFCOMM_RPN_FLOW_NONE; + rpn_mask ^= RFCOMM_RPN_PM_FLOW; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XON)) { + xon_char = rpn->xon_char; + if (xon_char != RFCOMM_RPN_XON_CHAR) { + BT_DBG("RPN XON char mismatch 0x%x", xon_char); + xon_char = RFCOMM_RPN_XON_CHAR; + rpn_mask ^= RFCOMM_RPN_PM_XON; + } + } + + if (rpn->param_mask & cpu_to_le16(RFCOMM_RPN_PM_XOFF)) { + xoff_char = rpn->xoff_char; + if (xoff_char != RFCOMM_RPN_XOFF_CHAR) { + BT_DBG("RPN XOFF char mismatch 0x%x", xoff_char); + xoff_char = RFCOMM_RPN_XOFF_CHAR; + rpn_mask ^= RFCOMM_RPN_PM_XOFF; + 
} + } + +rpn_out: + rfcomm_send_rpn(s, 0, dlci, bit_rate, data_bits, stop_bits, + parity, flow_ctrl, xon_char, xoff_char, rpn_mask); + + return 0; +} + +static int rfcomm_recv_rls(struct rfcomm_session *s, int cr, struct sk_buff *skb) +{ + struct rfcomm_rls *rls = (void *) skb->data; + u8 dlci = __get_dlci(rls->dlci); + + BT_DBG("dlci %d cr %d status 0x%x", dlci, cr, rls->status); + + if (!cr) + return 0; + + /* We should probably do something with this information here. But + * for now it's sufficient just to reply -- Bluetooth 1.1 says it's + * mandatory to recognise and respond to RLS */ + + rfcomm_send_rls(s, 0, dlci, rls->status); + + return 0; +} + +static int rfcomm_recv_msc(struct rfcomm_session *s, int cr, struct sk_buff *skb) +{ + struct rfcomm_msc *msc = (void *) skb->data; + struct rfcomm_dlc *d; + u8 dlci = __get_dlci(msc->dlci); + + BT_DBG("dlci %d cr %d v24 0x%x", dlci, cr, msc->v24_sig); + + d = rfcomm_dlc_get(s, dlci); + if (!d) + return 0; + + if (cr) { + if (msc->v24_sig & RFCOMM_V24_FC && !d->cfc) + set_bit(RFCOMM_TX_THROTTLED, &d->flags); + else + clear_bit(RFCOMM_TX_THROTTLED, &d->flags); + + rfcomm_dlc_lock(d); + + d->remote_v24_sig = msc->v24_sig; + + if (d->modem_status) + d->modem_status(d, msc->v24_sig); + + rfcomm_dlc_unlock(d); + + rfcomm_send_msc(s, 0, dlci, msc->v24_sig); + + d->mscex |= RFCOMM_MSCEX_RX; + } else + d->mscex |= RFCOMM_MSCEX_TX; + + return 0; +} + +static int rfcomm_recv_mcc(struct rfcomm_session *s, struct sk_buff *skb) +{ + struct rfcomm_mcc *mcc = (void *) skb->data; + u8 type, cr, len; + + cr = __test_cr(mcc->type); + type = __get_mcc_type(mcc->type); + len = __get_mcc_len(mcc->len); + + BT_DBG("%p type 0x%x cr %d", s, type, cr); + + skb_pull(skb, 2); + + switch (type) { + case RFCOMM_PN: + rfcomm_recv_pn(s, cr, skb); + break; + + case RFCOMM_RPN: + rfcomm_recv_rpn(s, cr, len, skb); + break; + + case RFCOMM_RLS: + rfcomm_recv_rls(s, cr, skb); + break; + + case RFCOMM_MSC: + rfcomm_recv_msc(s, cr, skb); + break; + + case RFCOMM_FCOFF: + if (cr) { + set_bit(RFCOMM_TX_THROTTLED, &s->flags); + rfcomm_send_fcoff(s, 0); + } + break; + + case RFCOMM_FCON: + if (cr) { + clear_bit(RFCOMM_TX_THROTTLED, &s->flags); + rfcomm_send_fcon(s, 0); + } + break; + + case RFCOMM_TEST: + if (cr) + rfcomm_send_test(s, 0, skb->data, skb->len); + break; + + case RFCOMM_NSC: + break; + + default: + BT_ERR("Unknown control type 0x%02x", type); + rfcomm_send_nsc(s, cr, type); + break; + } + return 0; +} + +static int rfcomm_recv_data(struct rfcomm_session *s, u8 dlci, int pf, struct sk_buff *skb) +{ + struct rfcomm_dlc *d; + + BT_DBG("session %p state %ld dlci %d pf %d", s, s->state, dlci, pf); + + d = rfcomm_dlc_get(s, dlci); + if (!d) { + rfcomm_send_dm(s, dlci); + goto drop; + } + + if (pf && d->cfc) { + u8 credits = *(u8 *) skb->data; skb_pull(skb, 1); + + d->tx_credits += credits; + if (d->tx_credits) + clear_bit(RFCOMM_TX_THROTTLED, &d->flags); + } + + if (skb->len && d->state == BT_CONNECTED) { + rfcomm_dlc_lock(d); + d->rx_credits--; + d->data_ready(d, skb); + rfcomm_dlc_unlock(d); + return 0; + } + +drop: + kfree_skb(skb); + return 0; +} + +static struct rfcomm_session *rfcomm_recv_frame(struct rfcomm_session *s, + struct sk_buff *skb) +{ + struct rfcomm_hdr *hdr = (void *) skb->data; + u8 type, dlci, fcs; + + if (!s) { + /* no session, so free socket data */ + kfree_skb(skb); + return s; + } + + dlci = __get_dlci(hdr->addr); + type = __get_type(hdr->ctrl); + + /* Trim FCS */ + skb->len--; skb->tail--; + fcs = *(u8 *)skb_tail_pointer(skb); + + if 
(__check_fcs(skb->data, type, fcs)) { + BT_ERR("bad checksum in packet"); + kfree_skb(skb); + return s; + } + + if (__test_ea(hdr->len)) + skb_pull(skb, 3); + else + skb_pull(skb, 4); + + switch (type) { + case RFCOMM_SABM: + if (__test_pf(hdr->ctrl)) + rfcomm_recv_sabm(s, dlci); + break; + + case RFCOMM_DISC: + if (__test_pf(hdr->ctrl)) + s = rfcomm_recv_disc(s, dlci); + break; + + case RFCOMM_UA: + if (__test_pf(hdr->ctrl)) + s = rfcomm_recv_ua(s, dlci); + break; + + case RFCOMM_DM: + s = rfcomm_recv_dm(s, dlci); + break; + + case RFCOMM_UIH: + if (dlci) { + rfcomm_recv_data(s, dlci, __test_pf(hdr->ctrl), skb); + return s; + } + rfcomm_recv_mcc(s, skb); + break; + + default: + BT_ERR("Unknown packet type 0x%02x", type); + break; + } + kfree_skb(skb); + return s; +} + +/* ---- Connection and data processing ---- */ + +static void rfcomm_process_connect(struct rfcomm_session *s) +{ + struct rfcomm_dlc *d, *n; + + BT_DBG("session %p state %ld", s, s->state); + + list_for_each_entry_safe(d, n, &s->dlcs, list) { + if (d->state == BT_CONFIG) { + d->mtu = s->mtu; + if (rfcomm_check_security(d)) { + rfcomm_send_pn(s, 1, d); + } else { + set_bit(RFCOMM_AUTH_PENDING, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + } + } + } +} + +/* Send data queued for the DLC. + * Return number of frames left in the queue. + */ +static int rfcomm_process_tx(struct rfcomm_dlc *d) +{ + struct sk_buff *skb; + int err; + + BT_DBG("dlc %p state %ld cfc %d rx_credits %d tx_credits %d", + d, d->state, d->cfc, d->rx_credits, d->tx_credits); + + /* Send pending MSC */ + if (test_and_clear_bit(RFCOMM_MSC_PENDING, &d->flags)) + rfcomm_send_msc(d->session, 1, d->dlci, d->v24_sig); + + if (d->cfc) { + /* CFC enabled. + * Give them some credits */ + if (!test_bit(RFCOMM_RX_THROTTLED, &d->flags) && + d->rx_credits <= (d->cfc >> 2)) { + rfcomm_send_credits(d->session, d->addr, d->cfc - d->rx_credits); + d->rx_credits = d->cfc; + } + } else { + /* CFC disabled. + * Give ourselves some credits */ + d->tx_credits = 5; + } + + if (test_bit(RFCOMM_TX_THROTTLED, &d->flags)) + return skb_queue_len(&d->tx_queue); + + while (d->tx_credits && (skb = skb_dequeue(&d->tx_queue))) { + err = rfcomm_send_frame(d->session, skb->data, skb->len); + if (err < 0) { + skb_queue_head(&d->tx_queue, skb); + break; + } + kfree_skb(skb); + d->tx_credits--; + } + + if (d->cfc && !d->tx_credits) { + /* We're out of TX credits. + * Set TX_THROTTLED flag to avoid unnecessary wakeups by dlc_send.
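+ * rfcomm_recv_data() clears the flag again once the peer grants new + * credits.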
*/ + set_bit(RFCOMM_TX_THROTTLED, &d->flags); + } + + return skb_queue_len(&d->tx_queue); +} + +static void rfcomm_process_dlcs(struct rfcomm_session *s) +{ + struct rfcomm_dlc *d, *n; + + BT_DBG("session %p state %ld", s, s->state); + + list_for_each_entry_safe(d, n, &s->dlcs, list) { + if (test_bit(RFCOMM_TIMED_OUT, &d->flags)) { + __rfcomm_dlc_close(d, ETIMEDOUT); + continue; + } + + if (test_bit(RFCOMM_ENC_DROP, &d->flags)) { + __rfcomm_dlc_close(d, ECONNREFUSED); + continue; + } + + if (test_and_clear_bit(RFCOMM_AUTH_ACCEPT, &d->flags)) { + rfcomm_dlc_clear_timer(d); + if (d->out) { + rfcomm_send_pn(s, 1, d); + rfcomm_dlc_set_timer(d, RFCOMM_CONN_TIMEOUT); + } else { + if (d->defer_setup) { + set_bit(RFCOMM_DEFER_SETUP, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + + rfcomm_dlc_lock(d); + d->state = BT_CONNECT2; + d->state_change(d, 0); + rfcomm_dlc_unlock(d); + } else + rfcomm_dlc_accept(d); + } + continue; + } else if (test_and_clear_bit(RFCOMM_AUTH_REJECT, &d->flags)) { + rfcomm_dlc_clear_timer(d); + if (!d->out) + rfcomm_send_dm(s, d->dlci); + else + d->state = BT_CLOSED; + __rfcomm_dlc_close(d, ECONNREFUSED); + continue; + } + + if (test_bit(RFCOMM_SEC_PENDING, &d->flags)) + continue; + + if (test_bit(RFCOMM_TX_THROTTLED, &s->flags)) + continue; + + if ((d->state == BT_CONNECTED || d->state == BT_DISCONN) && + d->mscex == RFCOMM_MSCEX_OK) + rfcomm_process_tx(d); + } +} + +static struct rfcomm_session *rfcomm_process_rx(struct rfcomm_session *s) +{ + struct socket *sock = s->sock; + struct sock *sk = sock->sk; + struct sk_buff *skb; + + BT_DBG("session %p state %ld qlen %d", s, s->state, skb_queue_len(&sk->sk_receive_queue)); + + /* Get data directly from socket receive queue without copying it. */ + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + skb_orphan(skb); + if (!skb_linearize(skb)) { + s = rfcomm_recv_frame(s, skb); + if (!s) + break; + } else { + kfree_skb(skb); + } + } + + if (s && (sk->sk_state == BT_CLOSED)) + s = rfcomm_session_close(s, sk->sk_err); + + return s; +} + +static void rfcomm_accept_connection(struct rfcomm_session *s) +{ + struct socket *sock = s->sock, *nsock; + int err; + + /* Fast check for a new connection. + * Avoids unnecessary socket allocations. */ + if (list_empty(&bt_sk(sock->sk)->accept_q)) + return; + + BT_DBG("session %p", s); + + err = kernel_accept(sock, &nsock, O_NONBLOCK); + if (err < 0) + return; + + /* Set our callbacks */ + nsock->sk->sk_data_ready = rfcomm_l2data_ready; + nsock->sk->sk_state_change = rfcomm_l2state_change; + + s = rfcomm_session_add(nsock, BT_OPEN); + if (s) { + /* We should adjust MTU on incoming sessions. + * L2CAP MTU minus UIH header and FCS. */ + s->mtu = min(l2cap_pi(nsock->sk)->chan->omtu, + l2cap_pi(nsock->sk)->chan->imtu) - 5; + + rfcomm_schedule(); + } else + sock_release(nsock); +} + +static struct rfcomm_session *rfcomm_check_connection(struct rfcomm_session *s) +{ + struct sock *sk = s->sock->sk; + + BT_DBG("%p state %ld", s, s->state); + + switch (sk->sk_state) { + case BT_CONNECTED: + s->state = BT_CONNECT; + + /* We can adjust MTU on outgoing sessions. + * L2CAP MTU minus UIH header and FCS.
*/ + s->mtu = min(l2cap_pi(sk)->chan->omtu, l2cap_pi(sk)->chan->imtu) - 5; + + rfcomm_send_sabm(s, 0); + break; + + case BT_CLOSED: + s = rfcomm_session_close(s, sk->sk_err); + break; + } + return s; +} + +static void rfcomm_process_sessions(void) +{ + struct rfcomm_session *s, *n; + + rfcomm_lock(); + + list_for_each_entry_safe(s, n, &session_list, list) { + if (test_and_clear_bit(RFCOMM_TIMED_OUT, &s->flags)) { + s->state = BT_DISCONN; + rfcomm_send_disc(s, 0); + continue; + } + + switch (s->state) { + case BT_LISTEN: + rfcomm_accept_connection(s); + continue; + + case BT_BOUND: + s = rfcomm_check_connection(s); + break; + + default: + s = rfcomm_process_rx(s); + break; + } + + if (s) + rfcomm_process_dlcs(s); + } + + rfcomm_unlock(); +} + +static int rfcomm_add_listener(bdaddr_t *ba) +{ + struct sockaddr_l2 addr; + struct socket *sock; + struct sock *sk; + struct rfcomm_session *s; + int err = 0; + + /* Create socket */ + err = rfcomm_l2sock_create(&sock); + if (err < 0) { + BT_ERR("Create socket failed %d", err); + return err; + } + + /* Bind socket */ + bacpy(&addr.l2_bdaddr, ba); + addr.l2_family = AF_BLUETOOTH; + addr.l2_psm = cpu_to_le16(L2CAP_PSM_RFCOMM); + addr.l2_cid = 0; + addr.l2_bdaddr_type = BDADDR_BREDR; + err = kernel_bind(sock, (struct sockaddr *) &addr, sizeof(addr)); + if (err < 0) { + BT_ERR("Bind failed %d", err); + goto failed; + } + + /* Set L2CAP options */ + sk = sock->sk; + lock_sock(sk); + /* Set MTU to 0 so L2CAP can auto select the MTU */ + l2cap_pi(sk)->chan->imtu = 0; + release_sock(sk); + + /* Start listening on the socket */ + err = kernel_listen(sock, 10); + if (err) { + BT_ERR("Listen failed %d", err); + goto failed; + } + + /* Add listening session */ + s = rfcomm_session_add(sock, BT_LISTEN); + if (!s) { + err = -ENOMEM; + goto failed; + } + + return 0; +failed: + sock_release(sock); + return err; +} + +static void rfcomm_kill_listener(void) +{ + struct rfcomm_session *s, *n; + + BT_DBG(""); + + list_for_each_entry_safe(s, n, &session_list, list) + rfcomm_session_del(s); +} + +static int rfcomm_run(void *unused) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + BT_DBG(""); + + set_user_nice(current, -10); + + rfcomm_add_listener(BDADDR_ANY); + + add_wait_queue(&rfcomm_wq, &wait); + while (!kthread_should_stop()) { + + /* Process stuff */ + rfcomm_process_sessions(); + + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); + } + remove_wait_queue(&rfcomm_wq, &wait); + + rfcomm_kill_listener(); + + return 0; +} + +static void rfcomm_security_cfm(struct hci_conn *conn, u8 status, u8 encrypt) +{ + struct rfcomm_session *s; + struct rfcomm_dlc *d, *n; + + BT_DBG("conn %p status 0x%02x encrypt 0x%02x", conn, status, encrypt); + + s = rfcomm_session_get(&conn->hdev->bdaddr, &conn->dst); + if (!s) + return; + + list_for_each_entry_safe(d, n, &s->dlcs, list) { + if (test_and_clear_bit(RFCOMM_SEC_PENDING, &d->flags)) { + rfcomm_dlc_clear_timer(d); + if (status || encrypt == 0x00) { + set_bit(RFCOMM_ENC_DROP, &d->flags); + continue; + } + } + + if (d->state == BT_CONNECTED && !status && encrypt == 0x00) { + if (d->sec_level == BT_SECURITY_MEDIUM) { + set_bit(RFCOMM_SEC_PENDING, &d->flags); + rfcomm_dlc_set_timer(d, RFCOMM_AUTH_TIMEOUT); + continue; + } else if (d->sec_level == BT_SECURITY_HIGH || + d->sec_level == BT_SECURITY_FIPS) { + set_bit(RFCOMM_ENC_DROP, &d->flags); + continue; + } + } + + if (!test_and_clear_bit(RFCOMM_AUTH_PENDING, &d->flags)) + continue; + + if (!status && hci_conn_check_secure(conn, d->sec_level)) + set_bit(RFCOMM_AUTH_ACCEPT, 
&d->flags); + else + set_bit(RFCOMM_AUTH_REJECT, &d->flags); + } + + rfcomm_schedule(); +} + +static struct hci_cb rfcomm_cb = { + .name = "RFCOMM", + .security_cfm = rfcomm_security_cfm +}; + +static int rfcomm_dlc_debugfs_show(struct seq_file *f, void *x) +{ + struct rfcomm_session *s; + + rfcomm_lock(); + + list_for_each_entry(s, &session_list, list) { + struct l2cap_chan *chan = l2cap_pi(s->sock->sk)->chan; + struct rfcomm_dlc *d; + list_for_each_entry(d, &s->dlcs, list) { + seq_printf(f, "%pMR %pMR %ld %d %d %d %d\n", + &chan->src, &chan->dst, + d->state, d->dlci, d->mtu, + d->rx_credits, d->tx_credits); + } + } + + rfcomm_unlock(); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(rfcomm_dlc_debugfs); + +static struct dentry *rfcomm_dlc_debugfs; + +/* ---- Initialization ---- */ +static int __init rfcomm_init(void) +{ + int err; + + hci_register_cb(&rfcomm_cb); + + rfcomm_thread = kthread_run(rfcomm_run, NULL, "krfcommd"); + if (IS_ERR(rfcomm_thread)) { + err = PTR_ERR(rfcomm_thread); + goto unregister; + } + + err = rfcomm_init_ttys(); + if (err < 0) + goto stop; + + err = rfcomm_init_sockets(); + if (err < 0) + goto cleanup; + + BT_INFO("RFCOMM ver %s", VERSION); + + if (IS_ERR_OR_NULL(bt_debugfs)) + return 0; + + rfcomm_dlc_debugfs = debugfs_create_file("rfcomm_dlc", 0444, + bt_debugfs, NULL, + &rfcomm_dlc_debugfs_fops); + + return 0; + +cleanup: + rfcomm_cleanup_ttys(); + +stop: + kthread_stop(rfcomm_thread); + +unregister: + hci_unregister_cb(&rfcomm_cb); + + return err; +} + +static void __exit rfcomm_exit(void) +{ + debugfs_remove(rfcomm_dlc_debugfs); + + hci_unregister_cb(&rfcomm_cb); + + kthread_stop(rfcomm_thread); + + rfcomm_cleanup_ttys(); + + rfcomm_cleanup_sockets(); +} + +module_init(rfcomm_init); +module_exit(rfcomm_exit); + +module_param(disable_cfc, bool, 0644); +MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control"); + +module_param(channel_mtu, int, 0644); +MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel"); + +module_param(l2cap_ertm, bool, 0644); +MODULE_PARM_DESC(l2cap_ertm, "Use L2CAP ERTM mode for connection"); + +MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); +MODULE_DESCRIPTION("Bluetooth RFCOMM ver " VERSION); +MODULE_VERSION(VERSION); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("bt-proto-3"); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c new file mode 100644 index 000000000..4397e14ff --- /dev/null +++ b/net/bluetooth/rfcomm/sock.c @@ -0,0 +1,1093 @@ +/* + RFCOMM implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> + Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* + * RFCOMM sockets. + */ +#include <linux/compat.h> +#include <linux/export.h> +#include <linux/debugfs.h> +#include <linux/sched/signal.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> +#include <net/bluetooth/rfcomm.h> + +static const struct proto_ops rfcomm_sock_ops; + +static struct bt_sock_list rfcomm_sk_list = { + .lock = __RW_LOCK_UNLOCKED(rfcomm_sk_list.lock) +}; + +static void rfcomm_sock_close(struct sock *sk); +static void rfcomm_sock_kill(struct sock *sk); + +/* ---- DLC callbacks ---- + * + * called under rfcomm_dlc_lock() + */ +static void rfcomm_sk_data_ready(struct rfcomm_dlc *d, struct sk_buff *skb) +{ + struct sock *sk = d->owner; + if (!sk) + return; + + atomic_add(skb->len, &sk->sk_rmem_alloc); + skb_queue_tail(&sk->sk_receive_queue, skb); + sk->sk_data_ready(sk); + + if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) + rfcomm_dlc_throttle(d); +} + +static void rfcomm_sk_state_change(struct rfcomm_dlc *d, int err) +{ + struct sock *sk = d->owner, *parent; + + if (!sk) + return; + + BT_DBG("dlc %p state %ld err %d", d, d->state, err); + + lock_sock(sk); + + if (err) + sk->sk_err = err; + + sk->sk_state = d->state; + + parent = bt_sk(sk)->parent; + if (parent) { + if (d->state == BT_CLOSED) { + sock_set_flag(sk, SOCK_ZAPPED); + bt_accept_unlink(sk); + } + parent->sk_data_ready(parent); + } else { + if (d->state == BT_CONNECTED) + rfcomm_session_getaddr(d->session, + &rfcomm_pi(sk)->src, NULL); + sk->sk_state_change(sk); + } + + release_sock(sk); + + if (parent && sock_flag(sk, SOCK_ZAPPED)) { + /* We have to drop DLC lock here, otherwise + * rfcomm_sock_destruct() will dead lock. */ + rfcomm_dlc_unlock(d); + rfcomm_sock_kill(sk); + rfcomm_dlc_lock(d); + } +} + +/* ---- Socket functions ---- */ +static struct sock *__rfcomm_get_listen_sock_by_addr(u8 channel, bdaddr_t *src) +{ + struct sock *sk = NULL; + + sk_for_each(sk, &rfcomm_sk_list.head) { + if (rfcomm_pi(sk)->channel != channel) + continue; + + if (bacmp(&rfcomm_pi(sk)->src, src)) + continue; + + if (sk->sk_state == BT_BOUND || sk->sk_state == BT_LISTEN) + break; + } + + return sk ? sk : NULL; +} + +/* Find socket with channel and source bdaddr. + * Returns closest match. + */ +static struct sock *rfcomm_get_sock_by_channel(int state, u8 channel, bdaddr_t *src) +{ + struct sock *sk = NULL, *sk1 = NULL; + + read_lock(&rfcomm_sk_list.lock); + + sk_for_each(sk, &rfcomm_sk_list.head) { + if (state && sk->sk_state != state) + continue; + + if (rfcomm_pi(sk)->channel == channel) { + /* Exact match. */ + if (!bacmp(&rfcomm_pi(sk)->src, src)) + break; + + /* Closest match */ + if (!bacmp(&rfcomm_pi(sk)->src, BDADDR_ANY)) + sk1 = sk; + } + } + + read_unlock(&rfcomm_sk_list.lock); + + return sk ? 
sk : sk1; +} + +static void rfcomm_sock_destruct(struct sock *sk) +{ + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + + BT_DBG("sk %p dlc %p", sk, d); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); + + rfcomm_dlc_lock(d); + rfcomm_pi(sk)->dlc = NULL; + + /* Detach DLC if it's owned by this socket */ + if (d->owner == sk) + d->owner = NULL; + rfcomm_dlc_unlock(d); + + rfcomm_dlc_put(d); +} + +static void rfcomm_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + BT_DBG("parent %p", parent); + + /* Close not yet accepted dlcs */ + while ((sk = bt_accept_dequeue(parent, NULL))) { + rfcomm_sock_close(sk); + rfcomm_sock_kill(sk); + } + + parent->sk_state = BT_CLOSED; + sock_set_flag(parent, SOCK_ZAPPED); +} + +/* Kill socket (only if zapped and orphan) + * Must be called on unlocked socket. + */ +static void rfcomm_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + return; + + BT_DBG("sk %p state %d refcnt %d", sk, sk->sk_state, refcount_read(&sk->sk_refcnt)); + + /* Kill poor orphan */ + bt_sock_unlink(&rfcomm_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +static void __rfcomm_sock_close(struct sock *sk) +{ + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + + BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); + + switch (sk->sk_state) { + case BT_LISTEN: + rfcomm_sock_cleanup_listen(sk); + break; + + case BT_CONNECT: + case BT_CONNECT2: + case BT_CONFIG: + case BT_CONNECTED: + rfcomm_dlc_close(d, 0); + fallthrough; + + default: + sock_set_flag(sk, SOCK_ZAPPED); + break; + } +} + +/* Close socket. + * Must be called on unlocked socket. + */ +static void rfcomm_sock_close(struct sock *sk) +{ + lock_sock(sk); + __rfcomm_sock_close(sk); + release_sock(sk); +} + +static void rfcomm_sock_init(struct sock *sk, struct sock *parent) +{ + struct rfcomm_pinfo *pi = rfcomm_pi(sk); + + BT_DBG("sk %p", sk); + + if (parent) { + sk->sk_type = parent->sk_type; + pi->dlc->defer_setup = test_bit(BT_SK_DEFER_SETUP, + &bt_sk(parent)->flags); + + pi->sec_level = rfcomm_pi(parent)->sec_level; + pi->role_switch = rfcomm_pi(parent)->role_switch; + + security_sk_clone(parent, sk); + } else { + pi->dlc->defer_setup = 0; + + pi->sec_level = BT_SECURITY_LOW; + pi->role_switch = 0; + } + + pi->dlc->sec_level = pi->sec_level; + pi->dlc->role_switch = pi->role_switch; +} + +static struct proto rfcomm_proto = { + .name = "RFCOMM", + .owner = THIS_MODULE, + .obj_size = sizeof(struct rfcomm_pinfo) +}; + +static struct sock *rfcomm_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern) +{ + struct rfcomm_dlc *d; + struct sock *sk; + + sk = sk_alloc(net, PF_BLUETOOTH, prio, &rfcomm_proto, kern); + if (!sk) + return NULL; + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&bt_sk(sk)->accept_q); + + d = rfcomm_dlc_alloc(prio); + if (!d) { + sk_free(sk); + return NULL; + } + + d->data_ready = rfcomm_sk_data_ready; + d->state_change = rfcomm_sk_state_change; + + rfcomm_pi(sk)->dlc = d; + d->owner = sk; + + sk->sk_destruct = rfcomm_sock_destruct; + sk->sk_sndtimeo = RFCOMM_CONN_TIMEOUT; + + sk->sk_sndbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; + sk->sk_rcvbuf = RFCOMM_MAX_CREDITS * RFCOMM_DEFAULT_MTU * 10; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = BT_OPEN; + + bt_sock_link(&rfcomm_sk_list, sk); + + BT_DBG("sk %p", sk); + return sk; +} + +static int rfcomm_sock_create(struct net *net, struct socket *sock, + int protocol, int kern) +{ + struct sock 
*sk; + + BT_DBG("sock %p", sock); + + sock->state = SS_UNCONNECTED; + + if (sock->type != SOCK_STREAM && sock->type != SOCK_RAW) + return -ESOCKTNOSUPPORT; + + sock->ops = &rfcomm_sock_ops; + + sk = rfcomm_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); + if (!sk) + return -ENOMEM; + + rfcomm_sock_init(sk, NULL); + return 0; +} + +static int rfcomm_sock_bind(struct socket *sock, struct sockaddr *addr, int addr_len) +{ + struct sockaddr_rc sa; + struct sock *sk = sock->sk; + int len, err = 0; + + if (!addr || addr_len < offsetofend(struct sockaddr, sa_family) || + addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + memset(&sa, 0, sizeof(sa)); + len = min_t(unsigned int, sizeof(sa), addr_len); + memcpy(&sa, addr, len); + + BT_DBG("sk %p %pMR", sk, &sa.rc_bdaddr); + + lock_sock(sk); + + if (sk->sk_state != BT_OPEN) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + + write_lock(&rfcomm_sk_list.lock); + + if (sa.rc_channel && + __rfcomm_get_listen_sock_by_addr(sa.rc_channel, &sa.rc_bdaddr)) { + err = -EADDRINUSE; + } else { + /* Save source address */ + bacpy(&rfcomm_pi(sk)->src, &sa.rc_bdaddr); + rfcomm_pi(sk)->channel = sa.rc_channel; + sk->sk_state = BT_BOUND; + } + + write_unlock(&rfcomm_sk_list.lock); + +done: + release_sock(sk); + return err; +} + +static int rfcomm_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags) +{ + struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; + struct sock *sk = sock->sk; + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + int err = 0; + + BT_DBG("sk %p", sk); + + if (alen < sizeof(struct sockaddr_rc) || + addr->sa_family != AF_BLUETOOTH) + return -EINVAL; + + sock_hold(sk); + lock_sock(sk); + + if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + + sk->sk_state = BT_CONNECT; + bacpy(&rfcomm_pi(sk)->dst, &sa->rc_bdaddr); + rfcomm_pi(sk)->channel = sa->rc_channel; + + d->sec_level = rfcomm_pi(sk)->sec_level; + d->role_switch = rfcomm_pi(sk)->role_switch; + + /* Drop sock lock to avoid potential deadlock with the RFCOMM lock */ + release_sock(sk); + err = rfcomm_dlc_open(d, &rfcomm_pi(sk)->src, &sa->rc_bdaddr, + sa->rc_channel); + lock_sock(sk); + if (!err && !sock_flag(sk, SOCK_ZAPPED)) + err = bt_sock_wait_state(sk, BT_CONNECTED, + sock_sndtimeo(sk, flags & O_NONBLOCK)); + +done: + release_sock(sk); + sock_put(sk); + return err; +} + +static int rfcomm_sock_listen(struct socket *sock, int backlog) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sk %p backlog %d", sk, backlog); + + lock_sock(sk); + + if (sk->sk_state != BT_BOUND) { + err = -EBADFD; + goto done; + } + + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + + if (!rfcomm_pi(sk)->channel) { + bdaddr_t *src = &rfcomm_pi(sk)->src; + u8 channel; + + err = -EINVAL; + + write_lock(&rfcomm_sk_list.lock); + + for (channel = 1; channel < 31; channel++) + if (!__rfcomm_get_listen_sock_by_addr(channel, src)) { + rfcomm_pi(sk)->channel = channel; + err = 0; + break; + } + + write_unlock(&rfcomm_sk_list.lock); + + if (err < 0) + goto done; + } + + sk->sk_max_ack_backlog = backlog; + sk->sk_ack_backlog = 0; + sk->sk_state = BT_LISTEN; + +done: + release_sock(sk); + return err; +} + +static int rfcomm_sock_accept(struct socket *sock, struct socket *newsock, int flags, + bool kern) +{ + DEFINE_WAIT_FUNC(wait, woken_wake_function); + struct sock *sk = sock->sk, *nsk; + long timeo; + 
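+ /* Illustrative userspace counterpart (a sketch, not part of this + * commit; backlog and channel values assumed): the wait loop below + * services a listener created roughly as + * + * int fd = socket(AF_BLUETOOTH, SOCK_STREAM, BTPROTO_RFCOMM); + * struct sockaddr_rc sa = { .rc_family = AF_BLUETOOTH }; + * sa.rc_channel = 0; (0 lets rfcomm_sock_listen() pick a channel) + * bind(fd, (struct sockaddr *)&sa, sizeof(sa)); + * listen(fd, 10); + * int nfd = accept(fd, NULL, NULL); (ends up in this function) + */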
int err = 0; + + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + goto done; + } + + timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK); + + BT_DBG("sk %p timeo %ld", sk, timeo); + + /* Wait for an incoming connection. (wake-one). */ + add_wait_queue_exclusive(sk_sleep(sk), &wait); + while (1) { + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + break; + } + + nsk = bt_accept_dequeue(sk, newsock); + if (nsk) + break; + + if (!timeo) { + err = -EAGAIN; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + + lock_sock_nested(sk, SINGLE_DEPTH_NESTING); + } + remove_wait_queue(sk_sleep(sk), &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + + BT_DBG("new socket %p", nsk); + +done: + release_sock(sk); + return err; +} + +static int rfcomm_sock_getname(struct socket *sock, struct sockaddr *addr, int peer) +{ + struct sockaddr_rc *sa = (struct sockaddr_rc *) addr; + struct sock *sk = sock->sk; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (peer && sk->sk_state != BT_CONNECTED && + sk->sk_state != BT_CONNECT && sk->sk_state != BT_CONNECT2) + return -ENOTCONN; + + memset(sa, 0, sizeof(*sa)); + sa->rc_family = AF_BLUETOOTH; + sa->rc_channel = rfcomm_pi(sk)->channel; + if (peer) + bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->dst); + else + bacpy(&sa->rc_bdaddr, &rfcomm_pi(sk)->src); + + return sizeof(struct sockaddr_rc); +} + +static int rfcomm_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + struct sock *sk = sock->sk; + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + struct sk_buff *skb; + int sent; + + if (test_bit(RFCOMM_DEFER_SETUP, &d->flags)) + return -ENOTCONN; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + if (sk->sk_shutdown & SEND_SHUTDOWN) + return -EPIPE; + + BT_DBG("sock %p, sk %p", sock, sk); + + lock_sock(sk); + + sent = bt_sock_wait_ready(sk, msg->msg_flags); + + release_sock(sk); + + if (sent) + return sent; + + skb = bt_skb_sendmmsg(sk, msg, len, d->mtu, RFCOMM_SKB_HEAD_RESERVE, + RFCOMM_SKB_TAIL_RESERVE); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + sent = rfcomm_dlc_send(d, skb); + if (sent < 0) + kfree_skb(skb); + + return sent; +} + +static int rfcomm_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t size, int flags) +{ + struct sock *sk = sock->sk; + struct rfcomm_dlc *d = rfcomm_pi(sk)->dlc; + int len; + + if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { + rfcomm_dlc_accept(d); + return 0; + } + + len = bt_sock_stream_recvmsg(sock, msg, size, flags); + + lock_sock(sk); + if (!(flags & MSG_PEEK) && len > 0) + atomic_sub(len, &sk->sk_rmem_alloc); + + if (atomic_read(&sk->sk_rmem_alloc) <= (sk->sk_rcvbuf >> 2)) + rfcomm_dlc_unthrottle(rfcomm_pi(sk)->dlc); + release_sock(sk); + + return len; +} + +static int rfcomm_sock_setsockopt_old(struct socket *sock, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + int err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + lock_sock(sk); + + switch (optname) { + case RFCOMM_LM: + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { + err = -EFAULT; + break; + } + + if (opt & RFCOMM_LM_FIPS) { + err = -EINVAL; + break; + } + + if (opt & RFCOMM_LM_AUTH) + rfcomm_pi(sk)->sec_level = BT_SECURITY_LOW; + if (opt & RFCOMM_LM_ENCRYPT) + rfcomm_pi(sk)->sec_level = BT_SECURITY_MEDIUM; + if (opt & RFCOMM_LM_SECURE) + rfcomm_pi(sk)->sec_level = BT_SECURITY_HIGH; + + 
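+ /* Usage sketch (assumed caller, not part of this commit): asking + * for an authenticated and encrypted link looks like + * + * u32 lm = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT; + * setsockopt(fd, SOL_RFCOMM, RFCOMM_LM, &lm, sizeof(lm)); + * + * which is routed here via rfcomm_sock_setsockopt() and leaves + * sec_level at BT_SECURITY_MEDIUM before the role bit below. */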
rfcomm_pi(sk)->role_switch = (opt & RFCOMM_LM_MASTER); + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + struct bt_security sec; + int err = 0; + size_t len; + u32 opt; + + BT_DBG("sk %p", sk); + + if (level == SOL_RFCOMM) + return rfcomm_sock_setsockopt_old(sock, optname, optval, optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + lock_sock(sk); + + switch (optname) { + case BT_SECURITY: + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + break; + } + + sec.level = BT_SECURITY_LOW; + + len = min_t(unsigned int, sizeof(sec), optlen); + if (copy_from_sockptr(&sec, optval, len)) { + err = -EFAULT; + break; + } + + if (sec.level > BT_SECURITY_HIGH) { + err = -EINVAL; + break; + } + + rfcomm_pi(sk)->sec_level = sec.level; + break; + + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (copy_from_sockptr(&opt, optval, sizeof(u32))) { + err = -EFAULT; + break; + } + + if (opt) + set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + else + clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags); + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int rfcomm_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct sock *l2cap_sk; + struct l2cap_conn *conn; + struct rfcomm_conninfo cinfo; + int len, err = 0; + u32 opt; + + BT_DBG("sk %p", sk); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case RFCOMM_LM: + switch (rfcomm_pi(sk)->sec_level) { + case BT_SECURITY_LOW: + opt = RFCOMM_LM_AUTH; + break; + case BT_SECURITY_MEDIUM: + opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT; + break; + case BT_SECURITY_HIGH: + opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | + RFCOMM_LM_SECURE; + break; + case BT_SECURITY_FIPS: + opt = RFCOMM_LM_AUTH | RFCOMM_LM_ENCRYPT | + RFCOMM_LM_SECURE | RFCOMM_LM_FIPS; + break; + default: + opt = 0; + break; + } + + if (rfcomm_pi(sk)->role_switch) + opt |= RFCOMM_LM_MASTER; + + if (put_user(opt, (u32 __user *) optval)) + err = -EFAULT; + + break; + + case RFCOMM_CONNINFO: + if (sk->sk_state != BT_CONNECTED && + !rfcomm_pi(sk)->dlc->defer_setup) { + err = -ENOTCONN; + break; + } + + l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk; + conn = l2cap_pi(l2cap_sk)->chan->conn; + + memset(&cinfo, 0, sizeof(cinfo)); + cinfo.hci_handle = conn->hcon->handle; + memcpy(cinfo.dev_class, conn->hcon->dev_class, 3); + + len = min_t(unsigned int, len, sizeof(cinfo)); + if (copy_to_user(optval, (char *) &cinfo, len)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int rfcomm_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + struct bt_security sec; + int len, err = 0; + + BT_DBG("sk %p", sk); + + if (level == SOL_RFCOMM) + return rfcomm_sock_getsockopt_old(sock, optname, optval, optlen); + + if (level != SOL_BLUETOOTH) + return -ENOPROTOOPT; + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + case BT_SECURITY: + if (sk->sk_type != SOCK_STREAM) { + err = -EINVAL; + break; + } + + sec.level = rfcomm_pi(sk)->sec_level; + sec.key_size = 0; + + len = 
min_t(unsigned int, len, sizeof(sec)); + if (copy_to_user(optval, (char *) &sec, len)) + err = -EFAULT; + + break; + + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), + (u32 __user *) optval)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int rfcomm_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + struct sock *sk __maybe_unused = sock->sk; + int err; + + BT_DBG("sk %p cmd %x arg %lx", sk, cmd, arg); + + err = bt_sock_ioctl(sock, cmd, arg); + + if (err == -ENOIOCTLCMD) { +#ifdef CONFIG_BT_RFCOMM_TTY + lock_sock(sk); + err = rfcomm_dev_ioctl(sk, cmd, (void __user *) arg); + release_sock(sk); +#else + err = -EOPNOTSUPP; +#endif + } + + return err; +} + +#ifdef CONFIG_COMPAT +static int rfcomm_sock_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) +{ + return rfcomm_sock_ioctl(sock, cmd, (unsigned long)compat_ptr(arg)); +} +#endif + +static int rfcomm_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + lock_sock(sk); + if (!sk->sk_shutdown) { + sk->sk_shutdown = SHUTDOWN_MASK; + + release_sock(sk); + __rfcomm_sock_close(sk); + lock_sock(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + } + release_sock(sk); + return err; +} + +static int rfcomm_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + err = rfcomm_sock_shutdown(sock, 2); + + sock_orphan(sk); + rfcomm_sock_kill(sk); + return err; +} + +/* ---- RFCOMM core layer callbacks ---- + * + * called under rfcomm_lock() + */ +int rfcomm_connect_ind(struct rfcomm_session *s, u8 channel, struct rfcomm_dlc **d) +{ + struct sock *sk, *parent; + bdaddr_t src, dst; + int result = 0; + + BT_DBG("session %p channel %d", s, channel); + + rfcomm_session_getaddr(s, &src, &dst); + + /* Check if we have socket listening on channel */ + parent = rfcomm_get_sock_by_channel(BT_LISTEN, channel, &src); + if (!parent) + return 0; + + lock_sock(parent); + + /* Check for backlog size */ + if (sk_acceptq_is_full(parent)) { + BT_DBG("backlog full %d", parent->sk_ack_backlog); + goto done; + } + + sk = rfcomm_sock_alloc(sock_net(parent), NULL, BTPROTO_RFCOMM, GFP_ATOMIC, 0); + if (!sk) + goto done; + + bt_sock_reclassify_lock(sk, BTPROTO_RFCOMM); + + rfcomm_sock_init(sk, parent); + bacpy(&rfcomm_pi(sk)->src, &src); + bacpy(&rfcomm_pi(sk)->dst, &dst); + rfcomm_pi(sk)->channel = channel; + + sk->sk_state = BT_CONFIG; + bt_accept_enqueue(parent, sk, true); + + /* Accept connection and return socket DLC */ + *d = rfcomm_pi(sk)->dlc; + result = 1; + +done: + release_sock(parent); + + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) + parent->sk_state_change(parent); + + return result; +} + +static int rfcomm_sock_debugfs_show(struct seq_file *f, void *p) +{ + struct sock *sk; + + read_lock(&rfcomm_sk_list.lock); + + sk_for_each(sk, &rfcomm_sk_list.head) { + seq_printf(f, "%pMR %pMR %d %d\n", + &rfcomm_pi(sk)->src, &rfcomm_pi(sk)->dst, + sk->sk_state, rfcomm_pi(sk)->channel); + } + + read_unlock(&rfcomm_sk_list.lock); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(rfcomm_sock_debugfs); + +static struct dentry 
*rfcomm_sock_debugfs; + +static const struct proto_ops rfcomm_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = rfcomm_sock_release, + .bind = rfcomm_sock_bind, + .connect = rfcomm_sock_connect, + .listen = rfcomm_sock_listen, + .accept = rfcomm_sock_accept, + .getname = rfcomm_sock_getname, + .sendmsg = rfcomm_sock_sendmsg, + .recvmsg = rfcomm_sock_recvmsg, + .shutdown = rfcomm_sock_shutdown, + .setsockopt = rfcomm_sock_setsockopt, + .getsockopt = rfcomm_sock_getsockopt, + .ioctl = rfcomm_sock_ioctl, + .gettstamp = sock_gettstamp, + .poll = bt_sock_poll, + .socketpair = sock_no_socketpair, + .mmap = sock_no_mmap, +#ifdef CONFIG_COMPAT + .compat_ioctl = rfcomm_sock_compat_ioctl, +#endif +}; + +static const struct net_proto_family rfcomm_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = rfcomm_sock_create +}; + +int __init rfcomm_init_sockets(void) +{ + int err; + + BUILD_BUG_ON(sizeof(struct sockaddr_rc) > sizeof(struct sockaddr)); + + err = proto_register(&rfcomm_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_RFCOMM, &rfcomm_sock_family_ops); + if (err < 0) { + BT_ERR("RFCOMM socket layer registration failed"); + goto error; + } + + err = bt_procfs_init(&init_net, "rfcomm", &rfcomm_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create RFCOMM proc file"); + bt_sock_unregister(BTPROTO_RFCOMM); + goto error; + } + + BT_INFO("RFCOMM socket layer initialized"); + + if (IS_ERR_OR_NULL(bt_debugfs)) + return 0; + + rfcomm_sock_debugfs = debugfs_create_file("rfcomm", 0444, + bt_debugfs, NULL, + &rfcomm_sock_debugfs_fops); + + return 0; + +error: + proto_unregister(&rfcomm_proto); + return err; +} + +void __exit rfcomm_cleanup_sockets(void) +{ + bt_procfs_cleanup(&init_net, "rfcomm"); + + debugfs_remove(rfcomm_sock_debugfs); + + bt_sock_unregister(BTPROTO_RFCOMM); + + proto_unregister(&rfcomm_proto); +} diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c new file mode 100644 index 000000000..8009e0e93 --- /dev/null +++ b/net/bluetooth/rfcomm/tty.c @@ -0,0 +1,1162 @@ +/* + RFCOMM implementation for Linux Bluetooth stack (BlueZ). + Copyright (C) 2002 Maxim Krasnyansky <maxk@qualcomm.com> + Copyright (C) 2002 Marcel Holtmann <marcel@holtmann.org> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* + * RFCOMM TTY. 
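+ * + * Exposes each bound DLC as a /dev/rfcommN serial device backed by + * a tty_port; devices are created and released through the + * RFCOMMCREATEDEV and RFCOMMRELEASEDEV ioctls handled below.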
+ */ + +#include <linux/module.h> + +#include <linux/tty.h> +#include <linux/tty_driver.h> +#include <linux/tty_flip.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/rfcomm.h> + +#define RFCOMM_TTY_PORTS RFCOMM_MAX_DEV /* whole lotta rfcomm devices */ +#define RFCOMM_TTY_MAJOR 216 /* device node major id of the usb/bluetooth.c driver */ +#define RFCOMM_TTY_MINOR 0 + +static DEFINE_MUTEX(rfcomm_ioctl_mutex); +static struct tty_driver *rfcomm_tty_driver; + +struct rfcomm_dev { + struct tty_port port; + struct list_head list; + + char name[12]; + int id; + unsigned long flags; + int err; + + unsigned long status; /* don't export to userspace */ + + bdaddr_t src; + bdaddr_t dst; + u8 channel; + + uint modem_status; + + struct rfcomm_dlc *dlc; + + struct device *tty_dev; + + atomic_t wmem_alloc; + + struct sk_buff_head pending; +}; + +static LIST_HEAD(rfcomm_dev_list); +static DEFINE_MUTEX(rfcomm_dev_lock); + +static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb); +static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err); +static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig); + +/* ---- Device functions ---- */ + +static void rfcomm_dev_destruct(struct tty_port *port) +{ + struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); + struct rfcomm_dlc *dlc = dev->dlc; + + BT_DBG("dev %p dlc %p", dev, dlc); + + rfcomm_dlc_lock(dlc); + /* Detach DLC if it's owned by this dev */ + if (dlc->owner == dev) + dlc->owner = NULL; + rfcomm_dlc_unlock(dlc); + + rfcomm_dlc_put(dlc); + + if (dev->tty_dev) + tty_unregister_device(rfcomm_tty_driver, dev->id); + + mutex_lock(&rfcomm_dev_lock); + list_del(&dev->list); + mutex_unlock(&rfcomm_dev_lock); + + kfree(dev); + + /* It's safe to call module_put() here because socket still + holds reference to this module. 
*/ + module_put(THIS_MODULE); +} + +/* device-specific initialization: open the dlc */ +static int rfcomm_dev_activate(struct tty_port *port, struct tty_struct *tty) +{ + struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); + int err; + + err = rfcomm_dlc_open(dev->dlc, &dev->src, &dev->dst, dev->channel); + if (err) + set_bit(TTY_IO_ERROR, &tty->flags); + return err; +} + +/* we block the open until the dlc->state becomes BT_CONNECTED */ +static int rfcomm_dev_carrier_raised(struct tty_port *port) +{ + struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); + + return (dev->dlc->state == BT_CONNECTED); +} + +/* device-specific cleanup: close the dlc */ +static void rfcomm_dev_shutdown(struct tty_port *port) +{ + struct rfcomm_dev *dev = container_of(port, struct rfcomm_dev, port); + + if (dev->tty_dev->parent) + device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST); + + /* close the dlc */ + rfcomm_dlc_close(dev->dlc, 0); +} + +static const struct tty_port_operations rfcomm_port_ops = { + .destruct = rfcomm_dev_destruct, + .activate = rfcomm_dev_activate, + .shutdown = rfcomm_dev_shutdown, + .carrier_raised = rfcomm_dev_carrier_raised, +}; + +static struct rfcomm_dev *__rfcomm_dev_lookup(int id) +{ + struct rfcomm_dev *dev; + + list_for_each_entry(dev, &rfcomm_dev_list, list) + if (dev->id == id) + return dev; + + return NULL; +} + +static struct rfcomm_dev *rfcomm_dev_get(int id) +{ + struct rfcomm_dev *dev; + + mutex_lock(&rfcomm_dev_lock); + + dev = __rfcomm_dev_lookup(id); + + if (dev && !tty_port_get(&dev->port)) + dev = NULL; + + mutex_unlock(&rfcomm_dev_lock); + + return dev; +} + +static void rfcomm_reparent_device(struct rfcomm_dev *dev) +{ + struct hci_dev *hdev; + struct hci_conn *conn; + + hdev = hci_get_route(&dev->dst, &dev->src, BDADDR_BREDR); + if (!hdev) + return; + + /* The lookup results are unsafe to access without the + * hci device lock (FIXME: why is this not documented?) + */ + hci_dev_lock(hdev); + conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &dev->dst); + + /* Just because the acl link is in the hash table is no + * guarantee the sysfs device has been added ... 
+ */ + if (conn && device_is_registered(&conn->dev)) + device_move(dev->tty_dev, &conn->dev, DPM_ORDER_DEV_AFTER_PARENT); + + hci_dev_unlock(hdev); + hci_dev_put(hdev); +} + +static ssize_t address_show(struct device *tty_dev, + struct device_attribute *attr, char *buf) +{ + struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); + return sprintf(buf, "%pMR\n", &dev->dst); +} + +static ssize_t channel_show(struct device *tty_dev, + struct device_attribute *attr, char *buf) +{ + struct rfcomm_dev *dev = dev_get_drvdata(tty_dev); + return sprintf(buf, "%d\n", dev->channel); +} + +static DEVICE_ATTR_RO(address); +static DEVICE_ATTR_RO(channel); + +static struct rfcomm_dev *__rfcomm_dev_add(struct rfcomm_dev_req *req, + struct rfcomm_dlc *dlc) +{ + struct rfcomm_dev *dev, *entry; + struct list_head *head = &rfcomm_dev_list; + int err = 0; + + dev = kzalloc(sizeof(struct rfcomm_dev), GFP_KERNEL); + if (!dev) + return ERR_PTR(-ENOMEM); + + mutex_lock(&rfcomm_dev_lock); + + if (req->dev_id < 0) { + dev->id = 0; + + list_for_each_entry(entry, &rfcomm_dev_list, list) { + if (entry->id != dev->id) + break; + + dev->id++; + head = &entry->list; + } + } else { + dev->id = req->dev_id; + + list_for_each_entry(entry, &rfcomm_dev_list, list) { + if (entry->id == dev->id) { + err = -EADDRINUSE; + goto out; + } + + if (entry->id > dev->id - 1) + break; + + head = &entry->list; + } + } + + if ((dev->id < 0) || (dev->id > RFCOMM_MAX_DEV - 1)) { + err = -ENFILE; + goto out; + } + + sprintf(dev->name, "rfcomm%d", dev->id); + + list_add(&dev->list, head); + + bacpy(&dev->src, &req->src); + bacpy(&dev->dst, &req->dst); + dev->channel = req->channel; + + dev->flags = req->flags & + ((1 << RFCOMM_RELEASE_ONHUP) | (1 << RFCOMM_REUSE_DLC)); + + tty_port_init(&dev->port); + dev->port.ops = &rfcomm_port_ops; + + skb_queue_head_init(&dev->pending); + + rfcomm_dlc_lock(dlc); + + if (req->flags & (1 << RFCOMM_REUSE_DLC)) { + struct sock *sk = dlc->owner; + struct sk_buff *skb; + + BUG_ON(!sk); + + rfcomm_dlc_throttle(dlc); + + while ((skb = skb_dequeue(&sk->sk_receive_queue))) { + skb_orphan(skb); + skb_queue_tail(&dev->pending, skb); + atomic_sub(skb->len, &sk->sk_rmem_alloc); + } + } + + dlc->data_ready = rfcomm_dev_data_ready; + dlc->state_change = rfcomm_dev_state_change; + dlc->modem_status = rfcomm_dev_modem_status; + + dlc->owner = dev; + dev->dlc = dlc; + + rfcomm_dev_modem_status(dlc, dlc->remote_v24_sig); + + rfcomm_dlc_unlock(dlc); + + /* It's safe to call __module_get() here because socket already + holds reference to this module. 
*/ + __module_get(THIS_MODULE); + + mutex_unlock(&rfcomm_dev_lock); + return dev; + +out: + mutex_unlock(&rfcomm_dev_lock); + kfree(dev); + return ERR_PTR(err); +} + +static int rfcomm_dev_add(struct rfcomm_dev_req *req, struct rfcomm_dlc *dlc) +{ + struct rfcomm_dev *dev; + struct device *tty; + + BT_DBG("id %d channel %d", req->dev_id, req->channel); + + dev = __rfcomm_dev_add(req, dlc); + if (IS_ERR(dev)) { + rfcomm_dlc_put(dlc); + return PTR_ERR(dev); + } + + tty = tty_port_register_device(&dev->port, rfcomm_tty_driver, + dev->id, NULL); + if (IS_ERR(tty)) { + tty_port_put(&dev->port); + return PTR_ERR(tty); + } + + dev->tty_dev = tty; + rfcomm_reparent_device(dev); + dev_set_drvdata(dev->tty_dev, dev); + + if (device_create_file(dev->tty_dev, &dev_attr_address) < 0) + BT_ERR("Failed to create address attribute"); + + if (device_create_file(dev->tty_dev, &dev_attr_channel) < 0) + BT_ERR("Failed to create channel attribute"); + + return dev->id; +} + +/* ---- Send buffer ---- */ +static inline unsigned int rfcomm_room(struct rfcomm_dev *dev) +{ + struct rfcomm_dlc *dlc = dev->dlc; + + /* Limit the outstanding number of packets not yet sent to 40 */ + int pending = 40 - atomic_read(&dev->wmem_alloc); + + return max(0, pending) * dlc->mtu; +} + +static void rfcomm_wfree(struct sk_buff *skb) +{ + struct rfcomm_dev *dev = (void *) skb->sk; + atomic_dec(&dev->wmem_alloc); + if (test_bit(RFCOMM_TTY_ATTACHED, &dev->flags)) + tty_port_tty_wakeup(&dev->port); + tty_port_put(&dev->port); +} + +static void rfcomm_set_owner_w(struct sk_buff *skb, struct rfcomm_dev *dev) +{ + tty_port_get(&dev->port); + atomic_inc(&dev->wmem_alloc); + skb->sk = (void *) dev; + skb->destructor = rfcomm_wfree; +} + +static struct sk_buff *rfcomm_wmalloc(struct rfcomm_dev *dev, unsigned long size, gfp_t priority) +{ + struct sk_buff *skb = alloc_skb(size, priority); + if (skb) + rfcomm_set_owner_w(skb, dev); + return skb; +} + +/* ---- Device IOCTLs ---- */ + +#define NOCAP_FLAGS ((1 << RFCOMM_REUSE_DLC) | (1 << RFCOMM_RELEASE_ONHUP)) + +static int __rfcomm_create_dev(struct sock *sk, void __user *arg) +{ + struct rfcomm_dev_req req; + struct rfcomm_dlc *dlc; + int id; + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + BT_DBG("sk %p dev_id %d flags 0x%x", sk, req.dev_id, req.flags); + + if (req.flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) + return -EPERM; + + if (req.flags & (1 << RFCOMM_REUSE_DLC)) { + /* Socket must be connected */ + if (sk->sk_state != BT_CONNECTED) + return -EBADFD; + + dlc = rfcomm_pi(sk)->dlc; + rfcomm_dlc_hold(dlc); + } else { + /* Validate the channel is unused */ + dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel); + if (IS_ERR(dlc)) + return PTR_ERR(dlc); + if (dlc) + return -EBUSY; + dlc = rfcomm_dlc_alloc(GFP_KERNEL); + if (!dlc) + return -ENOMEM; + } + + id = rfcomm_dev_add(&req, dlc); + if (id < 0) + return id; + + if (req.flags & (1 << RFCOMM_REUSE_DLC)) { + /* DLC is now used by device. 
+ * Socket must be disconnected */ + sk->sk_state = BT_CLOSED; + } + + return id; +} + +static int __rfcomm_release_dev(void __user *arg) +{ + struct rfcomm_dev_req req; + struct rfcomm_dev *dev; + struct tty_struct *tty; + + if (copy_from_user(&req, arg, sizeof(req))) + return -EFAULT; + + BT_DBG("dev_id %d flags 0x%x", req.dev_id, req.flags); + + dev = rfcomm_dev_get(req.dev_id); + if (!dev) + return -ENODEV; + + if (dev->flags != NOCAP_FLAGS && !capable(CAP_NET_ADMIN)) { + tty_port_put(&dev->port); + return -EPERM; + } + + /* only release once */ + if (test_and_set_bit(RFCOMM_DEV_RELEASED, &dev->status)) { + tty_port_put(&dev->port); + return -EALREADY; + } + + if (req.flags & (1 << RFCOMM_HANGUP_NOW)) + rfcomm_dlc_close(dev->dlc, 0); + + /* Shut down TTY synchronously before freeing rfcomm_dev */ + tty = tty_port_tty_get(&dev->port); + if (tty) { + tty_vhangup(tty); + tty_kref_put(tty); + } + + if (!test_bit(RFCOMM_TTY_OWNED, &dev->status)) + tty_port_put(&dev->port); + + tty_port_put(&dev->port); + return 0; +} + +static int rfcomm_create_dev(struct sock *sk, void __user *arg) +{ + int ret; + + mutex_lock(&rfcomm_ioctl_mutex); + ret = __rfcomm_create_dev(sk, arg); + mutex_unlock(&rfcomm_ioctl_mutex); + + return ret; +} + +static int rfcomm_release_dev(void __user *arg) +{ + int ret; + + mutex_lock(&rfcomm_ioctl_mutex); + ret = __rfcomm_release_dev(arg); + mutex_unlock(&rfcomm_ioctl_mutex); + + return ret; +} + +static int rfcomm_get_dev_list(void __user *arg) +{ + struct rfcomm_dev *dev; + struct rfcomm_dev_list_req *dl; + struct rfcomm_dev_info *di; + int n = 0, size, err; + u16 dev_num; + + BT_DBG(""); + + if (get_user(dev_num, (u16 __user *) arg)) + return -EFAULT; + + if (!dev_num || dev_num > (PAGE_SIZE * 4) / sizeof(*di)) + return -EINVAL; + + size = sizeof(*dl) + dev_num * sizeof(*di); + + dl = kzalloc(size, GFP_KERNEL); + if (!dl) + return -ENOMEM; + + di = dl->dev_info; + + mutex_lock(&rfcomm_dev_lock); + + list_for_each_entry(dev, &rfcomm_dev_list, list) { + if (!tty_port_get(&dev->port)) + continue; + (di + n)->id = dev->id; + (di + n)->flags = dev->flags; + (di + n)->state = dev->dlc->state; + (di + n)->channel = dev->channel; + bacpy(&(di + n)->src, &dev->src); + bacpy(&(di + n)->dst, &dev->dst); + tty_port_put(&dev->port); + if (++n >= dev_num) + break; + } + + mutex_unlock(&rfcomm_dev_lock); + + dl->dev_num = n; + size = sizeof(*dl) + n * sizeof(*di); + + err = copy_to_user(arg, dl, size); + kfree(dl); + + return err ? 
-EFAULT : 0; +} + +static int rfcomm_get_dev_info(void __user *arg) +{ + struct rfcomm_dev *dev; + struct rfcomm_dev_info di; + int err = 0; + + BT_DBG(""); + + if (copy_from_user(&di, arg, sizeof(di))) + return -EFAULT; + + dev = rfcomm_dev_get(di.id); + if (!dev) + return -ENODEV; + + di.flags = dev->flags; + di.channel = dev->channel; + di.state = dev->dlc->state; + bacpy(&di.src, &dev->src); + bacpy(&di.dst, &dev->dst); + + if (copy_to_user(arg, &di, sizeof(di))) + err = -EFAULT; + + tty_port_put(&dev->port); + return err; +} + +int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg) +{ + BT_DBG("cmd %d arg %p", cmd, arg); + + switch (cmd) { + case RFCOMMCREATEDEV: + return rfcomm_create_dev(sk, arg); + + case RFCOMMRELEASEDEV: + return rfcomm_release_dev(arg); + + case RFCOMMGETDEVLIST: + return rfcomm_get_dev_list(arg); + + case RFCOMMGETDEVINFO: + return rfcomm_get_dev_info(arg); + } + + return -EINVAL; +} + +/* ---- DLC callbacks ---- */ +static void rfcomm_dev_data_ready(struct rfcomm_dlc *dlc, struct sk_buff *skb) +{ + struct rfcomm_dev *dev = dlc->owner; + + if (!dev) { + kfree_skb(skb); + return; + } + + if (!skb_queue_empty(&dev->pending)) { + skb_queue_tail(&dev->pending, skb); + return; + } + + BT_DBG("dlc %p len %d", dlc, skb->len); + + tty_insert_flip_string(&dev->port, skb->data, skb->len); + tty_flip_buffer_push(&dev->port); + + kfree_skb(skb); +} + +static void rfcomm_dev_state_change(struct rfcomm_dlc *dlc, int err) +{ + struct rfcomm_dev *dev = dlc->owner; + if (!dev) + return; + + BT_DBG("dlc %p dev %p err %d", dlc, dev, err); + + dev->err = err; + if (dlc->state == BT_CONNECTED) { + rfcomm_reparent_device(dev); + + wake_up_interruptible(&dev->port.open_wait); + } else if (dlc->state == BT_CLOSED) + tty_port_tty_hangup(&dev->port, false); +} + +static void rfcomm_dev_modem_status(struct rfcomm_dlc *dlc, u8 v24_sig) +{ + struct rfcomm_dev *dev = dlc->owner; + if (!dev) + return; + + BT_DBG("dlc %p dev %p v24_sig 0x%02x", dlc, dev, v24_sig); + + if ((dev->modem_status & TIOCM_CD) && !(v24_sig & RFCOMM_V24_DV)) + tty_port_tty_hangup(&dev->port, true); + + dev->modem_status = + ((v24_sig & RFCOMM_V24_RTC) ? (TIOCM_DSR | TIOCM_DTR) : 0) | + ((v24_sig & RFCOMM_V24_RTR) ? (TIOCM_RTS | TIOCM_CTS) : 0) | + ((v24_sig & RFCOMM_V24_IC) ? TIOCM_RI : 0) | + ((v24_sig & RFCOMM_V24_DV) ? TIOCM_CD : 0); +} + +/* ---- TTY functions ---- */ +static void rfcomm_tty_copy_pending(struct rfcomm_dev *dev) +{ + struct sk_buff *skb; + int inserted = 0; + + BT_DBG("dev %p", dev); + + rfcomm_dlc_lock(dev->dlc); + + while ((skb = skb_dequeue(&dev->pending))) { + inserted += tty_insert_flip_string(&dev->port, skb->data, + skb->len); + kfree_skb(skb); + } + + rfcomm_dlc_unlock(dev->dlc); + + if (inserted > 0) + tty_flip_buffer_push(&dev->port); +} + +/* do the reverse of install, clearing the tty fields and releasing the + * reference to tty_port + */ +static void rfcomm_tty_cleanup(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = tty->driver_data; + + clear_bit(RFCOMM_TTY_ATTACHED, &dev->flags); + + rfcomm_dlc_lock(dev->dlc); + tty->driver_data = NULL; + rfcomm_dlc_unlock(dev->dlc); + + /* + * purge the dlc->tx_queue to avoid circular dependencies + * between dev and dlc + */ + skb_queue_purge(&dev->dlc->tx_queue); + + tty_port_put(&dev->port); +} + +/* we acquire the tty_port reference since it's here the tty is first used + * by setting the termios. 
We also populate the driver_data field and install + * the tty port + */ +static int rfcomm_tty_install(struct tty_driver *driver, struct tty_struct *tty) +{ + struct rfcomm_dev *dev; + struct rfcomm_dlc *dlc; + int err; + + dev = rfcomm_dev_get(tty->index); + if (!dev) + return -ENODEV; + + dlc = dev->dlc; + + /* Attach TTY and open DLC */ + rfcomm_dlc_lock(dlc); + tty->driver_data = dev; + rfcomm_dlc_unlock(dlc); + set_bit(RFCOMM_TTY_ATTACHED, &dev->flags); + + /* install the tty_port */ + err = tty_port_install(&dev->port, driver, tty); + if (err) { + rfcomm_tty_cleanup(tty); + return err; + } + + /* take over the tty_port reference if the port was created with the + * flag RFCOMM_RELEASE_ONHUP. This will force the release of the port + * when the last process closes the tty. The behaviour is expected by + * userspace. + */ + if (test_bit(RFCOMM_RELEASE_ONHUP, &dev->flags)) { + set_bit(RFCOMM_TTY_OWNED, &dev->status); + tty_port_put(&dev->port); + } + + return 0; +} + +static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp) +{ + struct rfcomm_dev *dev = tty->driver_data; + int err; + + BT_DBG("tty %p id %d", tty, tty->index); + + BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst, + dev->channel, dev->port.count); + + err = tty_port_open(&dev->port, tty, filp); + if (err) + return err; + + /* + * FIXME: rfcomm should use proper flow control for + * received data. This hack will be unnecessary and can + * be removed when that's implemented + */ + rfcomm_tty_copy_pending(dev); + + rfcomm_dlc_unthrottle(dev->dlc); + + return 0; +} + +static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc, + dev->port.count); + + tty_port_close(&dev->port, tty, filp); +} + +static int rfcomm_tty_write(struct tty_struct *tty, const unsigned char *buf, int count) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dlc *dlc = dev->dlc; + struct sk_buff *skb; + int sent = 0, size; + + BT_DBG("tty %p count %d", tty, count); + + while (count) { + size = min_t(uint, count, dlc->mtu); + + skb = rfcomm_wmalloc(dev, size + RFCOMM_SKB_RESERVE, GFP_ATOMIC); + if (!skb) + break; + + skb_reserve(skb, RFCOMM_SKB_HEAD_RESERVE); + + skb_put_data(skb, buf + sent, size); + + rfcomm_dlc_send_noerror(dlc, skb); + + sent += size; + count -= size; + } + + return sent; +} + +static unsigned int rfcomm_tty_write_room(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + int room = 0; + + if (dev && dev->dlc) + room = rfcomm_room(dev); + + BT_DBG("tty %p room %d", tty, room); + + return room; +} + +static int rfcomm_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) +{ + BT_DBG("tty %p cmd 0x%02x", tty, cmd); + + switch (cmd) { + case TCGETS: + BT_DBG("TCGETS is not supported"); + return -ENOIOCTLCMD; + + case TCSETS: + BT_DBG("TCSETS is not supported"); + return -ENOIOCTLCMD; + + case TIOCMIWAIT: + BT_DBG("TIOCMIWAIT"); + break; + + case TIOCSERGETLSR: + BT_ERR("TIOCSERGETLSR is not supported"); + return -ENOIOCTLCMD; + + case TIOCSERCONFIG: + BT_ERR("TIOCSERCONFIG is not supported"); + return -ENOIOCTLCMD; + + default: + return -ENOIOCTLCMD; /* ioctls which we must ignore */ + + } + + return -ENOIOCTLCMD; +} + +static void rfcomm_tty_set_termios(struct tty_struct *tty, + const struct ktermios *old) +{ + struct ktermios *new = &tty->termios; + int old_baud_rate = 
tty_termios_baud_rate(old); + int new_baud_rate = tty_termios_baud_rate(new); + + u8 baud, data_bits, stop_bits, parity, x_on, x_off; + u16 changes = 0; + + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p termios %p", tty, old); + + if (!dev || !dev->dlc || !dev->dlc->session) + return; + + /* Handle turning off CRTSCTS */ + if ((old->c_cflag & CRTSCTS) && !(new->c_cflag & CRTSCTS)) + BT_DBG("Turning off CRTSCTS unsupported"); + + /* Parity on/off and when on, odd/even */ + if (((old->c_cflag & PARENB) != (new->c_cflag & PARENB)) || + ((old->c_cflag & PARODD) != (new->c_cflag & PARODD))) { + changes |= RFCOMM_RPN_PM_PARITY; + BT_DBG("Parity change detected."); + } + + /* Mark and space parity are not supported! */ + if (new->c_cflag & PARENB) { + if (new->c_cflag & PARODD) { + BT_DBG("Parity is ODD"); + parity = RFCOMM_RPN_PARITY_ODD; + } else { + BT_DBG("Parity is EVEN"); + parity = RFCOMM_RPN_PARITY_EVEN; + } + } else { + BT_DBG("Parity is OFF"); + parity = RFCOMM_RPN_PARITY_NONE; + } + + /* Setting the x_on / x_off characters */ + if (old->c_cc[VSTOP] != new->c_cc[VSTOP]) { + BT_DBG("XOFF custom"); + x_on = new->c_cc[VSTOP]; + changes |= RFCOMM_RPN_PM_XON; + } else { + BT_DBG("XOFF default"); + x_on = RFCOMM_RPN_XON_CHAR; + } + + if (old->c_cc[VSTART] != new->c_cc[VSTART]) { + BT_DBG("XON custom"); + x_off = new->c_cc[VSTART]; + changes |= RFCOMM_RPN_PM_XOFF; + } else { + BT_DBG("XON default"); + x_off = RFCOMM_RPN_XOFF_CHAR; + } + + /* Handle setting of stop bits */ + if ((old->c_cflag & CSTOPB) != (new->c_cflag & CSTOPB)) + changes |= RFCOMM_RPN_PM_STOP; + + /* POSIX does not support 1.5 stop bits and RFCOMM does not + * support 2 stop bits. So a request for 2 stop bits gets + * translated to 1.5 stop bits */ + if (new->c_cflag & CSTOPB) + stop_bits = RFCOMM_RPN_STOP_15; + else + stop_bits = RFCOMM_RPN_STOP_1; + + /* Handle number of data bits [5-8] */ + if ((old->c_cflag & CSIZE) != (new->c_cflag & CSIZE)) + changes |= RFCOMM_RPN_PM_DATA; + + switch (new->c_cflag & CSIZE) { + case CS5: + data_bits = RFCOMM_RPN_DATA_5; + break; + case CS6: + data_bits = RFCOMM_RPN_DATA_6; + break; + case CS7: + data_bits = RFCOMM_RPN_DATA_7; + break; + case CS8: + data_bits = RFCOMM_RPN_DATA_8; + break; + default: + data_bits = RFCOMM_RPN_DATA_8; + break; + } + + /* Handle baudrate settings */ + if (old_baud_rate != new_baud_rate) + changes |= RFCOMM_RPN_PM_BITRATE; + + switch (new_baud_rate) { + case 2400: + baud = RFCOMM_RPN_BR_2400; + break; + case 4800: + baud = RFCOMM_RPN_BR_4800; + break; + case 7200: + baud = RFCOMM_RPN_BR_7200; + break; + case 9600: + baud = RFCOMM_RPN_BR_9600; + break; + case 19200: + baud = RFCOMM_RPN_BR_19200; + break; + case 38400: + baud = RFCOMM_RPN_BR_38400; + break; + case 57600: + baud = RFCOMM_RPN_BR_57600; + break; + case 115200: + baud = RFCOMM_RPN_BR_115200; + break; + case 230400: + baud = RFCOMM_RPN_BR_230400; + break; + default: + /* 9600 is standard according to the RFCOMM specification */ + baud = RFCOMM_RPN_BR_9600; + break; + + } + + if (changes) + rfcomm_send_rpn(dev->dlc->session, 1, dev->dlc->dlci, baud, + data_bits, stop_bits, parity, + RFCOMM_RPN_FLOW_NONE, x_on, x_off, changes); +} + +static void rfcomm_tty_throttle(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + rfcomm_dlc_throttle(dev->dlc); +} + +static void rfcomm_tty_unthrottle(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; +
+ BT_DBG("tty %p dev %p", tty, dev); + + rfcomm_dlc_unthrottle(dev->dlc); +} + +static unsigned int rfcomm_tty_chars_in_buffer(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + if (!dev || !dev->dlc) + return 0; + + if (!skb_queue_empty(&dev->dlc->tx_queue)) + return dev->dlc->mtu; + + return 0; +} + +static void rfcomm_tty_flush_buffer(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + if (!dev || !dev->dlc) + return; + + skb_queue_purge(&dev->dlc->tx_queue); + tty_wakeup(tty); +} + +static void rfcomm_tty_send_xchar(struct tty_struct *tty, char ch) +{ + BT_DBG("tty %p ch %c", tty, ch); +} + +static void rfcomm_tty_wait_until_sent(struct tty_struct *tty, int timeout) +{ + BT_DBG("tty %p timeout %d", tty, timeout); +} + +static void rfcomm_tty_hangup(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + tty_port_hangup(&dev->port); +} + +static int rfcomm_tty_tiocmget(struct tty_struct *tty) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + + BT_DBG("tty %p dev %p", tty, dev); + + return dev->modem_status; +} + +static int rfcomm_tty_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear) +{ + struct rfcomm_dev *dev = (struct rfcomm_dev *) tty->driver_data; + struct rfcomm_dlc *dlc = dev->dlc; + u8 v24_sig; + + BT_DBG("tty %p dev %p set 0x%02x clear 0x%02x", tty, dev, set, clear); + + rfcomm_dlc_get_modem_status(dlc, &v24_sig); + + if (set & TIOCM_DSR || set & TIOCM_DTR) + v24_sig |= RFCOMM_V24_RTC; + if (set & TIOCM_RTS || set & TIOCM_CTS) + v24_sig |= RFCOMM_V24_RTR; + if (set & TIOCM_RI) + v24_sig |= RFCOMM_V24_IC; + if (set & TIOCM_CD) + v24_sig |= RFCOMM_V24_DV; + + if (clear & TIOCM_DSR || clear & TIOCM_DTR) + v24_sig &= ~RFCOMM_V24_RTC; + if (clear & TIOCM_RTS || clear & TIOCM_CTS) + v24_sig &= ~RFCOMM_V24_RTR; + if (clear & TIOCM_RI) + v24_sig &= ~RFCOMM_V24_IC; + if (clear & TIOCM_CD) + v24_sig &= ~RFCOMM_V24_DV; + + rfcomm_dlc_set_modem_status(dlc, v24_sig); + + return 0; +} + +/* ---- TTY structure ---- */ + +static const struct tty_operations rfcomm_ops = { + .open = rfcomm_tty_open, + .close = rfcomm_tty_close, + .write = rfcomm_tty_write, + .write_room = rfcomm_tty_write_room, + .chars_in_buffer = rfcomm_tty_chars_in_buffer, + .flush_buffer = rfcomm_tty_flush_buffer, + .ioctl = rfcomm_tty_ioctl, + .throttle = rfcomm_tty_throttle, + .unthrottle = rfcomm_tty_unthrottle, + .set_termios = rfcomm_tty_set_termios, + .send_xchar = rfcomm_tty_send_xchar, + .hangup = rfcomm_tty_hangup, + .wait_until_sent = rfcomm_tty_wait_until_sent, + .tiocmget = rfcomm_tty_tiocmget, + .tiocmset = rfcomm_tty_tiocmset, + .install = rfcomm_tty_install, + .cleanup = rfcomm_tty_cleanup, +}; + +int __init rfcomm_init_ttys(void) +{ + int error; + + rfcomm_tty_driver = tty_alloc_driver(RFCOMM_TTY_PORTS, + TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV); + if (IS_ERR(rfcomm_tty_driver)) + return PTR_ERR(rfcomm_tty_driver); + + rfcomm_tty_driver->driver_name = "rfcomm"; + rfcomm_tty_driver->name = "rfcomm"; + rfcomm_tty_driver->major = RFCOMM_TTY_MAJOR; + rfcomm_tty_driver->minor_start = RFCOMM_TTY_MINOR; + rfcomm_tty_driver->type = TTY_DRIVER_TYPE_SERIAL; + rfcomm_tty_driver->subtype = SERIAL_TYPE_NORMAL; + rfcomm_tty_driver->init_termios = tty_std_termios; + rfcomm_tty_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | 
HUPCL; + rfcomm_tty_driver->init_termios.c_lflag &= ~ICANON; + tty_set_operations(rfcomm_tty_driver, &rfcomm_ops); + + error = tty_register_driver(rfcomm_tty_driver); + if (error) { + BT_ERR("Can't register RFCOMM TTY driver"); + tty_driver_kref_put(rfcomm_tty_driver); + return error; + } + + BT_INFO("RFCOMM TTY layer initialized"); + + return 0; +} + +void rfcomm_cleanup_ttys(void) +{ + tty_unregister_driver(rfcomm_tty_driver); + tty_driver_kref_put(rfcomm_tty_driver); +} diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c new file mode 100644 index 000000000..6d4168cfe --- /dev/null +++ b/net/bluetooth/sco.c @@ -0,0 +1,1509 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2000-2001 Qualcomm Incorporated + + Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +/* Bluetooth SCO sockets. */ + +#include <linux/module.h> +#include <linux/debugfs.h> +#include <linux/seq_file.h> +#include <linux/sched/signal.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/sco.h> + +static bool disable_esco; + +static const struct proto_ops sco_sock_ops; + +static struct bt_sock_list sco_sk_list = { + .lock = __RW_LOCK_UNLOCKED(sco_sk_list.lock) +}; + +/* ---- SCO connections ---- */ +struct sco_conn { + struct hci_conn *hcon; + + spinlock_t lock; + struct sock *sk; + + struct delayed_work timeout_work; + + unsigned int mtu; +}; + +#define sco_conn_lock(c) spin_lock(&c->lock) +#define sco_conn_unlock(c) spin_unlock(&c->lock) + +static void sco_sock_close(struct sock *sk); +static void sco_sock_kill(struct sock *sk); + +/* ----- SCO socket info ----- */ +#define sco_pi(sk) ((struct sco_pinfo *) sk) + +struct sco_pinfo { + struct bt_sock bt; + bdaddr_t src; + bdaddr_t dst; + __u32 flags; + __u16 setting; + __u8 cmsg_mask; + struct bt_codec codec; + struct sco_conn *conn; +}; + +/* ---- SCO timers ---- */ +#define SCO_CONN_TIMEOUT (HZ * 40) +#define SCO_DISCONN_TIMEOUT (HZ * 2) + +static void sco_sock_timeout(struct work_struct *work) +{ + struct sco_conn *conn = container_of(work, struct sco_conn, + timeout_work.work); + struct sock *sk; + + sco_conn_lock(conn); + sk = conn->sk; + if (sk) + sock_hold(sk); + sco_conn_unlock(conn); + + if (!sk) + return; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + + lock_sock(sk); + sk->sk_err = ETIMEDOUT; + sk->sk_state_change(sk); + release_sock(sk); + sock_put(sk); +} + +static void sco_sock_set_timer(struct sock *sk, long timeout) +{ + if (!sco_pi(sk)->conn) + return; + + BT_DBG("sock %p state %d timeout %ld", sk, sk->sk_state, timeout); + 
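+	/* Re-arm rather than stack timers: cancelling any pending timeout
+	 * before queueing a fresh one keeps at most one timeout work item
+	 * outstanding per connection. cancel_delayed_work() does not wait
+	 * for a running instance, but sco_sock_timeout() re-reads conn->sk
+	 * under the connection lock, so a late run is harmless.
+	 */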
cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); + schedule_delayed_work(&sco_pi(sk)->conn->timeout_work, timeout); +} + +static void sco_sock_clear_timer(struct sock *sk) +{ + if (!sco_pi(sk)->conn) + return; + + BT_DBG("sock %p state %d", sk, sk->sk_state); + cancel_delayed_work(&sco_pi(sk)->conn->timeout_work); +} + +/* ---- SCO connections ---- */ +static struct sco_conn *sco_conn_add(struct hci_conn *hcon) +{ + struct hci_dev *hdev = hcon->hdev; + struct sco_conn *conn = hcon->sco_data; + + if (conn) + return conn; + + conn = kzalloc(sizeof(struct sco_conn), GFP_KERNEL); + if (!conn) + return NULL; + + spin_lock_init(&conn->lock); + INIT_DELAYED_WORK(&conn->timeout_work, sco_sock_timeout); + + hcon->sco_data = conn; + conn->hcon = hcon; + + if (hdev->sco_mtu > 0) + conn->mtu = hdev->sco_mtu; + else + conn->mtu = 60; + + BT_DBG("hcon %p conn %p", hcon, conn); + + return conn; +} + +/* Delete channel. + * Must be called on the locked socket. */ +static void sco_chan_del(struct sock *sk, int err) +{ + struct sco_conn *conn; + + conn = sco_pi(sk)->conn; + + BT_DBG("sk %p, conn %p, err %d", sk, conn, err); + + if (conn) { + sco_conn_lock(conn); + conn->sk = NULL; + sco_pi(sk)->conn = NULL; + sco_conn_unlock(conn); + + if (conn->hcon) + hci_conn_drop(conn->hcon); + } + + sk->sk_state = BT_CLOSED; + sk->sk_err = err; + sk->sk_state_change(sk); + + sock_set_flag(sk, SOCK_ZAPPED); +} + +static void sco_conn_del(struct hci_conn *hcon, int err) +{ + struct sco_conn *conn = hcon->sco_data; + struct sock *sk; + + if (!conn) + return; + + BT_DBG("hcon %p conn %p, err %d", hcon, conn, err); + + /* Kill socket */ + sco_conn_lock(conn); + sk = conn->sk; + if (sk) + sock_hold(sk); + sco_conn_unlock(conn); + + if (sk) { + lock_sock(sk); + sco_sock_clear_timer(sk); + sco_chan_del(sk, err); + release_sock(sk); + sock_put(sk); + } + + /* Ensure no more work items will run before freeing conn. 
*/ + cancel_delayed_work_sync(&conn->timeout_work); + + hcon->sco_data = NULL; + kfree(conn); +} + +static void __sco_chan_add(struct sco_conn *conn, struct sock *sk, + struct sock *parent) +{ + BT_DBG("conn %p", conn); + + sco_pi(sk)->conn = conn; + conn->sk = sk; + + if (parent) + bt_accept_enqueue(parent, sk, true); +} + +static int sco_chan_add(struct sco_conn *conn, struct sock *sk, + struct sock *parent) +{ + int err = 0; + + sco_conn_lock(conn); + if (conn->sk) + err = -EBUSY; + else + __sco_chan_add(conn, sk, parent); + + sco_conn_unlock(conn); + return err; +} + +static int sco_connect(struct hci_dev *hdev, struct sock *sk) +{ + struct sco_conn *conn; + struct hci_conn *hcon; + int err, type; + + BT_DBG("%pMR -> %pMR", &sco_pi(sk)->src, &sco_pi(sk)->dst); + + if (lmp_esco_capable(hdev) && !disable_esco) + type = ESCO_LINK; + else + type = SCO_LINK; + + if (sco_pi(sk)->setting == BT_VOICE_TRANSPARENT && + (!lmp_transp_capable(hdev) || !lmp_esco_capable(hdev))) + return -EOPNOTSUPP; + + hcon = hci_connect_sco(hdev, type, &sco_pi(sk)->dst, + sco_pi(sk)->setting, &sco_pi(sk)->codec); + if (IS_ERR(hcon)) + return PTR_ERR(hcon); + + conn = sco_conn_add(hcon); + if (!conn) { + hci_conn_drop(hcon); + return -ENOMEM; + } + + /* Update source addr of the socket */ + bacpy(&sco_pi(sk)->src, &hcon->src); + + err = sco_chan_add(conn, sk, NULL); + if (err) + return err; + + if (hcon->state == BT_CONNECTED) { + sco_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + } else { + sk->sk_state = BT_CONNECT; + sco_sock_set_timer(sk, sk->sk_sndtimeo); + } + + return err; +} + +static int sco_send_frame(struct sock *sk, struct sk_buff *skb) +{ + struct sco_conn *conn = sco_pi(sk)->conn; + int len = skb->len; + + /* Check outgoing MTU */ + if (len > conn->mtu) + return -EINVAL; + + BT_DBG("sk %p len %d", sk, len); + + hci_send_sco(conn->hcon, skb); + + return len; +} + +static void sco_recv_frame(struct sco_conn *conn, struct sk_buff *skb) +{ + struct sock *sk; + + sco_conn_lock(conn); + sk = conn->sk; + sco_conn_unlock(conn); + + if (!sk) + goto drop; + + BT_DBG("sk %p len %u", sk, skb->len); + + if (sk->sk_state != BT_CONNECTED) + goto drop; + + if (!sock_queue_rcv_skb(sk, skb)) + return; + +drop: + kfree_skb(skb); +} + +/* -------- Socket interface ---------- */ +static struct sock *__sco_get_sock_listen_by_addr(bdaddr_t *ba) +{ + struct sock *sk; + + sk_for_each(sk, &sco_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (!bacmp(&sco_pi(sk)->src, ba)) + return sk; + } + + return NULL; +} + +/* Find socket listening on source bdaddr. + * Returns closest match. + */ +static struct sock *sco_get_sock_listen(bdaddr_t *src) +{ + struct sock *sk = NULL, *sk1 = NULL; + + read_lock(&sco_sk_list.lock); + + sk_for_each(sk, &sco_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + /* Exact match. */ + if (!bacmp(&sco_pi(sk)->src, src)) + break; + + /* Closest match */ + if (!bacmp(&sco_pi(sk)->src, BDADDR_ANY)) + sk1 = sk; + } + + read_unlock(&sco_sk_list.lock); + + return sk ? 
sk : sk1; +} + +static void sco_sock_destruct(struct sock *sk) +{ + BT_DBG("sk %p", sk); + + skb_queue_purge(&sk->sk_receive_queue); + skb_queue_purge(&sk->sk_write_queue); +} + +static void sco_sock_cleanup_listen(struct sock *parent) +{ + struct sock *sk; + + BT_DBG("parent %p", parent); + + /* Close not yet accepted channels */ + while ((sk = bt_accept_dequeue(parent, NULL))) { + sco_sock_close(sk); + sco_sock_kill(sk); + } + + parent->sk_state = BT_CLOSED; + sock_set_flag(parent, SOCK_ZAPPED); +} + +/* Kill socket (only if zapped and orphan) + * Must be called on unlocked socket. + */ +static void sco_sock_kill(struct sock *sk) +{ + if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket) + return; + + BT_DBG("sk %p state %d", sk, sk->sk_state); + + /* Kill poor orphan */ + bt_sock_unlink(&sco_sk_list, sk); + sock_set_flag(sk, SOCK_DEAD); + sock_put(sk); +} + +static void __sco_sock_close(struct sock *sk) +{ + BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket); + + switch (sk->sk_state) { + case BT_LISTEN: + sco_sock_cleanup_listen(sk); + break; + + case BT_CONNECTED: + case BT_CONFIG: + if (sco_pi(sk)->conn->hcon) { + sk->sk_state = BT_DISCONN; + sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT); + sco_conn_lock(sco_pi(sk)->conn); + hci_conn_drop(sco_pi(sk)->conn->hcon); + sco_pi(sk)->conn->hcon = NULL; + sco_conn_unlock(sco_pi(sk)->conn); + } else + sco_chan_del(sk, ECONNRESET); + break; + + case BT_CONNECT2: + case BT_CONNECT: + case BT_DISCONN: + sco_chan_del(sk, ECONNRESET); + break; + + default: + sock_set_flag(sk, SOCK_ZAPPED); + break; + } + +} + +/* Must be called on unlocked socket. */ +static void sco_sock_close(struct sock *sk) +{ + lock_sock(sk); + sco_sock_clear_timer(sk); + __sco_sock_close(sk); + release_sock(sk); +} + +static void sco_skb_put_cmsg(struct sk_buff *skb, struct msghdr *msg, + struct sock *sk) +{ + if (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS) + put_cmsg(msg, SOL_BLUETOOTH, BT_SCM_PKT_STATUS, + sizeof(bt_cb(skb)->sco.pkt_status), + &bt_cb(skb)->sco.pkt_status); +} + +static void sco_sock_init(struct sock *sk, struct sock *parent) +{ + BT_DBG("sk %p", sk); + + if (parent) { + sk->sk_type = parent->sk_type; + bt_sk(sk)->flags = bt_sk(parent)->flags; + security_sk_clone(parent, sk); + } else { + bt_sk(sk)->skb_put_cmsg = sco_skb_put_cmsg; + } +} + +static struct proto sco_proto = { + .name = "SCO", + .owner = THIS_MODULE, + .obj_size = sizeof(struct sco_pinfo) +}; + +static struct sock *sco_sock_alloc(struct net *net, struct socket *sock, + int proto, gfp_t prio, int kern) +{ + struct sock *sk; + + sk = sk_alloc(net, PF_BLUETOOTH, prio, &sco_proto, kern); + if (!sk) + return NULL; + + sock_init_data(sock, sk); + INIT_LIST_HEAD(&bt_sk(sk)->accept_q); + + sk->sk_destruct = sco_sock_destruct; + sk->sk_sndtimeo = SCO_CONN_TIMEOUT; + + sock_reset_flag(sk, SOCK_ZAPPED); + + sk->sk_protocol = proto; + sk->sk_state = BT_OPEN; + + sco_pi(sk)->setting = BT_VOICE_CVSD_16BIT; + sco_pi(sk)->codec.id = BT_CODEC_CVSD; + sco_pi(sk)->codec.cid = 0xffff; + sco_pi(sk)->codec.vid = 0xffff; + sco_pi(sk)->codec.data_path = 0x00; + + bt_sock_link(&sco_sk_list, sk); + return sk; +} + +static int sco_sock_create(struct net *net, struct socket *sock, int protocol, + int kern) +{ + struct sock *sk; + + BT_DBG("sock %p", sock); + + sock->state = SS_UNCONNECTED; + + if (sock->type != SOCK_SEQPACKET) + return -ESOCKTNOSUPPORT; + + sock->ops = &sco_sock_ops; + + sk = sco_sock_alloc(net, sock, protocol, GFP_ATOMIC, kern); + if (!sk) + return -ENOMEM; + + sco_sock_init(sk, NULL); + 
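+	/* Illustrative sketch (user-space side, not kernel code): this
+	 * create path is reached via the BlueZ socket API, roughly:
+	 *
+	 *	int sk = socket(PF_BLUETOOTH, SOCK_SEQPACKET, BTPROTO_SCO);
+	 *	struct sockaddr_sco sa = { .sco_family = AF_BLUETOOTH };
+	 *	str2ba("00:11:22:33:44:55", &sa.sco_bdaddr);
+	 *	connect(sk, (struct sockaddr *)&sa, sizeof(sa));
+	 *
+	 * str2ba() is a libbluetooth helper, the address is a placeholder,
+	 * and error handling is omitted.
+	 */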
return 0;
+}
+
+static int sco_sock_bind(struct socket *sock, struct sockaddr *addr,
+			 int addr_len)
+{
+	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+	struct sock *sk = sock->sk;
+	int err = 0;
+
+	if (!addr || addr_len < sizeof(struct sockaddr_sco) ||
+	    addr->sa_family != AF_BLUETOOTH)
+		return -EINVAL;
+
+	BT_DBG("sk %p %pMR", sk, &sa->sco_bdaddr);
+
+	lock_sock(sk);
+
+	if (sk->sk_state != BT_OPEN) {
+		err = -EBADFD;
+		goto done;
+	}
+
+	if (sk->sk_type != SOCK_SEQPACKET) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	bacpy(&sco_pi(sk)->src, &sa->sco_bdaddr);
+
+	sk->sk_state = BT_BOUND;
+
+done:
+	release_sock(sk);
+	return err;
+}
+
+static int sco_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
+{
+	struct sockaddr_sco *sa = (struct sockaddr_sco *) addr;
+	struct sock *sk = sock->sk;
+	struct hci_dev *hdev;
+	int err;
+
+	BT_DBG("sk %p", sk);
+
+	if (alen < sizeof(struct sockaddr_sco) ||
+	    addr->sa_family != AF_BLUETOOTH)
+		return -EINVAL;
+
+	lock_sock(sk);
+	if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND) {
+		err = -EBADFD;
+		goto done;
+	}
+
+	if (sk->sk_type != SOCK_SEQPACKET) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	hdev = hci_get_route(&sa->sco_bdaddr, &sco_pi(sk)->src, BDADDR_BREDR);
+	if (!hdev) {
+		err = -EHOSTUNREACH;
+		goto done;
+	}
+	hci_dev_lock(hdev);
+
+	/* Set destination address */
+	bacpy(&sco_pi(sk)->dst, &sa->sco_bdaddr);
+
+	err = sco_connect(hdev, sk);
+	hci_dev_unlock(hdev);
+	hci_dev_put(hdev);
+	if (err)
+		goto done;
+
+	err = bt_sock_wait_state(sk, BT_CONNECTED,
+				 sock_sndtimeo(sk, flags & O_NONBLOCK));
+
+done:
+	release_sock(sk);
+	return err;
+}
+
+static int sco_sock_listen(struct socket *sock, int backlog)
+{
+	struct sock *sk = sock->sk;
+	bdaddr_t *src = &sco_pi(sk)->src;
+	int err = 0;
+
+	BT_DBG("sk %p backlog %d", sk, backlog);
+
+	lock_sock(sk);
+
+	if (sk->sk_state != BT_BOUND) {
+		err = -EBADFD;
+		goto done;
+	}
+
+	if (sk->sk_type != SOCK_SEQPACKET) {
+		err = -EINVAL;
+		goto done;
+	}
+
+	write_lock(&sco_sk_list.lock);
+
+	if (__sco_get_sock_listen_by_addr(src)) {
+		err = -EADDRINUSE;
+		goto unlock;
+	}
+
+	sk->sk_max_ack_backlog = backlog;
+	sk->sk_ack_backlog = 0;
+
+	sk->sk_state = BT_LISTEN;
+
+unlock:
+	write_unlock(&sco_sk_list.lock);
+
+done:
+	release_sock(sk);
+	return err;
+}
+
+static int sco_sock_accept(struct socket *sock, struct socket *newsock,
+			   int flags, bool kern)
+{
+	DEFINE_WAIT_FUNC(wait, woken_wake_function);
+	struct sock *sk = sock->sk, *ch;
+	long timeo;
+	int err = 0;
+
+	lock_sock(sk);
+
+	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
+
+	BT_DBG("sk %p timeo %ld", sk, timeo);
+
+	/* Wait for an incoming connection. (wake-one).
*/ + add_wait_queue_exclusive(sk_sleep(sk), &wait); + while (1) { + if (sk->sk_state != BT_LISTEN) { + err = -EBADFD; + break; + } + + ch = bt_accept_dequeue(sk, newsock); + if (ch) + break; + + if (!timeo) { + err = -EAGAIN; + break; + } + + if (signal_pending(current)) { + err = sock_intr_errno(timeo); + break; + } + + release_sock(sk); + + timeo = wait_woken(&wait, TASK_INTERRUPTIBLE, timeo); + lock_sock(sk); + } + remove_wait_queue(sk_sleep(sk), &wait); + + if (err) + goto done; + + newsock->state = SS_CONNECTED; + + BT_DBG("new socket %p", ch); + +done: + release_sock(sk); + return err; +} + +static int sco_sock_getname(struct socket *sock, struct sockaddr *addr, + int peer) +{ + struct sockaddr_sco *sa = (struct sockaddr_sco *) addr; + struct sock *sk = sock->sk; + + BT_DBG("sock %p, sk %p", sock, sk); + + addr->sa_family = AF_BLUETOOTH; + + if (peer) + bacpy(&sa->sco_bdaddr, &sco_pi(sk)->dst); + else + bacpy(&sa->sco_bdaddr, &sco_pi(sk)->src); + + return sizeof(struct sockaddr_sco); +} + +static int sco_sock_sendmsg(struct socket *sock, struct msghdr *msg, + size_t len) +{ + struct sock *sk = sock->sk; + struct sk_buff *skb; + int err; + + BT_DBG("sock %p, sk %p", sock, sk); + + err = sock_error(sk); + if (err) + return err; + + if (msg->msg_flags & MSG_OOB) + return -EOPNOTSUPP; + + skb = bt_skb_sendmsg(sk, msg, len, len, 0, 0); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + lock_sock(sk); + + if (sk->sk_state == BT_CONNECTED) + err = sco_send_frame(sk, skb); + else + err = -ENOTCONN; + + release_sock(sk); + + if (err < 0) + kfree_skb(skb); + return err; +} + +static void sco_conn_defer_accept(struct hci_conn *conn, u16 setting) +{ + struct hci_dev *hdev = conn->hdev; + + BT_DBG("conn %p", conn); + + conn->state = BT_CONFIG; + + if (!lmp_esco_capable(hdev)) { + struct hci_cp_accept_conn_req cp; + + bacpy(&cp.bdaddr, &conn->dst); + cp.role = 0x00; /* Ignored */ + + hci_send_cmd(hdev, HCI_OP_ACCEPT_CONN_REQ, sizeof(cp), &cp); + } else { + struct hci_cp_accept_sync_conn_req cp; + + bacpy(&cp.bdaddr, &conn->dst); + cp.pkt_type = cpu_to_le16(conn->pkt_type); + + cp.tx_bandwidth = cpu_to_le32(0x00001f40); + cp.rx_bandwidth = cpu_to_le32(0x00001f40); + cp.content_format = cpu_to_le16(setting); + + switch (setting & SCO_AIRMODE_MASK) { + case SCO_AIRMODE_TRANSP: + if (conn->pkt_type & ESCO_2EV3) + cp.max_latency = cpu_to_le16(0x0008); + else + cp.max_latency = cpu_to_le16(0x000D); + cp.retrans_effort = 0x02; + break; + case SCO_AIRMODE_CVSD: + cp.max_latency = cpu_to_le16(0xffff); + cp.retrans_effort = 0xff; + break; + default: + /* use CVSD settings as fallback */ + cp.max_latency = cpu_to_le16(0xffff); + cp.retrans_effort = 0xff; + break; + } + + hci_send_cmd(hdev, HCI_OP_ACCEPT_SYNC_CONN_REQ, + sizeof(cp), &cp); + } +} + +static int sco_sock_recvmsg(struct socket *sock, struct msghdr *msg, + size_t len, int flags) +{ + struct sock *sk = sock->sk; + struct sco_pinfo *pi = sco_pi(sk); + + lock_sock(sk); + + if (sk->sk_state == BT_CONNECT2 && + test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) { + sco_conn_defer_accept(pi->conn->hcon, pi->setting); + sk->sk_state = BT_CONFIG; + + release_sock(sk); + return 0; + } + + release_sock(sk); + + return bt_sock_recvmsg(sock, msg, len, flags); +} + +static int sco_sock_setsockopt(struct socket *sock, int level, int optname, + sockptr_t optval, unsigned int optlen) +{ + struct sock *sk = sock->sk; + int len, err = 0; + struct bt_voice voice; + u32 opt; + struct bt_codecs *codecs; + struct hci_dev *hdev; + __u8 buffer[255]; + + BT_DBG("sk %p", sk); + + 
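+	/* Illustrative sketch (user-space side, not kernel code): a caller
+	 * selects transparent air mode before connect() with the BT_VOICE
+	 * option handled below, roughly:
+	 *
+	 *	struct bt_voice voice = { .setting = BT_VOICE_TRANSPARENT };
+	 *	setsockopt(sk, SOL_BLUETOOTH, BT_VOICE, &voice, sizeof(voice));
+	 */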
lock_sock(sk);
+
+	switch (optname) {
+
+	case BT_DEFER_SETUP:
+		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
+			err = -EINVAL;
+			break;
+		}
+
+		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+			err = -EFAULT;
+			break;
+		}
+
+		if (opt)
+			set_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+		else
+			clear_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags);
+		break;
+
+	case BT_VOICE:
+		if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
+		    sk->sk_state != BT_CONNECT2) {
+			err = -EINVAL;
+			break;
+		}
+
+		voice.setting = sco_pi(sk)->setting;
+
+		len = min_t(unsigned int, sizeof(voice), optlen);
+		if (copy_from_sockptr(&voice, optval, len)) {
+			err = -EFAULT;
+			break;
+		}
+
+		/* Explicitly check for these values */
+		if (voice.setting != BT_VOICE_TRANSPARENT &&
+		    voice.setting != BT_VOICE_CVSD_16BIT) {
+			err = -EINVAL;
+			break;
+		}
+
+		sco_pi(sk)->setting = voice.setting;
+		hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src,
+				     BDADDR_BREDR);
+		if (!hdev) {
+			err = -EBADFD;
+			break;
+		}
+		if (enhanced_sync_conn_capable(hdev) &&
+		    voice.setting == BT_VOICE_TRANSPARENT)
+			sco_pi(sk)->codec.id = BT_CODEC_TRANSPARENT;
+		hci_dev_put(hdev);
+		break;
+
+	case BT_PKT_STATUS:
+		if (copy_from_sockptr(&opt, optval, sizeof(u32))) {
+			err = -EFAULT;
+			break;
+		}
+
+		if (opt)
+			sco_pi(sk)->cmsg_mask |= SCO_CMSG_PKT_STATUS;
+		else
+			sco_pi(sk)->cmsg_mask &= ~SCO_CMSG_PKT_STATUS;
+		break;
+
+	case BT_CODEC:
+		if (sk->sk_state != BT_OPEN && sk->sk_state != BT_BOUND &&
+		    sk->sk_state != BT_CONNECT2) {
+			err = -EINVAL;
+			break;
+		}
+
+		hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src,
+				     BDADDR_BREDR);
+		if (!hdev) {
+			err = -EBADFD;
+			break;
+		}
+
+		if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) {
+			hci_dev_put(hdev);
+			err = -EOPNOTSUPP;
+			break;
+		}
+
+		if (!hdev->get_data_path_id) {
+			hci_dev_put(hdev);
+			err = -EOPNOTSUPP;
+			break;
+		}
+
+		if (optlen < sizeof(struct bt_codecs) ||
+		    optlen > sizeof(buffer)) {
+			hci_dev_put(hdev);
+			err = -EINVAL;
+			break;
+		}
+
+		if (copy_from_sockptr(buffer, optval, optlen)) {
+			hci_dev_put(hdev);
+			err = -EFAULT;
+			break;
+		}
+
+		codecs = (void *)buffer;
+
+		if (codecs->num_codecs > 1) {
+			hci_dev_put(hdev);
+			err = -EINVAL;
+			break;
+		}
+
+		sco_pi(sk)->codec = codecs->codecs[0];
+		hci_dev_put(hdev);
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	release_sock(sk);
+	return err;
+}
+
+static int sco_sock_getsockopt_old(struct socket *sock, int optname,
+				   char __user *optval, int __user *optlen)
+{
+	struct sock *sk = sock->sk;
+	struct sco_options opts;
+	struct sco_conninfo cinfo;
+	int len, err = 0;
+
+	BT_DBG("sk %p", sk);
+
+	if (get_user(len, optlen))
+		return -EFAULT;
+
+	lock_sock(sk);
+
+	switch (optname) {
+	case SCO_OPTIONS:
+		if (sk->sk_state != BT_CONNECTED &&
+		    !(sk->sk_state == BT_CONNECT2 &&
+		      test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
+			err = -ENOTCONN;
+			break;
+		}
+
+		opts.mtu = sco_pi(sk)->conn->mtu;
+
+		BT_DBG("mtu %u", opts.mtu);
+
+		len = min_t(unsigned int, len, sizeof(opts));
+		if (copy_to_user(optval, (char *)&opts, len))
+			err = -EFAULT;
+
+		break;
+
+	case SCO_CONNINFO:
+		if (sk->sk_state != BT_CONNECTED &&
+		    !(sk->sk_state == BT_CONNECT2 &&
+		      test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))) {
+			err = -ENOTCONN;
+			break;
+		}
+
+		memset(&cinfo, 0, sizeof(cinfo));
+		cinfo.hci_handle = sco_pi(sk)->conn->hcon->handle;
+		memcpy(cinfo.dev_class, sco_pi(sk)->conn->hcon->dev_class, 3);
+
+		len = min_t(unsigned int, len, sizeof(cinfo));
+		if (copy_to_user(optval, (char *)&cinfo, len))
+			err = -EFAULT;
+
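+		/* As with SCO_OPTIONS above, len was clamped with min_t(),
+		 * so a caller passing a short buffer gets a truncated copy
+		 * and success, not an error.
+		 */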
break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int sco_sock_getsockopt(struct socket *sock, int level, int optname, + char __user *optval, int __user *optlen) +{ + struct sock *sk = sock->sk; + int len, err = 0; + struct bt_voice voice; + u32 phys; + int pkt_status; + int buf_len; + struct codec_list *c; + u8 num_codecs, i, __user *ptr; + struct hci_dev *hdev; + struct hci_codec_caps *caps; + struct bt_codec codec; + + BT_DBG("sk %p", sk); + + if (level == SOL_SCO) + return sco_sock_getsockopt_old(sock, optname, optval, optlen); + + if (get_user(len, optlen)) + return -EFAULT; + + lock_sock(sk); + + switch (optname) { + + case BT_DEFER_SETUP: + if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) { + err = -EINVAL; + break; + } + + if (put_user(test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags), + (u32 __user *)optval)) + err = -EFAULT; + + break; + + case BT_VOICE: + voice.setting = sco_pi(sk)->setting; + + len = min_t(unsigned int, len, sizeof(voice)); + if (copy_to_user(optval, (char *)&voice, len)) + err = -EFAULT; + + break; + + case BT_PHY: + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + break; + } + + phys = hci_conn_get_phy(sco_pi(sk)->conn->hcon); + + if (put_user(phys, (u32 __user *) optval)) + err = -EFAULT; + break; + + case BT_PKT_STATUS: + pkt_status = (sco_pi(sk)->cmsg_mask & SCO_CMSG_PKT_STATUS); + + if (put_user(pkt_status, (int __user *)optval)) + err = -EFAULT; + break; + + case BT_SNDMTU: + case BT_RCVMTU: + if (sk->sk_state != BT_CONNECTED) { + err = -ENOTCONN; + break; + } + + if (put_user(sco_pi(sk)->conn->mtu, (u32 __user *)optval)) + err = -EFAULT; + break; + + case BT_CODEC: + num_codecs = 0; + buf_len = 0; + + hdev = hci_get_route(&sco_pi(sk)->dst, &sco_pi(sk)->src, BDADDR_BREDR); + if (!hdev) { + err = -EBADFD; + break; + } + + if (!hci_dev_test_flag(hdev, HCI_OFFLOAD_CODECS_ENABLED)) { + hci_dev_put(hdev); + err = -EOPNOTSUPP; + break; + } + + if (!hdev->get_data_path_id) { + hci_dev_put(hdev); + err = -EOPNOTSUPP; + break; + } + + release_sock(sk); + + /* find total buffer size required to copy codec + caps */ + hci_dev_lock(hdev); + list_for_each_entry(c, &hdev->local_codecs, list) { + if (c->transport != HCI_TRANSPORT_SCO_ESCO) + continue; + num_codecs++; + for (i = 0, caps = c->caps; i < c->num_caps; i++) { + buf_len += 1 + caps->len; + caps = (void *)&caps->data[caps->len]; + } + buf_len += sizeof(struct bt_codec); + } + hci_dev_unlock(hdev); + + buf_len += sizeof(struct bt_codecs); + if (buf_len > len) { + hci_dev_put(hdev); + return -ENOBUFS; + } + ptr = optval; + + if (put_user(num_codecs, ptr)) { + hci_dev_put(hdev); + return -EFAULT; + } + ptr += sizeof(num_codecs); + + /* Iterate all the codecs supported over SCO and populate + * codec data + */ + hci_dev_lock(hdev); + list_for_each_entry(c, &hdev->local_codecs, list) { + if (c->transport != HCI_TRANSPORT_SCO_ESCO) + continue; + + codec.id = c->id; + codec.cid = c->cid; + codec.vid = c->vid; + err = hdev->get_data_path_id(hdev, &codec.data_path); + if (err < 0) + break; + codec.num_caps = c->num_caps; + if (copy_to_user(ptr, &codec, sizeof(codec))) { + err = -EFAULT; + break; + } + ptr += sizeof(codec); + + /* find codec capabilities data length */ + len = 0; + for (i = 0, caps = c->caps; i < c->num_caps; i++) { + len += 1 + caps->len; + caps = (void *)&caps->data[caps->len]; + } + + /* copy codec capabilities data */ + if (len && copy_to_user(ptr, c->caps, len)) { + err = -EFAULT; + break; + } + ptr += len; + } + + 
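+		/* The codec walk is finished (or was aborted on error): drop
+		 * the device lock and reference before re-taking the socket
+		 * lock, so the two locks are never held together on this
+		 * path.
+		 */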
hci_dev_unlock(hdev); + hci_dev_put(hdev); + + lock_sock(sk); + + if (!err && put_user(buf_len, optlen)) + err = -EFAULT; + + break; + + default: + err = -ENOPROTOOPT; + break; + } + + release_sock(sk); + return err; +} + +static int sco_sock_shutdown(struct socket *sock, int how) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + sock_hold(sk); + lock_sock(sk); + + if (!sk->sk_shutdown) { + sk->sk_shutdown = SHUTDOWN_MASK; + sco_sock_clear_timer(sk); + __sco_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime && + !(current->flags & PF_EXITING)) + err = bt_sock_wait_state(sk, BT_CLOSED, + sk->sk_lingertime); + } + + release_sock(sk); + sock_put(sk); + + return err; +} + +static int sco_sock_release(struct socket *sock) +{ + struct sock *sk = sock->sk; + int err = 0; + + BT_DBG("sock %p, sk %p", sock, sk); + + if (!sk) + return 0; + + sco_sock_close(sk); + + if (sock_flag(sk, SOCK_LINGER) && READ_ONCE(sk->sk_lingertime) && + !(current->flags & PF_EXITING)) { + lock_sock(sk); + err = bt_sock_wait_state(sk, BT_CLOSED, sk->sk_lingertime); + release_sock(sk); + } + + sock_orphan(sk); + sco_sock_kill(sk); + return err; +} + +static void sco_conn_ready(struct sco_conn *conn) +{ + struct sock *parent; + struct sock *sk = conn->sk; + + BT_DBG("conn %p", conn); + + if (sk) { + lock_sock(sk); + sco_sock_clear_timer(sk); + sk->sk_state = BT_CONNECTED; + sk->sk_state_change(sk); + release_sock(sk); + } else { + sco_conn_lock(conn); + + if (!conn->hcon) { + sco_conn_unlock(conn); + return; + } + + parent = sco_get_sock_listen(&conn->hcon->src); + if (!parent) { + sco_conn_unlock(conn); + return; + } + + lock_sock(parent); + + sk = sco_sock_alloc(sock_net(parent), NULL, + BTPROTO_SCO, GFP_ATOMIC, 0); + if (!sk) { + release_sock(parent); + sco_conn_unlock(conn); + return; + } + + sco_sock_init(sk, parent); + + bacpy(&sco_pi(sk)->src, &conn->hcon->src); + bacpy(&sco_pi(sk)->dst, &conn->hcon->dst); + + hci_conn_hold(conn->hcon); + __sco_chan_add(conn, sk, parent); + + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) + sk->sk_state = BT_CONNECT2; + else + sk->sk_state = BT_CONNECTED; + + /* Wake up parent */ + parent->sk_data_ready(parent); + + release_sock(parent); + + sco_conn_unlock(conn); + } +} + +/* ----- SCO interface with lower layer (HCI) ----- */ +int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags) +{ + struct sock *sk; + int lm = 0; + + BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr); + + /* Find listening sockets */ + read_lock(&sco_sk_list.lock); + sk_for_each(sk, &sco_sk_list.head) { + if (sk->sk_state != BT_LISTEN) + continue; + + if (!bacmp(&sco_pi(sk)->src, &hdev->bdaddr) || + !bacmp(&sco_pi(sk)->src, BDADDR_ANY)) { + lm |= HCI_LM_ACCEPT; + + if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) + *flags |= HCI_PROTO_DEFER; + break; + } + } + read_unlock(&sco_sk_list.lock); + + return lm; +} + +static void sco_connect_cfm(struct hci_conn *hcon, __u8 status) +{ + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return; + + BT_DBG("hcon %p bdaddr %pMR status %u", hcon, &hcon->dst, status); + + if (!status) { + struct sco_conn *conn; + + conn = sco_conn_add(hcon); + if (conn) + sco_conn_ready(conn); + } else + sco_conn_del(hcon, bt_to_errno(status)); +} + +static void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason) +{ + if (hcon->type != SCO_LINK && hcon->type != ESCO_LINK) + return; + + BT_DBG("hcon %p reason %d", hcon, reason); + + sco_conn_del(hcon, 
bt_to_errno(reason)); +} + +void sco_recv_scodata(struct hci_conn *hcon, struct sk_buff *skb) +{ + struct sco_conn *conn = hcon->sco_data; + + if (!conn) + goto drop; + + BT_DBG("conn %p len %u", conn, skb->len); + + if (skb->len) { + sco_recv_frame(conn, skb); + return; + } + +drop: + kfree_skb(skb); +} + +static struct hci_cb sco_cb = { + .name = "SCO", + .connect_cfm = sco_connect_cfm, + .disconn_cfm = sco_disconn_cfm, +}; + +static int sco_debugfs_show(struct seq_file *f, void *p) +{ + struct sock *sk; + + read_lock(&sco_sk_list.lock); + + sk_for_each(sk, &sco_sk_list.head) { + seq_printf(f, "%pMR %pMR %d\n", &sco_pi(sk)->src, + &sco_pi(sk)->dst, sk->sk_state); + } + + read_unlock(&sco_sk_list.lock); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(sco_debugfs); + +static struct dentry *sco_debugfs; + +static const struct proto_ops sco_sock_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .release = sco_sock_release, + .bind = sco_sock_bind, + .connect = sco_sock_connect, + .listen = sco_sock_listen, + .accept = sco_sock_accept, + .getname = sco_sock_getname, + .sendmsg = sco_sock_sendmsg, + .recvmsg = sco_sock_recvmsg, + .poll = bt_sock_poll, + .ioctl = bt_sock_ioctl, + .gettstamp = sock_gettstamp, + .mmap = sock_no_mmap, + .socketpair = sock_no_socketpair, + .shutdown = sco_sock_shutdown, + .setsockopt = sco_sock_setsockopt, + .getsockopt = sco_sock_getsockopt +}; + +static const struct net_proto_family sco_sock_family_ops = { + .family = PF_BLUETOOTH, + .owner = THIS_MODULE, + .create = sco_sock_create, +}; + +int __init sco_init(void) +{ + int err; + + BUILD_BUG_ON(sizeof(struct sockaddr_sco) > sizeof(struct sockaddr)); + + err = proto_register(&sco_proto, 0); + if (err < 0) + return err; + + err = bt_sock_register(BTPROTO_SCO, &sco_sock_family_ops); + if (err < 0) { + BT_ERR("SCO socket registration failed"); + goto error; + } + + err = bt_procfs_init(&init_net, "sco", &sco_sk_list, NULL); + if (err < 0) { + BT_ERR("Failed to create SCO proc file"); + bt_sock_unregister(BTPROTO_SCO); + goto error; + } + + BT_INFO("SCO socket layer initialized"); + + hci_register_cb(&sco_cb); + + if (IS_ERR_OR_NULL(bt_debugfs)) + return 0; + + sco_debugfs = debugfs_create_file("sco", 0444, bt_debugfs, + NULL, &sco_debugfs_fops); + + return 0; + +error: + proto_unregister(&sco_proto); + return err; +} + +void sco_exit(void) +{ + bt_procfs_cleanup(&init_net, "sco"); + + debugfs_remove(sco_debugfs); + + hci_unregister_cb(&sco_cb); + + bt_sock_unregister(BTPROTO_SCO); + + proto_unregister(&sco_proto); +} + +module_param(disable_esco, bool, 0644); +MODULE_PARM_DESC(disable_esco, "Disable eSCO connection creation"); diff --git a/net/bluetooth/selftest.c b/net/bluetooth/selftest.c new file mode 100644 index 000000000..f49604d44 --- /dev/null +++ b/net/bluetooth/selftest.c @@ -0,0 +1,309 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + + Copyright (C) 2014 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. 
+ IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <linux/debugfs.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +#include "ecdh_helper.h" +#include "smp.h" +#include "selftest.h" + +#if IS_ENABLED(CONFIG_BT_SELFTEST_ECDH) + +static const u8 priv_a_1[32] __initconst = { + 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58, + 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a, + 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74, + 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f, +}; +static const u8 priv_b_1[32] __initconst = { + 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b, + 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59, + 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90, + 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55, +}; +static const u8 pub_a_1[64] __initconst = { + 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, + 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, + 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, + 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20, + + 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74, + 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76, + 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63, + 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc, +}; +static const u8 pub_b_1[64] __initconst = { + 0x90, 0xa1, 0xaa, 0x2f, 0xb2, 0x77, 0x90, 0x55, + 0x9f, 0xa6, 0x15, 0x86, 0xfd, 0x8a, 0xb5, 0x47, + 0x00, 0x4c, 0x9e, 0xf1, 0x84, 0x22, 0x59, 0x09, + 0x96, 0x1d, 0xaf, 0x1f, 0xf0, 0xf0, 0xa1, 0x1e, + + 0x4a, 0x21, 0xb1, 0x15, 0xf9, 0xaf, 0x89, 0x5f, + 0x76, 0x36, 0x8e, 0xe2, 0x30, 0x11, 0x2d, 0x47, + 0x60, 0x51, 0xb8, 0x9a, 0x3a, 0x70, 0x56, 0x73, + 0x37, 0xad, 0x9d, 0x42, 0x3e, 0xf3, 0x55, 0x4c, +}; +static const u8 dhkey_1[32] __initconst = { + 0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86, + 0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99, + 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, + 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec, +}; + +static const u8 priv_a_2[32] __initconst = { + 0x63, 0x76, 0x45, 0xd0, 0xf7, 0x73, 0xac, 0xb7, + 0xff, 0xdd, 0x03, 0x72, 0xb9, 0x72, 0x85, 0xb4, + 0x41, 0xb6, 0x5d, 0x0c, 0x5d, 0x54, 0x84, 0x60, + 0x1a, 0xa3, 0x9a, 0x3c, 0x69, 0x16, 0xa5, 0x06, +}; +static const u8 priv_b_2[32] __initconst = { + 0xba, 0x30, 0x55, 0x50, 0x19, 0xa2, 0xca, 0xa3, + 0xa5, 0x29, 0x08, 0xc6, 0xb5, 0x03, 0x88, 0x7e, + 0x03, 0x2b, 0x50, 0x73, 0xd4, 0x2e, 0x50, 0x97, + 0x64, 0xcd, 0x72, 0x0d, 0x67, 0xa0, 0x9a, 0x52, +}; +static const u8 pub_a_2[64] __initconst = { + 0xdd, 0x78, 0x5c, 0x74, 0x03, 0x9b, 0x7e, 0x98, + 0xcb, 0x94, 0x87, 0x4a, 0xad, 0xfa, 0xf8, 0xd5, + 0x43, 0x3e, 0x5c, 0xaf, 0xea, 0xb5, 0x4c, 0xf4, + 0x9e, 0x80, 0x79, 0x57, 0x7b, 0xa4, 0x31, 0x2c, + + 0x4f, 0x5d, 0x71, 0x43, 0x77, 0x43, 0xf8, 0xea, + 0xd4, 0x3e, 0xbd, 0x17, 0x91, 0x10, 0x21, 0xd0, + 0x1f, 0x87, 0x43, 0x8e, 0x40, 0xe2, 0x52, 0xcd, + 0xbe, 0xdf, 0x98, 0x38, 0x18, 0x12, 0x95, 0x91, +}; +static const u8 pub_b_2[64] __initconst = { + 0xcc, 0x00, 0x65, 0xe1, 0xf5, 0x6c, 0x0d, 0xcf, + 0xec, 0x96, 0x47, 0x20, 0x66, 0xc9, 0xdb, 0x84, + 0x81, 0x75, 0xa8, 0x4d, 0xc0, 0xdf, 0xc7, 0x9d, + 
0x1b, 0x3f, 0x3d, 0xf2, 0x3f, 0xe4, 0x65, 0xf4, + + 0x79, 0xb2, 0xec, 0xd8, 0xca, 0x55, 0xa1, 0xa8, + 0x43, 0x4d, 0x6b, 0xca, 0x10, 0xb0, 0xc2, 0x01, + 0xc2, 0x33, 0x4e, 0x16, 0x24, 0xc4, 0xef, 0xee, + 0x99, 0xd8, 0xbb, 0xbc, 0x48, 0xd0, 0x01, 0x02, +}; +static const u8 dhkey_2[32] __initconst = { + 0x69, 0xeb, 0x21, 0x32, 0xf2, 0xc6, 0x05, 0x41, + 0x60, 0x19, 0xcd, 0x5e, 0x94, 0xe1, 0xe6, 0x5f, + 0x33, 0x07, 0xe3, 0x38, 0x4b, 0x68, 0xe5, 0x62, + 0x3f, 0x88, 0x6d, 0x2f, 0x3a, 0x84, 0x85, 0xab, +}; + +static const u8 priv_a_3[32] __initconst = { + 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58, + 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a, + 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74, + 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f, +}; +static const u8 pub_a_3[64] __initconst = { + 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, + 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, + 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, + 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20, + + 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74, + 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76, + 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63, + 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc, +}; +static const u8 dhkey_3[32] __initconst = { + 0x2d, 0xab, 0x00, 0x48, 0xcb, 0xb3, 0x7b, 0xda, + 0x55, 0x7b, 0x8b, 0x72, 0xa8, 0x57, 0x87, 0xc3, + 0x87, 0x27, 0x99, 0x32, 0xfc, 0x79, 0x5f, 0xae, + 0x7c, 0x1c, 0xf9, 0x49, 0xe6, 0xd7, 0xaa, 0x70, +}; + +static int __init test_ecdh_sample(struct crypto_kpp *tfm, const u8 priv_a[32], + const u8 priv_b[32], const u8 pub_a[64], + const u8 pub_b[64], const u8 dhkey[32]) +{ + u8 *tmp, *dhkey_a, *dhkey_b; + int ret; + + tmp = kmalloc(64, GFP_KERNEL); + if (!tmp) + return -EINVAL; + + dhkey_a = &tmp[0]; + dhkey_b = &tmp[32]; + + ret = set_ecdh_privkey(tfm, priv_a); + if (ret) + goto out; + + ret = compute_ecdh_secret(tfm, pub_b, dhkey_a); + if (ret) + goto out; + + if (memcmp(dhkey_a, dhkey, 32)) { + ret = -EINVAL; + goto out; + } + + ret = set_ecdh_privkey(tfm, priv_b); + if (ret) + goto out; + + ret = compute_ecdh_secret(tfm, pub_a, dhkey_b); + if (ret) + goto out; + + if (memcmp(dhkey_b, dhkey, 32)) + ret = -EINVAL; + /* fall through*/ +out: + kfree(tmp); + return ret; +} + +static char test_ecdh_buffer[32]; + +static ssize_t test_ecdh_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(user_buf, count, ppos, test_ecdh_buffer, + strlen(test_ecdh_buffer)); +} + +static const struct file_operations test_ecdh_fops = { + .open = simple_open, + .read = test_ecdh_read, + .llseek = default_llseek, +}; + +static int __init test_ecdh(void) +{ + struct crypto_kpp *tfm; + ktime_t calltime, delta, rettime; + unsigned long long duration = 0; + int err; + + calltime = ktime_get(); + + tfm = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); + if (IS_ERR(tfm)) { + BT_ERR("Unable to create ECDH crypto context"); + err = PTR_ERR(tfm); + goto done; + } + + err = test_ecdh_sample(tfm, priv_a_1, priv_b_1, pub_a_1, pub_b_1, + dhkey_1); + if (err) { + BT_ERR("ECDH sample 1 failed"); + goto done; + } + + err = test_ecdh_sample(tfm, priv_a_2, priv_b_2, pub_a_2, pub_b_2, + dhkey_2); + if (err) { + BT_ERR("ECDH sample 2 failed"); + goto done; + } + + err = test_ecdh_sample(tfm, priv_a_3, priv_a_3, pub_a_3, pub_a_3, + dhkey_3); + if (err) { + BT_ERR("ECDH sample 3 failed"); + goto done; + } + + crypto_free_kpp(tfm); + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 
10; + + BT_INFO("ECDH test passed in %llu usecs", duration); + +done: + if (!err) + snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), + "PASS (%llu usecs)\n", duration); + else + snprintf(test_ecdh_buffer, sizeof(test_ecdh_buffer), "FAIL\n"); + + debugfs_create_file("selftest_ecdh", 0444, bt_debugfs, NULL, + &test_ecdh_fops); + + return err; +} + +#else + +static inline int test_ecdh(void) +{ + return 0; +} + +#endif + +static int __init run_selftest(void) +{ + int err; + + BT_INFO("Starting self testing"); + + err = test_ecdh(); + if (err) + goto done; + + err = bt_selftest_smp(); + +done: + BT_INFO("Finished self testing"); + + return err; +} + +#if IS_MODULE(CONFIG_BT) + +/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=m and is just a + * wrapper to allow running this at module init. + * + * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all. + */ +int __init bt_selftest(void) +{ + return run_selftest(); +} + +#else + +/* This is run when CONFIG_BT_SELFTEST=y and CONFIG_BT=y and is run + * via late_initcall() as last item in the initialization sequence. + * + * If CONFIG_BT_SELFTEST=n, then this code is not compiled at all. + */ +static int __init bt_selftest_init(void) +{ + return run_selftest(); +} +late_initcall(bt_selftest_init); + +#endif diff --git a/net/bluetooth/selftest.h b/net/bluetooth/selftest.h new file mode 100644 index 000000000..2aa0a346a --- /dev/null +++ b/net/bluetooth/selftest.h @@ -0,0 +1,45 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2014 Intel Corporation + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#if IS_ENABLED(CONFIG_BT_SELFTEST) && IS_MODULE(CONFIG_BT) + +/* When CONFIG_BT_SELFTEST=y and the CONFIG_BT=m, then the self testing + * is run at module loading time. + */ +int bt_selftest(void); + +#else + +/* When CONFIG_BT_SELFTEST=y and CONFIG_BT=y, then the self testing + * is run via late_initcall() to make sure that subsys_initcall() of + * the Bluetooth subsystem and device_initcall() of the Crypto subsystem + * do not clash. + * + * When CONFIG_BT_SELFTEST=n, then this turns into an empty call that + * has no impact. + */ +static inline int bt_selftest(void) +{ + return 0; +} + +#endif diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c new file mode 100644 index 000000000..ecb005bce --- /dev/null +++ b/net/bluetooth/smp.c @@ -0,0 +1,3848 @@ +/* + BlueZ - Bluetooth protocol stack for Linux + Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). 
+ + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#include <linux/debugfs.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <crypto/aes.h> +#include <crypto/algapi.h> +#include <crypto/hash.h> +#include <crypto/kpp.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> +#include <net/bluetooth/l2cap.h> +#include <net/bluetooth/mgmt.h> + +#include "ecdh_helper.h" +#include "smp.h" + +#define SMP_DEV(hdev) \ + ((struct smp_dev *)((struct l2cap_chan *)((hdev)->smp_data))->data) + +/* Low-level debug macros to be used for stuff that we don't want + * accidentally in dmesg, i.e. the values of the various crypto keys + * and the inputs & outputs of crypto functions. + */ +#ifdef DEBUG +#define SMP_DBG(fmt, ...) printk(KERN_DEBUG "%s: " fmt, __func__, \ + ##__VA_ARGS__) +#else +#define SMP_DBG(fmt, ...) no_printk(KERN_DEBUG "%s: " fmt, __func__, \ + ##__VA_ARGS__) +#endif + +#define SMP_ALLOW_CMD(smp, code) set_bit(code, &smp->allow_cmd) + +/* Keys which are not distributed with Secure Connections */ +#define SMP_SC_NO_DIST (SMP_DIST_ENC_KEY | SMP_DIST_LINK_KEY) + +#define SMP_TIMEOUT msecs_to_jiffies(30000) + +#define AUTH_REQ_MASK(dev) (hci_dev_test_flag(dev, HCI_SC_ENABLED) ? 
\ + 0x3f : 0x07) +#define KEY_DIST_MASK 0x07 + +/* Maximum message length that can be passed to aes_cmac */ +#define CMAC_MSG_MAX 80 + +enum { + SMP_FLAG_TK_VALID, + SMP_FLAG_CFM_PENDING, + SMP_FLAG_MITM_AUTH, + SMP_FLAG_COMPLETE, + SMP_FLAG_INITIATOR, + SMP_FLAG_SC, + SMP_FLAG_REMOTE_PK, + SMP_FLAG_DEBUG_KEY, + SMP_FLAG_WAIT_USER, + SMP_FLAG_DHKEY_PENDING, + SMP_FLAG_REMOTE_OOB, + SMP_FLAG_LOCAL_OOB, + SMP_FLAG_CT2, +}; + +struct smp_dev { + /* Secure Connections OOB data */ + bool local_oob; + u8 local_pk[64]; + u8 local_rand[16]; + bool debug_key; + + struct crypto_shash *tfm_cmac; + struct crypto_kpp *tfm_ecdh; +}; + +struct smp_chan { + struct l2cap_conn *conn; + struct delayed_work security_timer; + unsigned long allow_cmd; /* Bitmask of allowed commands */ + + u8 preq[7]; /* SMP Pairing Request */ + u8 prsp[7]; /* SMP Pairing Response */ + u8 prnd[16]; /* SMP Pairing Random (local) */ + u8 rrnd[16]; /* SMP Pairing Random (remote) */ + u8 pcnf[16]; /* SMP Pairing Confirm */ + u8 tk[16]; /* SMP Temporary Key */ + u8 rr[16]; /* Remote OOB ra/rb value */ + u8 lr[16]; /* Local OOB ra/rb value */ + u8 enc_key_size; + u8 remote_key_dist; + bdaddr_t id_addr; + u8 id_addr_type; + u8 irk[16]; + struct smp_csrk *csrk; + struct smp_csrk *responder_csrk; + struct smp_ltk *ltk; + struct smp_ltk *responder_ltk; + struct smp_irk *remote_irk; + u8 *link_key; + unsigned long flags; + u8 method; + u8 passkey_round; + + /* Secure Connections variables */ + u8 local_pk[64]; + u8 remote_pk[64]; + u8 dhkey[32]; + u8 mackey[16]; + + struct crypto_shash *tfm_cmac; + struct crypto_kpp *tfm_ecdh; +}; + +/* These debug key values are defined in the SMP section of the core + * specification. debug_pk is the public debug key and debug_sk the + * private debug key. + */ +static const u8 debug_pk[64] = { + 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, + 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, + 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, + 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20, + + 0x8b, 0xd2, 0x89, 0x15, 0xd0, 0x8e, 0x1c, 0x74, + 0x24, 0x30, 0xed, 0x8f, 0xc2, 0x45, 0x63, 0x76, + 0x5c, 0x15, 0x52, 0x5a, 0xbf, 0x9a, 0x32, 0x63, + 0x6d, 0xeb, 0x2a, 0x65, 0x49, 0x9c, 0x80, 0xdc, +}; + +static const u8 debug_sk[32] = { + 0xbd, 0x1a, 0x3c, 0xcd, 0xa6, 0xb8, 0x99, 0x58, + 0x99, 0xb7, 0x40, 0xeb, 0x7b, 0x60, 0xff, 0x4a, + 0x50, 0x3f, 0x10, 0xd2, 0xe3, 0xb3, 0xc9, 0x74, + 0x38, 0x5f, 0xc5, 0xa3, 0xd4, 0xf6, 0x49, 0x3f, +}; + +static inline void swap_buf(const u8 *src, u8 *dst, size_t len) +{ + size_t i; + + for (i = 0; i < len; i++) + dst[len - 1 - i] = src[i]; +} + +/* The following functions map to the LE SC SMP crypto functions + * AES-CMAC, f4, f5, f6, g2 and h6. 
+ */ + +static int aes_cmac(struct crypto_shash *tfm, const u8 k[16], const u8 *m, + size_t len, u8 mac[16]) +{ + uint8_t tmp[16], mac_msb[16], msg_msb[CMAC_MSG_MAX]; + int err; + + if (len > CMAC_MSG_MAX) + return -EFBIG; + + if (!tfm) { + BT_ERR("tfm %p", tfm); + return -EINVAL; + } + + /* Swap key and message from LSB to MSB */ + swap_buf(k, tmp, 16); + swap_buf(m, msg_msb, len); + + SMP_DBG("msg (len %zu) %*phN", len, (int) len, m); + SMP_DBG("key %16phN", k); + + err = crypto_shash_setkey(tfm, tmp, 16); + if (err) { + BT_ERR("cipher setkey failed: %d", err); + return err; + } + + err = crypto_shash_tfm_digest(tfm, msg_msb, len, mac_msb); + if (err) { + BT_ERR("Hash computation error %d", err); + return err; + } + + swap_buf(mac_msb, mac, 16); + + SMP_DBG("mac %16phN", mac); + + return 0; +} + +static int smp_f4(struct crypto_shash *tfm_cmac, const u8 u[32], + const u8 v[32], const u8 x[16], u8 z, u8 res[16]) +{ + u8 m[65]; + int err; + + SMP_DBG("u %32phN", u); + SMP_DBG("v %32phN", v); + SMP_DBG("x %16phN z %02x", x, z); + + m[0] = z; + memcpy(m + 1, v, 32); + memcpy(m + 33, u, 32); + + err = aes_cmac(tfm_cmac, x, m, sizeof(m), res); + if (err) + return err; + + SMP_DBG("res %16phN", res); + + return err; +} + +static int smp_f5(struct crypto_shash *tfm_cmac, const u8 w[32], + const u8 n1[16], const u8 n2[16], const u8 a1[7], + const u8 a2[7], u8 mackey[16], u8 ltk[16]) +{ + /* The btle, salt and length "magic" values are as defined in + * the SMP section of the Bluetooth core specification. In ASCII + * the btle value ends up being 'btle'. The salt is just a + * random number whereas length is the value 256 in little + * endian format. + */ + const u8 btle[4] = { 0x65, 0x6c, 0x74, 0x62 }; + const u8 salt[16] = { 0xbe, 0x83, 0x60, 0x5a, 0xdb, 0x0b, 0x37, 0x60, + 0x38, 0xa5, 0xf5, 0xaa, 0x91, 0x83, 0x88, 0x6c }; + const u8 length[2] = { 0x00, 0x01 }; + u8 m[53], t[16]; + int err; + + SMP_DBG("w %32phN", w); + SMP_DBG("n1 %16phN n2 %16phN", n1, n2); + SMP_DBG("a1 %7phN a2 %7phN", a1, a2); + + err = aes_cmac(tfm_cmac, salt, w, 32, t); + if (err) + return err; + + SMP_DBG("t %16phN", t); + + memcpy(m, length, 2); + memcpy(m + 2, a2, 7); + memcpy(m + 9, a1, 7); + memcpy(m + 16, n2, 16); + memcpy(m + 32, n1, 16); + memcpy(m + 48, btle, 4); + + m[52] = 0; /* Counter */ + + err = aes_cmac(tfm_cmac, t, m, sizeof(m), mackey); + if (err) + return err; + + SMP_DBG("mackey %16phN", mackey); + + m[52] = 1; /* Counter */ + + err = aes_cmac(tfm_cmac, t, m, sizeof(m), ltk); + if (err) + return err; + + SMP_DBG("ltk %16phN", ltk); + + return 0; +} + +static int smp_f6(struct crypto_shash *tfm_cmac, const u8 w[16], + const u8 n1[16], const u8 n2[16], const u8 r[16], + const u8 io_cap[3], const u8 a1[7], const u8 a2[7], + u8 res[16]) +{ + u8 m[65]; + int err; + + SMP_DBG("w %16phN", w); + SMP_DBG("n1 %16phN n2 %16phN", n1, n2); + SMP_DBG("r %16phN io_cap %3phN a1 %7phN a2 %7phN", r, io_cap, a1, a2); + + memcpy(m, a2, 7); + memcpy(m + 7, a1, 7); + memcpy(m + 14, io_cap, 3); + memcpy(m + 17, r, 16); + memcpy(m + 33, n2, 16); + memcpy(m + 49, n1, 16); + + err = aes_cmac(tfm_cmac, w, m, sizeof(m), res); + if (err) + return err; + + SMP_DBG("res %16phN", res); + + return err; +} + +static int smp_g2(struct crypto_shash *tfm_cmac, const u8 u[32], const u8 v[32], + const u8 x[16], const u8 y[16], u32 *val) +{ + u8 m[80], tmp[16]; + int err; + + SMP_DBG("u %32phN", u); + SMP_DBG("v %32phN", v); + SMP_DBG("x %16phN y %16phN", x, y); + + memcpy(m, y, 16); + memcpy(m + 16, v, 32); + memcpy(m + 48, u, 32); + + err 
= aes_cmac(tfm_cmac, x, m, sizeof(m), tmp); + if (err) + return err; + + *val = get_unaligned_le32(tmp); + *val %= 1000000; + + SMP_DBG("val %06u", *val); + + return 0; +} + +static int smp_h6(struct crypto_shash *tfm_cmac, const u8 w[16], + const u8 key_id[4], u8 res[16]) +{ + int err; + + SMP_DBG("w %16phN key_id %4phN", w, key_id); + + err = aes_cmac(tfm_cmac, w, key_id, 4, res); + if (err) + return err; + + SMP_DBG("res %16phN", res); + + return err; +} + +static int smp_h7(struct crypto_shash *tfm_cmac, const u8 w[16], + const u8 salt[16], u8 res[16]) +{ + int err; + + SMP_DBG("w %16phN salt %16phN", w, salt); + + err = aes_cmac(tfm_cmac, salt, w, 16, res); + if (err) + return err; + + SMP_DBG("res %16phN", res); + + return err; +} + +/* The following functions map to the legacy SMP crypto functions e, c1, + * s1 and ah. + */ + +static int smp_e(const u8 *k, u8 *r) +{ + struct crypto_aes_ctx ctx; + uint8_t tmp[16], data[16]; + int err; + + SMP_DBG("k %16phN r %16phN", k, r); + + /* The most significant octet of key corresponds to k[0] */ + swap_buf(k, tmp, 16); + + err = aes_expandkey(&ctx, tmp, 16); + if (err) { + BT_ERR("cipher setkey failed: %d", err); + return err; + } + + /* Most significant octet of plaintextData corresponds to data[0] */ + swap_buf(r, data, 16); + + aes_encrypt(&ctx, data, data); + + /* Most significant octet of encryptedData corresponds to data[0] */ + swap_buf(data, r, 16); + + SMP_DBG("r %16phN", r); + + memzero_explicit(&ctx, sizeof(ctx)); + return err; +} + +static int smp_c1(const u8 k[16], + const u8 r[16], const u8 preq[7], const u8 pres[7], u8 _iat, + const bdaddr_t *ia, u8 _rat, const bdaddr_t *ra, u8 res[16]) +{ + u8 p1[16], p2[16]; + int err; + + SMP_DBG("k %16phN r %16phN", k, r); + SMP_DBG("iat %u ia %6phN rat %u ra %6phN", _iat, ia, _rat, ra); + SMP_DBG("preq %7phN pres %7phN", preq, pres); + + memset(p1, 0, 16); + + /* p1 = pres || preq || _rat || _iat */ + p1[0] = _iat; + p1[1] = _rat; + memcpy(p1 + 2, preq, 7); + memcpy(p1 + 9, pres, 7); + + SMP_DBG("p1 %16phN", p1); + + /* res = r XOR p1 */ + crypto_xor_cpy(res, r, p1, sizeof(p1)); + + /* res = e(k, res) */ + err = smp_e(k, res); + if (err) { + BT_ERR("Encrypt data error"); + return err; + } + + /* p2 = padding || ia || ra */ + memcpy(p2, ra, 6); + memcpy(p2 + 6, ia, 6); + memset(p2 + 12, 0, 4); + + SMP_DBG("p2 %16phN", p2); + + /* res = res XOR p2 */ + crypto_xor(res, p2, sizeof(p2)); + + /* res = e(k, res) */ + err = smp_e(k, res); + if (err) + BT_ERR("Encrypt data error"); + + return err; +} + +static int smp_s1(const u8 k[16], + const u8 r1[16], const u8 r2[16], u8 _r[16]) +{ + int err; + + /* Just least significant octets from r1 and r2 are considered */ + memcpy(_r, r2, 8); + memcpy(_r + 8, r1, 8); + + err = smp_e(k, _r); + if (err) + BT_ERR("Encrypt data error"); + + return err; +} + +static int smp_ah(const u8 irk[16], const u8 r[3], u8 res[3]) +{ + u8 _res[16]; + int err; + + /* r' = padding || r */ + memcpy(_res, r, 3); + memset(_res + 3, 0, 13); + + err = smp_e(irk, _res); + if (err) { + BT_ERR("Encrypt error"); + return err; + } + + /* The output of the random address function ah is: + * ah(k, r) = e(k, r') mod 2^24 + * The output of the security function e is then truncated to 24 bits + * by taking the least significant 24 bits of the output of e as the + * result of ah. 
+ */ + memcpy(res, _res, 3); + + return 0; +} + +bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], + const bdaddr_t *bdaddr) +{ + struct l2cap_chan *chan = hdev->smp_data; + u8 hash[3]; + int err; + + if (!chan || !chan->data) + return false; + + bt_dev_dbg(hdev, "RPA %pMR IRK %*phN", bdaddr, 16, irk); + + err = smp_ah(irk, &bdaddr->b[3], hash); + if (err) + return false; + + return !crypto_memneq(bdaddr->b, hash, 3); +} + +int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa) +{ + struct l2cap_chan *chan = hdev->smp_data; + int err; + + if (!chan || !chan->data) + return -EOPNOTSUPP; + + get_random_bytes(&rpa->b[3], 3); + + rpa->b[5] &= 0x3f; /* Clear two most significant bits */ + rpa->b[5] |= 0x40; /* Set second most significant bit */ + + err = smp_ah(irk, &rpa->b[3], rpa->b); + if (err < 0) + return err; + + bt_dev_dbg(hdev, "RPA %pMR", rpa); + + return 0; +} + +int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]) +{ + struct l2cap_chan *chan = hdev->smp_data; + struct smp_dev *smp; + int err; + + if (!chan || !chan->data) + return -EOPNOTSUPP; + + smp = chan->data; + + if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { + bt_dev_dbg(hdev, "Using debug keys"); + err = set_ecdh_privkey(smp->tfm_ecdh, debug_sk); + if (err) + return err; + memcpy(smp->local_pk, debug_pk, 64); + smp->debug_key = true; + } else { + while (true) { + /* Generate key pair for Secure Connections */ + err = generate_ecdh_keys(smp->tfm_ecdh, smp->local_pk); + if (err) + return err; + + /* This is unlikely, but we need to check that + * we didn't accidentally generate a debug key. + */ + if (crypto_memneq(smp->local_pk, debug_pk, 64)) + break; + } + smp->debug_key = false; + } + + SMP_DBG("OOB Public Key X: %32phN", smp->local_pk); + SMP_DBG("OOB Public Key Y: %32phN", smp->local_pk + 32); + + get_random_bytes(smp->local_rand, 16); + + err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->local_pk, + smp->local_rand, 0, hash); + if (err < 0) + return err; + + memcpy(rand, smp->local_rand, 16); + + smp->local_oob = true; + + return 0; +} + +static void smp_send_cmd(struct l2cap_conn *conn, u8 code, u16 len, void *data) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp; + struct kvec iv[2]; + struct msghdr msg; + + if (!chan) + return; + + bt_dev_dbg(conn->hcon->hdev, "code 0x%2.2x", code); + + iv[0].iov_base = &code; + iv[0].iov_len = 1; + + iv[1].iov_base = data; + iv[1].iov_len = len; + + memset(&msg, 0, sizeof(msg)); + + iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iv, 2, 1 + len); + + l2cap_chan_send(chan, &msg, 1 + len); + + if (!chan->data) + return; + + smp = chan->data; + + cancel_delayed_work_sync(&smp->security_timer); + schedule_delayed_work(&smp->security_timer, SMP_TIMEOUT); +} + +static u8 authreq_to_seclevel(u8 authreq) +{ + if (authreq & SMP_AUTH_MITM) { + if (authreq & SMP_AUTH_SC) + return BT_SECURITY_FIPS; + else + return BT_SECURITY_HIGH; + } else { + return BT_SECURITY_MEDIUM; + } +} + +static __u8 seclevel_to_authreq(__u8 sec_level) +{ + switch (sec_level) { + case BT_SECURITY_FIPS: + case BT_SECURITY_HIGH: + return SMP_AUTH_MITM | SMP_AUTH_BONDING; + case BT_SECURITY_MEDIUM: + return SMP_AUTH_BONDING; + default: + return SMP_AUTH_NONE; + } +} + +static void build_pairing_cmd(struct l2cap_conn *conn, + struct smp_cmd_pairing *req, + struct smp_cmd_pairing *rsp, __u8 authreq) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + u8 
local_dist = 0, remote_dist = 0, oob_flag = SMP_OOB_NOT_PRESENT; + + if (hci_dev_test_flag(hdev, HCI_BONDABLE)) { + local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; + remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; + authreq |= SMP_AUTH_BONDING; + } else { + authreq &= ~SMP_AUTH_BONDING; + } + + if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING)) + remote_dist |= SMP_DIST_ID_KEY; + + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) + local_dist |= SMP_DIST_ID_KEY; + + if (hci_dev_test_flag(hdev, HCI_SC_ENABLED) && + (authreq & SMP_AUTH_SC)) { + struct oob_data *oob_data; + u8 bdaddr_type; + + if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) { + local_dist |= SMP_DIST_LINK_KEY; + remote_dist |= SMP_DIST_LINK_KEY; + } + + if (hcon->dst_type == ADDR_LE_DEV_PUBLIC) + bdaddr_type = BDADDR_LE_PUBLIC; + else + bdaddr_type = BDADDR_LE_RANDOM; + + oob_data = hci_find_remote_oob_data(hdev, &hcon->dst, + bdaddr_type); + if (oob_data && oob_data->present) { + set_bit(SMP_FLAG_REMOTE_OOB, &smp->flags); + oob_flag = SMP_OOB_PRESENT; + memcpy(smp->rr, oob_data->rand256, 16); + memcpy(smp->pcnf, oob_data->hash256, 16); + SMP_DBG("OOB Remote Confirmation: %16phN", smp->pcnf); + SMP_DBG("OOB Remote Random: %16phN", smp->rr); + } + + } else { + authreq &= ~SMP_AUTH_SC; + } + + if (rsp == NULL) { + req->io_capability = conn->hcon->io_capability; + req->oob_flag = oob_flag; + req->max_key_size = hdev->le_max_key_size; + req->init_key_dist = local_dist; + req->resp_key_dist = remote_dist; + req->auth_req = (authreq & AUTH_REQ_MASK(hdev)); + + smp->remote_key_dist = remote_dist; + return; + } + + rsp->io_capability = conn->hcon->io_capability; + rsp->oob_flag = oob_flag; + rsp->max_key_size = hdev->le_max_key_size; + rsp->init_key_dist = req->init_key_dist & remote_dist; + rsp->resp_key_dist = req->resp_key_dist & local_dist; + rsp->auth_req = (authreq & AUTH_REQ_MASK(hdev)); + + smp->remote_key_dist = rsp->init_key_dist; +} + +static u8 check_enc_key_size(struct l2cap_conn *conn, __u8 max_key_size) +{ + struct l2cap_chan *chan = conn->smp; + struct hci_dev *hdev = conn->hcon->hdev; + struct smp_chan *smp = chan->data; + + if (conn->hcon->pending_sec_level == BT_SECURITY_FIPS && + max_key_size != SMP_MAX_ENC_KEY_SIZE) + return SMP_ENC_KEY_SIZE; + + if (max_key_size > hdev->le_max_key_size || + max_key_size < SMP_MIN_ENC_KEY_SIZE) + return SMP_ENC_KEY_SIZE; + + smp->enc_key_size = max_key_size; + + return 0; +} + +static void smp_chan_destroy(struct l2cap_conn *conn) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; + bool complete; + + BUG_ON(!smp); + + cancel_delayed_work_sync(&smp->security_timer); + + complete = test_bit(SMP_FLAG_COMPLETE, &smp->flags); + mgmt_smp_complete(hcon, complete); + + kfree_sensitive(smp->csrk); + kfree_sensitive(smp->responder_csrk); + kfree_sensitive(smp->link_key); + + crypto_free_shash(smp->tfm_cmac); + crypto_free_kpp(smp->tfm_ecdh); + + /* Ensure that we don't leave any debug key around if debug key + * support hasn't been explicitly enabled. 
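+ * (SMP_LTK_P256_DEBUG marks keys derived from the spec-defined
+ * debug ECDH key pair; any protocol analyser that knows the debug
+ * private key can decrypt a link encrypted with such a key.)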
+ */ + if (smp->ltk && smp->ltk->type == SMP_LTK_P256_DEBUG && + !hci_dev_test_flag(hcon->hdev, HCI_KEEP_DEBUG_KEYS)) { + list_del_rcu(&smp->ltk->list); + kfree_rcu(smp->ltk, rcu); + smp->ltk = NULL; + } + + /* If pairing failed clean up any keys we might have */ + if (!complete) { + if (smp->ltk) { + list_del_rcu(&smp->ltk->list); + kfree_rcu(smp->ltk, rcu); + } + + if (smp->responder_ltk) { + list_del_rcu(&smp->responder_ltk->list); + kfree_rcu(smp->responder_ltk, rcu); + } + + if (smp->remote_irk) { + list_del_rcu(&smp->remote_irk->list); + kfree_rcu(smp->remote_irk, rcu); + } + } + + chan->data = NULL; + kfree_sensitive(smp); + hci_conn_drop(hcon); +} + +static void smp_failure(struct l2cap_conn *conn, u8 reason) +{ + struct hci_conn *hcon = conn->hcon; + struct l2cap_chan *chan = conn->smp; + + if (reason) + smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason), + &reason); + + mgmt_auth_failed(hcon, HCI_ERROR_AUTH_FAILURE); + + if (chan->data) + smp_chan_destroy(conn); +} + +#define JUST_WORKS 0x00 +#define JUST_CFM 0x01 +#define REQ_PASSKEY 0x02 +#define CFM_PASSKEY 0x03 +#define REQ_OOB 0x04 +#define DSP_PASSKEY 0x05 +#define OVERLAP 0xFF + +static const u8 gen_method[5][5] = { + { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, + { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, + { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, + { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM }, + { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, +}; + +static const u8 sc_method[5][5] = { + { JUST_WORKS, JUST_CFM, REQ_PASSKEY, JUST_WORKS, REQ_PASSKEY }, + { JUST_WORKS, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, + { DSP_PASSKEY, DSP_PASSKEY, REQ_PASSKEY, JUST_WORKS, DSP_PASSKEY }, + { JUST_WORKS, JUST_CFM, JUST_WORKS, JUST_WORKS, JUST_CFM }, + { DSP_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, CFM_PASSKEY }, +}; + +static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io) +{ + /* If either side has unknown io_caps, use JUST_CFM (which gets + * converted later to JUST_WORKS if we're initiators. + */ + if (local_io > SMP_IO_KEYBOARD_DISPLAY || + remote_io > SMP_IO_KEYBOARD_DISPLAY) + return JUST_CFM; + + if (test_bit(SMP_FLAG_SC, &smp->flags)) + return sc_method[remote_io][local_io]; + + return gen_method[remote_io][local_io]; +} + +static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, + u8 local_io, u8 remote_io) +{ + struct hci_conn *hcon = conn->hcon; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + u32 passkey = 0; + int ret; + + /* Initialize key for JUST WORKS */ + memset(smp->tk, 0, sizeof(smp->tk)); + clear_bit(SMP_FLAG_TK_VALID, &smp->flags); + + bt_dev_dbg(hcon->hdev, "auth:%u lcl:%u rem:%u", auth, local_io, + remote_io); + + /* If neither side wants MITM, either "just" confirm an incoming + * request or use just-works for outgoing ones. The JUST_CFM + * will be converted to JUST_WORKS if necessary later in this + * function. If either side has MITM look up the method from the + * table. 
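+ * Both tables are indexed as [remote_io][local_io] with the I/O
+ * capabilities in their SMP encoding order: DisplayOnly,
+ * DisplayYesNo, KeyboardOnly, NoInputNoOutput, KeyboardDisplay.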
+ */ + if (!(auth & SMP_AUTH_MITM)) + smp->method = JUST_CFM; + else + smp->method = get_auth_method(smp, local_io, remote_io); + + /* Don't confirm locally initiated pairing attempts */ + if (smp->method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, + &smp->flags)) + smp->method = JUST_WORKS; + + /* Don't bother user space with no IO capabilities */ + if (smp->method == JUST_CFM && + hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) + smp->method = JUST_WORKS; + + /* If Just Works, Continue with Zero TK and ask user-space for + * confirmation */ + if (smp->method == JUST_WORKS) { + ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, + hcon->type, + hcon->dst_type, + passkey, 1); + if (ret) + return ret; + set_bit(SMP_FLAG_WAIT_USER, &smp->flags); + return 0; + } + + /* If this function is used for SC -> legacy fallback we + * can only recover the just-works case. + */ + if (test_bit(SMP_FLAG_SC, &smp->flags)) + return -EINVAL; + + /* Not Just Works/Confirm results in MITM Authentication */ + if (smp->method != JUST_CFM) { + set_bit(SMP_FLAG_MITM_AUTH, &smp->flags); + if (hcon->pending_sec_level < BT_SECURITY_HIGH) + hcon->pending_sec_level = BT_SECURITY_HIGH; + } + + /* If both devices have Keyboard-Display I/O, the initiator + * Confirms and the responder Enters the passkey. + */ + if (smp->method == OVERLAP) { + if (hcon->role == HCI_ROLE_MASTER) + smp->method = CFM_PASSKEY; + else + smp->method = REQ_PASSKEY; + } + + /* Generate random passkey. */ + if (smp->method == CFM_PASSKEY) { + memset(smp->tk, 0, sizeof(smp->tk)); + get_random_bytes(&passkey, sizeof(passkey)); + passkey %= 1000000; + put_unaligned_le32(passkey, smp->tk); + bt_dev_dbg(hcon->hdev, "PassKey: %u", passkey); + set_bit(SMP_FLAG_TK_VALID, &smp->flags); + } + + if (smp->method == REQ_PASSKEY) + ret = mgmt_user_passkey_request(hcon->hdev, &hcon->dst, + hcon->type, hcon->dst_type); + else if (smp->method == JUST_CFM) + ret = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, + hcon->type, hcon->dst_type, + passkey, 1); + else + ret = mgmt_user_passkey_notify(hcon->hdev, &hcon->dst, + hcon->type, hcon->dst_type, + passkey, 0); + + return ret; +} + +static u8 smp_confirm(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + struct smp_cmd_pairing_confirm cp; + int ret; + + bt_dev_dbg(conn->hcon->hdev, "conn %p", conn); + + ret = smp_c1(smp->tk, smp->prnd, smp->preq, smp->prsp, + conn->hcon->init_addr_type, &conn->hcon->init_addr, + conn->hcon->resp_addr_type, &conn->hcon->resp_addr, + cp.confirm_val); + if (ret) + return SMP_UNSPECIFIED; + + clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags); + + smp_send_cmd(smp->conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cp), &cp); + + if (conn->hcon->out) + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + else + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + + return 0; +} + +static u8 smp_random(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + u8 confirm[16]; + int ret; + + bt_dev_dbg(conn->hcon->hdev, "conn %p %s", conn, + conn->hcon->out ? 
"initiator" : "responder"); + + ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp, + hcon->init_addr_type, &hcon->init_addr, + hcon->resp_addr_type, &hcon->resp_addr, confirm); + if (ret) + return SMP_UNSPECIFIED; + + if (crypto_memneq(smp->pcnf, confirm, sizeof(smp->pcnf))) { + bt_dev_err(hcon->hdev, "pairing failed " + "(confirmation values mismatch)"); + return SMP_CONFIRM_FAILED; + } + + if (hcon->out) { + u8 stk[16]; + __le64 rand = 0; + __le16 ediv = 0; + + smp_s1(smp->tk, smp->rrnd, smp->prnd, stk); + + if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) + return SMP_UNSPECIFIED; + + hci_le_start_enc(hcon, ediv, rand, stk, smp->enc_key_size); + hcon->enc_key_size = smp->enc_key_size; + set_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags); + } else { + u8 stk[16], auth; + __le64 rand = 0; + __le16 ediv = 0; + + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), + smp->prnd); + + smp_s1(smp->tk, smp->prnd, smp->rrnd, stk); + + if (hcon->pending_sec_level == BT_SECURITY_HIGH) + auth = 1; + else + auth = 0; + + /* Even though there's no _RESPONDER suffix this is the + * responder STK we're adding for later lookup (the initiator + * STK never needs to be stored). + */ + hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, + SMP_STK, auth, stk, smp->enc_key_size, ediv, rand); + } + + return 0; +} + +static void smp_notify_keys(struct l2cap_conn *conn) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct smp_cmd_pairing *req = (void *) &smp->preq[1]; + struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1]; + bool persistent; + + if (hcon->type == ACL_LINK) { + if (hcon->key_type == HCI_LK_DEBUG_COMBINATION) + persistent = false; + else + persistent = !test_bit(HCI_CONN_FLUSH_KEY, + &hcon->flags); + } else { + /* The LTKs, IRKs and CSRKs should be persistent only if + * both sides had the bonding bit set in their + * authentication requests. + */ + persistent = !!((req->auth_req & rsp->auth_req) & + SMP_AUTH_BONDING); + } + + if (smp->remote_irk) { + smp->remote_irk->link_type = hcon->type; + mgmt_new_irk(hdev, smp->remote_irk, persistent); + + /* Now that user space can be considered to know the + * identity address track the connection based on it + * from now on (assuming this is an LE link). 
+ */ + if (hcon->type == LE_LINK) { + bacpy(&hcon->dst, &smp->remote_irk->bdaddr); + hcon->dst_type = smp->remote_irk->addr_type; + queue_work(hdev->workqueue, &conn->id_addr_update_work); + } + } + + if (smp->csrk) { + smp->csrk->link_type = hcon->type; + smp->csrk->bdaddr_type = hcon->dst_type; + bacpy(&smp->csrk->bdaddr, &hcon->dst); + mgmt_new_csrk(hdev, smp->csrk, persistent); + } + + if (smp->responder_csrk) { + smp->responder_csrk->link_type = hcon->type; + smp->responder_csrk->bdaddr_type = hcon->dst_type; + bacpy(&smp->responder_csrk->bdaddr, &hcon->dst); + mgmt_new_csrk(hdev, smp->responder_csrk, persistent); + } + + if (smp->ltk) { + smp->ltk->link_type = hcon->type; + smp->ltk->bdaddr_type = hcon->dst_type; + bacpy(&smp->ltk->bdaddr, &hcon->dst); + mgmt_new_ltk(hdev, smp->ltk, persistent); + } + + if (smp->responder_ltk) { + smp->responder_ltk->link_type = hcon->type; + smp->responder_ltk->bdaddr_type = hcon->dst_type; + bacpy(&smp->responder_ltk->bdaddr, &hcon->dst); + mgmt_new_ltk(hdev, smp->responder_ltk, persistent); + } + + if (smp->link_key) { + struct link_key *key; + u8 type; + + if (test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags)) + type = HCI_LK_DEBUG_COMBINATION; + else if (hcon->sec_level == BT_SECURITY_FIPS) + type = HCI_LK_AUTH_COMBINATION_P256; + else + type = HCI_LK_UNAUTH_COMBINATION_P256; + + key = hci_add_link_key(hdev, smp->conn->hcon, &hcon->dst, + smp->link_key, type, 0, &persistent); + if (key) { + key->link_type = hcon->type; + key->bdaddr_type = hcon->dst_type; + mgmt_new_link_key(hdev, key, persistent); + + /* Don't keep debug keys around if the relevant + * flag is not set. + */ + if (!hci_dev_test_flag(hdev, HCI_KEEP_DEBUG_KEYS) && + key->type == HCI_LK_DEBUG_COMBINATION) { + list_del_rcu(&key->list); + kfree_rcu(key, rcu); + } + } + } +} + +static void sc_add_ltk(struct smp_chan *smp) +{ + struct hci_conn *hcon = smp->conn->hcon; + u8 key_type, auth; + + if (test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags)) + key_type = SMP_LTK_P256_DEBUG; + else + key_type = SMP_LTK_P256; + + if (hcon->pending_sec_level == BT_SECURITY_FIPS) + auth = 1; + else + auth = 0; + + smp->ltk = hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, + key_type, auth, smp->tk, smp->enc_key_size, + 0, 0); +} + +static void sc_generate_link_key(struct smp_chan *smp) +{ + /* From core spec. Spells out in ASCII as 'lebr'. */ + const u8 lebr[4] = { 0x72, 0x62, 0x65, 0x6c }; + + smp->link_key = kzalloc(16, GFP_KERNEL); + if (!smp->link_key) + return; + + if (test_bit(SMP_FLAG_CT2, &smp->flags)) { + /* SALT = 0x000000000000000000000000746D7031 */ + const u8 salt[16] = { 0x31, 0x70, 0x6d, 0x74 }; + + if (smp_h7(smp->tfm_cmac, smp->tk, salt, smp->link_key)) { + kfree_sensitive(smp->link_key); + smp->link_key = NULL; + return; + } + } else { + /* From core spec. Spells out in ASCII as 'tmp1'. */ + const u8 tmp1[4] = { 0x31, 0x70, 0x6d, 0x74 }; + + if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) { + kfree_sensitive(smp->link_key); + smp->link_key = NULL; + return; + } + } + + if (smp_h6(smp->tfm_cmac, smp->link_key, lebr, smp->link_key)) { + kfree_sensitive(smp->link_key); + smp->link_key = NULL; + return; + } +} + +static void smp_allow_key_dist(struct smp_chan *smp) +{ + /* Allow the first expected phase 3 PDU. The rest of the PDUs + * will be allowed in each PDU handler to ensure we receive + * them in the correct order. 
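+ * The distribution order fixed by the spec is encryption material
+ * first (Encrypt Info), then identity keys (Ident Info), then the
+ * signing key (Sign Info).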
+ */ + if (smp->remote_key_dist & SMP_DIST_ENC_KEY) + SMP_ALLOW_CMD(smp, SMP_CMD_ENCRYPT_INFO); + else if (smp->remote_key_dist & SMP_DIST_ID_KEY) + SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_INFO); + else if (smp->remote_key_dist & SMP_DIST_SIGN) + SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); +} + +static void sc_generate_ltk(struct smp_chan *smp) +{ + /* From core spec. Spells out in ASCII as 'brle'. */ + const u8 brle[4] = { 0x65, 0x6c, 0x72, 0x62 }; + struct hci_conn *hcon = smp->conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct link_key *key; + + key = hci_find_link_key(hdev, &hcon->dst); + if (!key) { + bt_dev_err(hdev, "no Link Key found to generate LTK"); + return; + } + + if (key->type == HCI_LK_DEBUG_COMBINATION) + set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); + + if (test_bit(SMP_FLAG_CT2, &smp->flags)) { + /* SALT = 0x000000000000000000000000746D7032 */ + const u8 salt[16] = { 0x32, 0x70, 0x6d, 0x74 }; + + if (smp_h7(smp->tfm_cmac, key->val, salt, smp->tk)) + return; + } else { + /* From core spec. Spells out in ASCII as 'tmp2'. */ + const u8 tmp2[4] = { 0x32, 0x70, 0x6d, 0x74 }; + + if (smp_h6(smp->tfm_cmac, key->val, tmp2, smp->tk)) + return; + } + + if (smp_h6(smp->tfm_cmac, smp->tk, brle, smp->tk)) + return; + + sc_add_ltk(smp); +} + +static void smp_distribute_keys(struct smp_chan *smp) +{ + struct smp_cmd_pairing *req, *rsp; + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + __u8 *keydist; + + bt_dev_dbg(hdev, "conn %p", conn); + + rsp = (void *) &smp->prsp[1]; + + /* The responder sends its keys first */ + if (hcon->out && (smp->remote_key_dist & KEY_DIST_MASK)) { + smp_allow_key_dist(smp); + return; + } + + req = (void *) &smp->preq[1]; + + if (hcon->out) { + keydist = &rsp->init_key_dist; + *keydist &= req->init_key_dist; + } else { + keydist = &rsp->resp_key_dist; + *keydist &= req->resp_key_dist; + } + + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + if (hcon->type == LE_LINK && (*keydist & SMP_DIST_LINK_KEY)) + sc_generate_link_key(smp); + if (hcon->type == ACL_LINK && (*keydist & SMP_DIST_ENC_KEY)) + sc_generate_ltk(smp); + + /* Clear the keys which are generated but not distributed */ + *keydist &= ~SMP_SC_NO_DIST; + } + + bt_dev_dbg(hdev, "keydist 0x%x", *keydist); + + if (*keydist & SMP_DIST_ENC_KEY) { + struct smp_cmd_encrypt_info enc; + struct smp_cmd_initiator_ident ident; + struct smp_ltk *ltk; + u8 authenticated; + __le16 ediv; + __le64 rand; + + /* Make sure we generate only the significant amount of + * bytes based on the encryption key size, and set the rest + * of the value to zeroes. 
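+ * (Key size reduction masks off the most significant octets of the
+ * key; in this little-endian representation those are the trailing
+ * bytes.)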
+ */ + get_random_bytes(enc.ltk, smp->enc_key_size); + memset(enc.ltk + smp->enc_key_size, 0, + sizeof(enc.ltk) - smp->enc_key_size); + + get_random_bytes(&ediv, sizeof(ediv)); + get_random_bytes(&rand, sizeof(rand)); + + smp_send_cmd(conn, SMP_CMD_ENCRYPT_INFO, sizeof(enc), &enc); + + authenticated = hcon->sec_level == BT_SECURITY_HIGH; + ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, + SMP_LTK_RESPONDER, authenticated, enc.ltk, + smp->enc_key_size, ediv, rand); + smp->responder_ltk = ltk; + + ident.ediv = ediv; + ident.rand = rand; + + smp_send_cmd(conn, SMP_CMD_INITIATOR_IDENT, sizeof(ident), + &ident); + + *keydist &= ~SMP_DIST_ENC_KEY; + } + + if (*keydist & SMP_DIST_ID_KEY) { + struct smp_cmd_ident_addr_info addrinfo; + struct smp_cmd_ident_info idinfo; + + memcpy(idinfo.irk, hdev->irk, sizeof(idinfo.irk)); + + smp_send_cmd(conn, SMP_CMD_IDENT_INFO, sizeof(idinfo), &idinfo); + + /* The hci_conn contains the local identity address + * after the connection has been established. + * + * This is true even when the connection has been + * established using a resolvable random address. + */ + bacpy(&addrinfo.bdaddr, &hcon->src); + addrinfo.addr_type = hcon->src_type; + + smp_send_cmd(conn, SMP_CMD_IDENT_ADDR_INFO, sizeof(addrinfo), + &addrinfo); + + *keydist &= ~SMP_DIST_ID_KEY; + } + + if (*keydist & SMP_DIST_SIGN) { + struct smp_cmd_sign_info sign; + struct smp_csrk *csrk; + + /* Generate a new random key */ + get_random_bytes(sign.csrk, sizeof(sign.csrk)); + + csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); + if (csrk) { + if (hcon->sec_level > BT_SECURITY_MEDIUM) + csrk->type = MGMT_CSRK_LOCAL_AUTHENTICATED; + else + csrk->type = MGMT_CSRK_LOCAL_UNAUTHENTICATED; + memcpy(csrk->val, sign.csrk, sizeof(csrk->val)); + } + smp->responder_csrk = csrk; + + smp_send_cmd(conn, SMP_CMD_SIGN_INFO, sizeof(sign), &sign); + + *keydist &= ~SMP_DIST_SIGN; + } + + /* If there are still keys to be received wait for them */ + if (smp->remote_key_dist & KEY_DIST_MASK) { + smp_allow_key_dist(smp); + return; + } + + set_bit(SMP_FLAG_COMPLETE, &smp->flags); + smp_notify_keys(conn); + + smp_chan_destroy(conn); +} + +static void smp_timeout(struct work_struct *work) +{ + struct smp_chan *smp = container_of(work, struct smp_chan, + security_timer.work); + struct l2cap_conn *conn = smp->conn; + + bt_dev_dbg(conn->hcon->hdev, "conn %p", conn); + + hci_disconnect(conn->hcon, HCI_ERROR_REMOTE_USER_TERM); +} + +static struct smp_chan *smp_chan_create(struct l2cap_conn *conn) +{ + struct hci_conn *hcon = conn->hcon; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp; + + smp = kzalloc(sizeof(*smp), GFP_ATOMIC); + if (!smp) + return NULL; + + smp->tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); + if (IS_ERR(smp->tfm_cmac)) { + bt_dev_err(hcon->hdev, "Unable to create CMAC crypto context"); + goto zfree_smp; + } + + smp->tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); + if (IS_ERR(smp->tfm_ecdh)) { + bt_dev_err(hcon->hdev, "Unable to create ECDH crypto context"); + goto free_shash; + } + + smp->conn = conn; + chan->data = smp; + + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_FAIL); + + INIT_DELAYED_WORK(&smp->security_timer, smp_timeout); + + hci_conn_hold(hcon); + + return smp; + +free_shash: + crypto_free_shash(smp->tfm_cmac); +zfree_smp: + kfree_sensitive(smp); + return NULL; +} + +static int sc_mackey_and_ltk(struct smp_chan *smp, u8 mackey[16], u8 ltk[16]) +{ + struct hci_conn *hcon = smp->conn->hcon; + u8 *na, *nb, a[7], b[7]; + + if (hcon->out) { + na = smp->prnd; + nb = smp->rrnd; + } else { + na = 
smp->rrnd; + nb = smp->prnd; + } + + memcpy(a, &hcon->init_addr, 6); + memcpy(b, &hcon->resp_addr, 6); + a[6] = hcon->init_addr_type; + b[6] = hcon->resp_addr_type; + + return smp_f5(smp->tfm_cmac, smp->dhkey, na, nb, a, b, mackey, ltk); +} + +static void sc_dhkey_check(struct smp_chan *smp) +{ + struct hci_conn *hcon = smp->conn->hcon; + struct smp_cmd_dhkey_check check; + u8 a[7], b[7], *local_addr, *remote_addr; + u8 io_cap[3], r[16]; + + memcpy(a, &hcon->init_addr, 6); + memcpy(b, &hcon->resp_addr, 6); + a[6] = hcon->init_addr_type; + b[6] = hcon->resp_addr_type; + + if (hcon->out) { + local_addr = a; + remote_addr = b; + memcpy(io_cap, &smp->preq[1], 3); + } else { + local_addr = b; + remote_addr = a; + memcpy(io_cap, &smp->prsp[1], 3); + } + + memset(r, 0, sizeof(r)); + + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + put_unaligned_le32(hcon->passkey_notify, r); + + if (smp->method == REQ_OOB) + memcpy(r, smp->rr, 16); + + smp_f6(smp->tfm_cmac, smp->mackey, smp->prnd, smp->rrnd, r, io_cap, + local_addr, remote_addr, check.e); + + smp_send_cmd(smp->conn, SMP_CMD_DHKEY_CHECK, sizeof(check), &check); +} + +static u8 sc_passkey_send_confirm(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct smp_cmd_pairing_confirm cfm; + u8 r; + + r = ((hcon->passkey_notify >> smp->passkey_round) & 0x01); + r |= 0x80; + + get_random_bytes(smp->prnd, sizeof(smp->prnd)); + + if (smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd, r, + cfm.confirm_val)) + return SMP_UNSPECIFIED; + + smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cfm), &cfm); + + return 0; +} + +static u8 sc_passkey_round(struct smp_chan *smp, u8 smp_op) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + u8 cfm[16], r; + + /* Ignore the PDU if we've already done 20 rounds (0 - 19) */ + if (smp->passkey_round >= 20) + return 0; + + switch (smp_op) { + case SMP_CMD_PAIRING_RANDOM: + r = ((hcon->passkey_notify >> smp->passkey_round) & 0x01); + r |= 0x80; + + if (smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk, + smp->rrnd, r, cfm)) + return SMP_UNSPECIFIED; + + if (crypto_memneq(smp->pcnf, cfm, 16)) + return SMP_CONFIRM_FAILED; + + smp->passkey_round++; + + if (smp->passkey_round == 20) { + /* Generate MacKey and LTK */ + if (sc_mackey_and_ltk(smp, smp->mackey, smp->tk)) + return SMP_UNSPECIFIED; + } + + /* The round is only complete when the initiator + * receives pairing random. 
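+ * Each of the 20 rounds commits to a single bit of the passkey
+ * (r = 0x80 | bit), so neither side can learn more than one bit
+ * per round from a dishonest peer.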
+ */ + if (!hcon->out) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + if (smp->passkey_round == 20) + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + else + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + return 0; + } + + /* Start the next round */ + if (smp->passkey_round != 20) + return sc_passkey_round(smp, 0); + + /* Passkey rounds are complete - start DHKey Check */ + sc_dhkey_check(smp); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + + break; + + case SMP_CMD_PAIRING_CONFIRM: + if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) { + set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); + return 0; + } + + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + + if (hcon->out) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + return 0; + } + + return sc_passkey_send_confirm(smp); + + case SMP_CMD_PUBLIC_KEY: + default: + /* Initiating device starts the round */ + if (!hcon->out) + return 0; + + bt_dev_dbg(hdev, "Starting passkey round %u", + smp->passkey_round + 1); + + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + + return sc_passkey_send_confirm(smp); + } + + return 0; +} + +static int sc_user_reply(struct smp_chan *smp, u16 mgmt_op, __le32 passkey) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + u8 smp_op; + + clear_bit(SMP_FLAG_WAIT_USER, &smp->flags); + + switch (mgmt_op) { + case MGMT_OP_USER_PASSKEY_NEG_REPLY: + smp_failure(smp->conn, SMP_PASSKEY_ENTRY_FAILED); + return 0; + case MGMT_OP_USER_CONFIRM_NEG_REPLY: + smp_failure(smp->conn, SMP_NUMERIC_COMP_FAILED); + return 0; + case MGMT_OP_USER_PASSKEY_REPLY: + hcon->passkey_notify = le32_to_cpu(passkey); + smp->passkey_round = 0; + + if (test_and_clear_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) + smp_op = SMP_CMD_PAIRING_CONFIRM; + else + smp_op = 0; + + if (sc_passkey_round(smp, smp_op)) + return -EIO; + + return 0; + } + + /* Initiator sends DHKey check first */ + if (hcon->out) { + sc_dhkey_check(smp); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + } else if (test_and_clear_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags)) { + sc_dhkey_check(smp); + sc_add_ltk(smp); + } + + return 0; +} + +int smp_user_confirm_reply(struct hci_conn *hcon, u16 mgmt_op, __le32 passkey) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + struct l2cap_chan *chan; + struct smp_chan *smp; + u32 value; + int err; + + if (!conn) + return -ENOTCONN; + + bt_dev_dbg(conn->hcon->hdev, ""); + + chan = conn->smp; + if (!chan) + return -ENOTCONN; + + l2cap_chan_lock(chan); + if (!chan->data) { + err = -ENOTCONN; + goto unlock; + } + + smp = chan->data; + + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + err = sc_user_reply(smp, mgmt_op, passkey); + goto unlock; + } + + switch (mgmt_op) { + case MGMT_OP_USER_PASSKEY_REPLY: + value = le32_to_cpu(passkey); + memset(smp->tk, 0, sizeof(smp->tk)); + bt_dev_dbg(conn->hcon->hdev, "PassKey: %u", value); + put_unaligned_le32(value, smp->tk); + fallthrough; + case MGMT_OP_USER_CONFIRM_REPLY: + set_bit(SMP_FLAG_TK_VALID, &smp->flags); + break; + case MGMT_OP_USER_PASSKEY_NEG_REPLY: + case MGMT_OP_USER_CONFIRM_NEG_REPLY: + smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED); + err = 0; + goto unlock; + default: + smp_failure(conn, SMP_PASSKEY_ENTRY_FAILED); + err = -EOPNOTSUPP; + goto unlock; + } + + err = 0; + + /* If it is our turn to send Pairing Confirm, do so now */ + if (test_bit(SMP_FLAG_CFM_PENDING, &smp->flags)) { + u8 rsp = smp_confirm(smp); + if (rsp) + smp_failure(conn, rsp); + } + +unlock: + l2cap_chan_unlock(chan); + return err; +} + +static void 
build_bredr_pairing_cmd(struct smp_chan *smp, + struct smp_cmd_pairing *req, + struct smp_cmd_pairing *rsp) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_dev *hdev = conn->hcon->hdev; + u8 local_dist = 0, remote_dist = 0; + + if (hci_dev_test_flag(hdev, HCI_BONDABLE)) { + local_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; + remote_dist = SMP_DIST_ENC_KEY | SMP_DIST_SIGN; + } + + if (hci_dev_test_flag(hdev, HCI_RPA_RESOLVING)) + remote_dist |= SMP_DIST_ID_KEY; + + if (hci_dev_test_flag(hdev, HCI_PRIVACY)) + local_dist |= SMP_DIST_ID_KEY; + + if (!rsp) { + memset(req, 0, sizeof(*req)); + + req->auth_req = SMP_AUTH_CT2; + req->init_key_dist = local_dist; + req->resp_key_dist = remote_dist; + req->max_key_size = conn->hcon->enc_key_size; + + smp->remote_key_dist = remote_dist; + + return; + } + + memset(rsp, 0, sizeof(*rsp)); + + rsp->auth_req = SMP_AUTH_CT2; + rsp->max_key_size = conn->hcon->enc_key_size; + rsp->init_key_dist = req->init_key_dist & remote_dist; + rsp->resp_key_dist = req->resp_key_dist & local_dist; + + smp->remote_key_dist = rsp->init_key_dist; +} + +static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_pairing rsp, *req = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; + struct hci_dev *hdev = conn->hcon->hdev; + struct smp_chan *smp; + u8 key_size, auth, sec_level; + int ret; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (skb->len < sizeof(*req)) + return SMP_INVALID_PARAMS; + + if (conn->hcon->role != HCI_ROLE_SLAVE) + return SMP_CMD_NOTSUPP; + + if (!chan->data) + smp = smp_chan_create(conn); + else + smp = chan->data; + + if (!smp) + return SMP_UNSPECIFIED; + + /* We didn't start the pairing, so match remote */ + auth = req->auth_req & AUTH_REQ_MASK(hdev); + + if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && + (auth & SMP_AUTH_BONDING)) + return SMP_PAIRING_NOTSUPP; + + if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) + return SMP_AUTH_REQUIREMENTS; + + smp->preq[0] = SMP_CMD_PAIRING_REQ; + memcpy(&smp->preq[1], req, sizeof(*req)); + skb_pull(skb, sizeof(*req)); + + /* If the remote side's OOB flag is set it means it has + * successfully received our local OOB data - therefore set the + * flag to indicate that local OOB is in use. 
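+ * (The OOB flag advertises possession of the peer's OOB data,
+ * not of our own.)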
+ */ + if (req->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) + set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); + + /* SMP over BR/EDR requires special treatment */ + if (conn->hcon->type == ACL_LINK) { + /* We must have a BR/EDR SC link */ + if (!test_bit(HCI_CONN_AES_CCM, &conn->hcon->flags) && + !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) + return SMP_CROSS_TRANSP_NOT_ALLOWED; + + set_bit(SMP_FLAG_SC, &smp->flags); + + build_bredr_pairing_cmd(smp, req, &rsp); + + if (req->auth_req & SMP_AUTH_CT2) + set_bit(SMP_FLAG_CT2, &smp->flags); + + key_size = min(req->max_key_size, rsp.max_key_size); + if (check_enc_key_size(conn, key_size)) + return SMP_ENC_KEY_SIZE; + + /* Clear bits which are generated but not distributed */ + smp->remote_key_dist &= ~SMP_SC_NO_DIST; + + smp->prsp[0] = SMP_CMD_PAIRING_RSP; + memcpy(&smp->prsp[1], &rsp, sizeof(rsp)); + smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); + + smp_distribute_keys(smp); + return 0; + } + + build_pairing_cmd(conn, req, &rsp, auth); + + if (rsp.auth_req & SMP_AUTH_SC) { + set_bit(SMP_FLAG_SC, &smp->flags); + + if (rsp.auth_req & SMP_AUTH_CT2) + set_bit(SMP_FLAG_CT2, &smp->flags); + } + + if (conn->hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) + sec_level = BT_SECURITY_MEDIUM; + else + sec_level = authreq_to_seclevel(auth); + + if (sec_level > conn->hcon->pending_sec_level) + conn->hcon->pending_sec_level = sec_level; + + /* If we need MITM check that it can be achieved */ + if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { + u8 method; + + method = get_auth_method(smp, conn->hcon->io_capability, + req->io_capability); + if (method == JUST_WORKS || method == JUST_CFM) + return SMP_AUTH_REQUIREMENTS; + } + + key_size = min(req->max_key_size, rsp.max_key_size); + if (check_enc_key_size(conn, key_size)) + return SMP_ENC_KEY_SIZE; + + get_random_bytes(smp->prnd, sizeof(smp->prnd)); + + smp->prsp[0] = SMP_CMD_PAIRING_RSP; + memcpy(&smp->prsp[1], &rsp, sizeof(rsp)); + + smp_send_cmd(conn, SMP_CMD_PAIRING_RSP, sizeof(rsp), &rsp); + + clear_bit(SMP_FLAG_INITIATOR, &smp->flags); + + /* Strictly speaking we shouldn't allow Pairing Confirm for the + * SC case, however some implementations incorrectly copy RFU auth + * req bits from our security request, which may create a false + * positive SC enablement. 
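+ * If such a bogus Pairing Confirm does arrive before any Public
+ * Key, fixup_sc_false_positive() below drops back to legacy SMP.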
+ */ + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY); + /* Clear bits which are generated but not distributed */ + smp->remote_key_dist &= ~SMP_SC_NO_DIST; + /* Wait for Public Key from Initiating Device */ + return 0; + } + + /* Request setup of TK */ + ret = tk_request(conn, 0, auth, rsp.io_capability, req->io_capability); + if (ret) + return SMP_UNSPECIFIED; + + return 0; +} + +static u8 sc_send_public_key(struct smp_chan *smp) +{ + struct hci_dev *hdev = smp->conn->hcon->hdev; + + bt_dev_dbg(hdev, ""); + + if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { + struct l2cap_chan *chan = hdev->smp_data; + struct smp_dev *smp_dev; + + if (!chan || !chan->data) + return SMP_UNSPECIFIED; + + smp_dev = chan->data; + + memcpy(smp->local_pk, smp_dev->local_pk, 64); + memcpy(smp->lr, smp_dev->local_rand, 16); + + if (smp_dev->debug_key) + set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); + + goto done; + } + + if (hci_dev_test_flag(hdev, HCI_USE_DEBUG_KEYS)) { + bt_dev_dbg(hdev, "Using debug keys"); + if (set_ecdh_privkey(smp->tfm_ecdh, debug_sk)) + return SMP_UNSPECIFIED; + memcpy(smp->local_pk, debug_pk, 64); + set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); + } else { + while (true) { + /* Generate key pair for Secure Connections */ + if (generate_ecdh_keys(smp->tfm_ecdh, smp->local_pk)) + return SMP_UNSPECIFIED; + + /* This is unlikely, but we need to check that + * we didn't accidentally generate a debug key. + */ + if (crypto_memneq(smp->local_pk, debug_pk, 64)) + break; + } + } + +done: + SMP_DBG("Local Public Key X: %32phN", smp->local_pk); + SMP_DBG("Local Public Key Y: %32phN", smp->local_pk + 32); + + smp_send_cmd(smp->conn, SMP_CMD_PUBLIC_KEY, 64, smp->local_pk); + + return 0; +} + +static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_pairing *req, *rsp = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_dev *hdev = conn->hcon->hdev; + u8 key_size, auth; + int ret; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (skb->len < sizeof(*rsp)) + return SMP_INVALID_PARAMS; + + if (conn->hcon->role != HCI_ROLE_MASTER) + return SMP_CMD_NOTSUPP; + + skb_pull(skb, sizeof(*rsp)); + + req = (void *) &smp->preq[1]; + + key_size = min(req->max_key_size, rsp->max_key_size); + if (check_enc_key_size(conn, key_size)) + return SMP_ENC_KEY_SIZE; + + auth = rsp->auth_req & AUTH_REQ_MASK(hdev); + + if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) + return SMP_AUTH_REQUIREMENTS; + + /* If the remote side's OOB flag is set it means it has + * successfully received our local OOB data - therefore set the + * flag to indicate that local OOB is in use. + */ + if (rsp->oob_flag == SMP_OOB_PRESENT && SMP_DEV(hdev)->local_oob) + set_bit(SMP_FLAG_LOCAL_OOB, &smp->flags); + + smp->prsp[0] = SMP_CMD_PAIRING_RSP; + memcpy(&smp->prsp[1], rsp, sizeof(*rsp)); + + /* Update remote key distribution in case the remote cleared + * some bits that we had enabled in our request. 
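+ * (ANDing with the response guarantees we never wait for keys the
+ * remote did not agree to distribute.)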
+ */ + smp->remote_key_dist &= rsp->resp_key_dist; + + if ((req->auth_req & SMP_AUTH_CT2) && (auth & SMP_AUTH_CT2)) + set_bit(SMP_FLAG_CT2, &smp->flags); + + /* For BR/EDR this means we're done and can start phase 3 */ + if (conn->hcon->type == ACL_LINK) { + /* Clear bits which are generated but not distributed */ + smp->remote_key_dist &= ~SMP_SC_NO_DIST; + smp_distribute_keys(smp); + return 0; + } + + if ((req->auth_req & SMP_AUTH_SC) && (auth & SMP_AUTH_SC)) + set_bit(SMP_FLAG_SC, &smp->flags); + else if (conn->hcon->pending_sec_level > BT_SECURITY_HIGH) + conn->hcon->pending_sec_level = BT_SECURITY_HIGH; + + /* If we need MITM check that it can be achieved */ + if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) { + u8 method; + + method = get_auth_method(smp, req->io_capability, + rsp->io_capability); + if (method == JUST_WORKS || method == JUST_CFM) + return SMP_AUTH_REQUIREMENTS; + } + + get_random_bytes(smp->prnd, sizeof(smp->prnd)); + + /* Update remote key distribution in case the remote cleared + * some bits that we had enabled in our request. + */ + smp->remote_key_dist &= rsp->resp_key_dist; + + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + /* Clear bits which are generated but not distributed */ + smp->remote_key_dist &= ~SMP_SC_NO_DIST; + SMP_ALLOW_CMD(smp, SMP_CMD_PUBLIC_KEY); + return sc_send_public_key(smp); + } + + auth |= req->auth_req; + + ret = tk_request(conn, 0, auth, req->io_capability, rsp->io_capability); + if (ret) + return SMP_UNSPECIFIED; + + set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); + + /* Can't compose response until we have been confirmed */ + if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) + return smp_confirm(smp); + + return 0; +} + +static u8 sc_check_confirm(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + + bt_dev_dbg(conn->hcon->hdev, ""); + + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + return sc_passkey_round(smp, SMP_CMD_PAIRING_CONFIRM); + + if (conn->hcon->out) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), + smp->prnd); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + } + + return 0; +} + +/* Work-around for some implementations that incorrectly copy RFU bits + * from our security request and thereby create the impression that + * we're doing SC when in fact the remote doesn't support it. 
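+ * The fallback is only attempted in the responder role; as
+ * initiator we selected SC ourselves, so a mismatch is fatal.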
+ */ +static int fixup_sc_false_positive(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct smp_cmd_pairing *req, *rsp; + u8 auth; + + /* The issue is only observed when we're in responder role */ + if (hcon->out) + return SMP_UNSPECIFIED; + + if (hci_dev_test_flag(hdev, HCI_SC_ONLY)) { + bt_dev_err(hdev, "refusing legacy fallback in SC-only mode"); + return SMP_UNSPECIFIED; + } + + bt_dev_err(hdev, "trying to fall back to legacy SMP"); + + req = (void *) &smp->preq[1]; + rsp = (void *) &smp->prsp[1]; + + /* Rebuild key dist flags which may have been cleared for SC */ + smp->remote_key_dist = (req->init_key_dist & rsp->resp_key_dist); + + auth = req->auth_req & AUTH_REQ_MASK(hdev); + + if (tk_request(conn, 0, auth, rsp->io_capability, req->io_capability)) { + bt_dev_err(hdev, "failed to fall back to legacy SMP"); + return SMP_UNSPECIFIED; + } + + clear_bit(SMP_FLAG_SC, &smp->flags); + + return 0; +} + +static u8 smp_cmd_pairing_confirm(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + + bt_dev_dbg(hdev, "conn %p %s", conn, + hcon->out ? "initiator" : "responder"); + + if (skb->len < sizeof(smp->pcnf)) + return SMP_INVALID_PARAMS; + + memcpy(smp->pcnf, skb->data, sizeof(smp->pcnf)); + skb_pull(skb, sizeof(smp->pcnf)); + + if (test_bit(SMP_FLAG_SC, &smp->flags)) { + int ret; + + /* Public Key exchange must happen before any other steps */ + if (test_bit(SMP_FLAG_REMOTE_PK, &smp->flags)) + return sc_check_confirm(smp); + + bt_dev_err(hdev, "Unexpected SMP Pairing Confirm"); + + ret = fixup_sc_false_positive(smp); + if (ret) + return ret; + } + + if (conn->hcon->out) { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), + smp->prnd); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + return 0; + } + + if (test_bit(SMP_FLAG_TK_VALID, &smp->flags)) + return smp_confirm(smp); + + set_bit(SMP_FLAG_CFM_PENDING, &smp->flags); + + return 0; +} + +static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; + u8 *pkax, *pkbx, *na, *nb, confirm_hint; + u32 passkey; + int err; + + bt_dev_dbg(hcon->hdev, "conn %p", conn); + + if (skb->len < sizeof(smp->rrnd)) + return SMP_INVALID_PARAMS; + + memcpy(smp->rrnd, skb->data, sizeof(smp->rrnd)); + skb_pull(skb, sizeof(smp->rrnd)); + + if (!test_bit(SMP_FLAG_SC, &smp->flags)) + return smp_random(smp); + + if (hcon->out) { + pkax = smp->local_pk; + pkbx = smp->remote_pk; + na = smp->prnd; + nb = smp->rrnd; + } else { + pkax = smp->remote_pk; + pkbx = smp->local_pk; + na = smp->rrnd; + nb = smp->prnd; + } + + if (smp->method == REQ_OOB) { + if (!hcon->out) + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + goto mackey_and_ltk; + } + + /* Passkey entry has special treatment */ + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + return sc_passkey_round(smp, SMP_CMD_PAIRING_RANDOM); + + if (hcon->out) { + u8 cfm[16]; + + err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->local_pk, + smp->rrnd, 0, cfm); + if (err) + return SMP_UNSPECIFIED; + + if (crypto_memneq(smp->pcnf, cfm, 16)) + return SMP_CONFIRM_FAILED; + } else { + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd), + smp->prnd); + 
SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + + /* Only Just-Works pairing requires extra checks */ + if (smp->method != JUST_WORKS) + goto mackey_and_ltk; + + /* If there already exists long term key in local host, leave + * the decision to user space since the remote device could + * be legitimate or malicious. + */ + if (hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, + hcon->role)) { + /* Set passkey to 0. The value can be any number since + * it'll be ignored anyway. + */ + passkey = 0; + confirm_hint = 1; + goto confirm; + } + } + +mackey_and_ltk: + /* Generate MacKey and LTK */ + err = sc_mackey_and_ltk(smp, smp->mackey, smp->tk); + if (err) + return SMP_UNSPECIFIED; + + if (smp->method == REQ_OOB) { + if (hcon->out) { + sc_dhkey_check(smp); + SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK); + } + return 0; + } + + err = smp_g2(smp->tfm_cmac, pkax, pkbx, na, nb, &passkey); + if (err) + return SMP_UNSPECIFIED; + + confirm_hint = 0; + +confirm: + if (smp->method == JUST_WORKS) + confirm_hint = 1; + + err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type, + hcon->dst_type, passkey, confirm_hint); + if (err) + return SMP_UNSPECIFIED; + + set_bit(SMP_FLAG_WAIT_USER, &smp->flags); + + return 0; +} + +static bool smp_ltk_encrypt(struct l2cap_conn *conn, u8 sec_level) +{ + struct smp_ltk *key; + struct hci_conn *hcon = conn->hcon; + + key = hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, hcon->role); + if (!key) + return false; + + if (smp_ltk_sec_level(key) < sec_level) + return false; + + if (test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags)) + return true; + + hci_le_start_enc(hcon, key->ediv, key->rand, key->val, key->enc_size); + hcon->enc_key_size = key->enc_size; + + /* We never store STKs for initiator role, so clear this flag */ + clear_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags); + + return true; +} + +bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, + enum smp_key_pref key_pref) +{ + if (sec_level == BT_SECURITY_LOW) + return true; + + /* If we're encrypted with an STK but the caller prefers using + * LTK claim insufficient security. This way we allow the + * connection to be re-encrypted with an LTK, even if the LTK + * provides the same level of security. Only exception is if we + * don't have an LTK (e.g. because of key distribution bits). 
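+ * In other words, an STK-encrypted link is reported as insufficient
+ * whenever a distributed LTK exists, allowing callers to re-encrypt
+ * with the stronger bonded key.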
+ */ + if (key_pref == SMP_USE_LTK && + test_bit(HCI_CONN_STK_ENCRYPT, &hcon->flags) && + hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, hcon->role)) + return false; + + if (hcon->sec_level >= sec_level) + return true; + + return false; +} + +static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_security_req *rp = (void *) skb->data; + struct smp_cmd_pairing cp; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct smp_chan *smp; + u8 sec_level, auth; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (skb->len < sizeof(*rp)) + return SMP_INVALID_PARAMS; + + if (hcon->role != HCI_ROLE_MASTER) + return SMP_CMD_NOTSUPP; + + auth = rp->auth_req & AUTH_REQ_MASK(hdev); + + if (hci_dev_test_flag(hdev, HCI_SC_ONLY) && !(auth & SMP_AUTH_SC)) + return SMP_AUTH_REQUIREMENTS; + + if (hcon->io_capability == HCI_IO_NO_INPUT_OUTPUT) + sec_level = BT_SECURITY_MEDIUM; + else + sec_level = authreq_to_seclevel(auth); + + if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) { + /* If link is already encrypted with sufficient security we + * still need refresh encryption as per Core Spec 5.0 Vol 3, + * Part H 2.4.6 + */ + smp_ltk_encrypt(conn, hcon->sec_level); + return 0; + } + + if (sec_level > hcon->pending_sec_level) + hcon->pending_sec_level = sec_level; + + if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) + return 0; + + smp = smp_chan_create(conn); + if (!smp) + return SMP_UNSPECIFIED; + + if (!hci_dev_test_flag(hdev, HCI_BONDABLE) && + (auth & SMP_AUTH_BONDING)) + return SMP_PAIRING_NOTSUPP; + + skb_pull(skb, sizeof(*rp)); + + memset(&cp, 0, sizeof(cp)); + build_pairing_cmd(conn, &cp, NULL, auth); + + smp->preq[0] = SMP_CMD_PAIRING_REQ; + memcpy(&smp->preq[1], &cp, sizeof(cp)); + + smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); + + return 0; +} + +int smp_conn_security(struct hci_conn *hcon, __u8 sec_level) +{ + struct l2cap_conn *conn = hcon->l2cap_data; + struct l2cap_chan *chan; + struct smp_chan *smp; + __u8 authreq; + int ret; + + bt_dev_dbg(hcon->hdev, "conn %p hcon %p level 0x%2.2x", conn, hcon, + sec_level); + + /* This may be NULL if there's an unexpected disconnection */ + if (!conn) + return 1; + + if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) + return 1; + + if (smp_sufficient_security(hcon, sec_level, SMP_USE_LTK)) + return 1; + + if (sec_level > hcon->pending_sec_level) + hcon->pending_sec_level = sec_level; + + if (hcon->role == HCI_ROLE_MASTER) + if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) + return 0; + + chan = conn->smp; + if (!chan) { + bt_dev_err(hcon->hdev, "security requested but not available"); + return 1; + } + + l2cap_chan_lock(chan); + + /* If SMP is already in progress ignore this request */ + if (chan->data) { + ret = 0; + goto unlock; + } + + smp = smp_chan_create(conn); + if (!smp) { + ret = 1; + goto unlock; + } + + authreq = seclevel_to_authreq(sec_level); + + if (hci_dev_test_flag(hcon->hdev, HCI_SC_ENABLED)) { + authreq |= SMP_AUTH_SC; + if (hci_dev_test_flag(hcon->hdev, HCI_SSP_ENABLED)) + authreq |= SMP_AUTH_CT2; + } + + /* Don't attempt to set MITM if setting is overridden by debugfs + * Needed to pass certification test SM/MAS/PKE/BV-01-C + */ + if (!hci_dev_test_flag(hcon->hdev, HCI_FORCE_NO_MITM)) { + /* Require MITM if IO Capability allows or the security level + * requires it. 
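+ * (With NoInputNoOutput only Just Works is possible, so asking for
+ * MITM protection there could only make pairing fail.)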
+ */ + if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT || + hcon->pending_sec_level > BT_SECURITY_MEDIUM) + authreq |= SMP_AUTH_MITM; + } + + if (hcon->role == HCI_ROLE_MASTER) { + struct smp_cmd_pairing cp; + + build_pairing_cmd(conn, &cp, NULL, authreq); + smp->preq[0] = SMP_CMD_PAIRING_REQ; + memcpy(&smp->preq[1], &cp, sizeof(cp)); + + smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(cp), &cp); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); + } else { + struct smp_cmd_security_req cp; + cp.auth_req = authreq; + smp_send_cmd(conn, SMP_CMD_SECURITY_REQ, sizeof(cp), &cp); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_REQ); + } + + set_bit(SMP_FLAG_INITIATOR, &smp->flags); + ret = 0; + +unlock: + l2cap_chan_unlock(chan); + return ret; +} + +int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type) +{ + struct hci_conn *hcon; + struct l2cap_conn *conn; + struct l2cap_chan *chan; + struct smp_chan *smp; + int err; + + err = hci_remove_ltk(hdev, bdaddr, addr_type); + hci_remove_irk(hdev, bdaddr, addr_type); + + hcon = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type); + if (!hcon) + goto done; + + conn = hcon->l2cap_data; + if (!conn) + goto done; + + chan = conn->smp; + if (!chan) + goto done; + + l2cap_chan_lock(chan); + + smp = chan->data; + if (smp) { + /* Set keys to NULL to make sure smp_failure() does not try to + * remove and free already invalidated rcu list entries. */ + smp->ltk = NULL; + smp->responder_ltk = NULL; + smp->remote_irk = NULL; + + if (test_bit(SMP_FLAG_COMPLETE, &smp->flags)) + smp_failure(conn, 0); + else + smp_failure(conn, SMP_UNSPECIFIED); + err = 0; + } + + l2cap_chan_unlock(chan); + +done: + return err; +} + +static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_encrypt_info *rp = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + + bt_dev_dbg(conn->hcon->hdev, "conn %p", conn); + + if (skb->len < sizeof(*rp)) + return SMP_INVALID_PARAMS; + + /* Pairing is aborted if any blocked keys are distributed */ + if (hci_is_blocked_key(conn->hcon->hdev, HCI_BLOCKED_KEY_TYPE_LTK, + rp->ltk)) { + bt_dev_warn_ratelimited(conn->hcon->hdev, + "LTK blocked for %pMR", + &conn->hcon->dst); + return SMP_INVALID_PARAMS; + } + + SMP_ALLOW_CMD(smp, SMP_CMD_INITIATOR_IDENT); + + skb_pull(skb, sizeof(*rp)); + + memcpy(smp->tk, rp->ltk, sizeof(smp->tk)); + + return 0; +} + +static int smp_cmd_initiator_ident(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_initiator_ident *rp = (void *)skb->data; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_dev *hdev = conn->hcon->hdev; + struct hci_conn *hcon = conn->hcon; + struct smp_ltk *ltk; + u8 authenticated; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (skb->len < sizeof(*rp)) + return SMP_INVALID_PARAMS; + + /* Mark the information as received */ + smp->remote_key_dist &= ~SMP_DIST_ENC_KEY; + + if (smp->remote_key_dist & SMP_DIST_ID_KEY) + SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_INFO); + else if (smp->remote_key_dist & SMP_DIST_SIGN) + SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); + + skb_pull(skb, sizeof(*rp)); + + authenticated = (hcon->sec_level == BT_SECURITY_HIGH); + ltk = hci_add_ltk(hdev, &hcon->dst, hcon->dst_type, SMP_LTK, + authenticated, smp->tk, smp->enc_key_size, + rp->ediv, rp->rand); + smp->ltk = ltk; + if (!(smp->remote_key_dist & KEY_DIST_MASK)) + smp_distribute_keys(smp); + + return 0; +} + +static int smp_cmd_ident_info(struct l2cap_conn *conn, struct sk_buff *skb) +{ + 
struct smp_cmd_ident_info *info = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + + bt_dev_dbg(conn->hcon->hdev, ""); + + if (skb->len < sizeof(*info)) + return SMP_INVALID_PARAMS; + + /* Pairing is aborted if any blocked keys are distributed */ + if (hci_is_blocked_key(conn->hcon->hdev, HCI_BLOCKED_KEY_TYPE_IRK, + info->irk)) { + bt_dev_warn_ratelimited(conn->hcon->hdev, + "Identity key blocked for %pMR", + &conn->hcon->dst); + return SMP_INVALID_PARAMS; + } + + SMP_ALLOW_CMD(smp, SMP_CMD_IDENT_ADDR_INFO); + + skb_pull(skb, sizeof(*info)); + + memcpy(smp->irk, info->irk, 16); + + return 0; +} + +static int smp_cmd_ident_addr_info(struct l2cap_conn *conn, + struct sk_buff *skb) +{ + struct smp_cmd_ident_addr_info *info = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_conn *hcon = conn->hcon; + bdaddr_t rpa; + + bt_dev_dbg(hcon->hdev, ""); + + if (skb->len < sizeof(*info)) + return SMP_INVALID_PARAMS; + + /* Mark the information as received */ + smp->remote_key_dist &= ~SMP_DIST_ID_KEY; + + if (smp->remote_key_dist & SMP_DIST_SIGN) + SMP_ALLOW_CMD(smp, SMP_CMD_SIGN_INFO); + + skb_pull(skb, sizeof(*info)); + + /* Strictly speaking the Core Specification (4.1) allows sending + * an empty address which would force us to rely on just the IRK + * as "identity information". However, since such + * implementations are not known of and in order to not over + * complicate our implementation, simply pretend that we never + * received an IRK for such a device. + * + * The Identity Address must also be a Static Random or Public + * Address, which hci_is_identity_address() checks for. + */ + if (!bacmp(&info->bdaddr, BDADDR_ANY) || + !hci_is_identity_address(&info->bdaddr, info->addr_type)) { + bt_dev_err(hcon->hdev, "ignoring IRK with no identity address"); + goto distribute; + } + + /* Drop IRK if peer is using identity address during pairing but is + * providing different address as identity information. + * + * Microsoft Surface Precision Mouse is known to have this bug. 
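+ * Dropping the IRK keeps such a device usable; it is simply
+ * tracked by the address it paired with instead of its claimed
+ * identity.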
+ */ + if (hci_is_identity_address(&hcon->dst, hcon->dst_type) && + (bacmp(&info->bdaddr, &hcon->dst) || + info->addr_type != hcon->dst_type)) { + bt_dev_err(hcon->hdev, + "ignoring IRK with invalid identity address"); + goto distribute; + } + + bacpy(&smp->id_addr, &info->bdaddr); + smp->id_addr_type = info->addr_type; + + if (hci_bdaddr_is_rpa(&hcon->dst, hcon->dst_type)) + bacpy(&rpa, &hcon->dst); + else + bacpy(&rpa, BDADDR_ANY); + + smp->remote_irk = hci_add_irk(conn->hcon->hdev, &smp->id_addr, + smp->id_addr_type, smp->irk, &rpa); + +distribute: + if (!(smp->remote_key_dist & KEY_DIST_MASK)) + smp_distribute_keys(smp); + + return 0; +} + +static int smp_cmd_sign_info(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_sign_info *rp = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct smp_csrk *csrk; + + bt_dev_dbg(conn->hcon->hdev, "conn %p", conn); + + if (skb->len < sizeof(*rp)) + return SMP_INVALID_PARAMS; + + /* Mark the information as received */ + smp->remote_key_dist &= ~SMP_DIST_SIGN; + + skb_pull(skb, sizeof(*rp)); + + csrk = kzalloc(sizeof(*csrk), GFP_KERNEL); + if (csrk) { + if (conn->hcon->sec_level > BT_SECURITY_MEDIUM) + csrk->type = MGMT_CSRK_REMOTE_AUTHENTICATED; + else + csrk->type = MGMT_CSRK_REMOTE_UNAUTHENTICATED; + memcpy(csrk->val, rp->csrk, sizeof(csrk->val)); + } + smp->csrk = csrk; + smp_distribute_keys(smp); + + return 0; +} + +static u8 sc_select_method(struct smp_chan *smp) +{ + struct l2cap_conn *conn = smp->conn; + struct hci_conn *hcon = conn->hcon; + struct smp_cmd_pairing *local, *remote; + u8 local_mitm, remote_mitm, local_io, remote_io, method; + + if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags) || + test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) + return REQ_OOB; + + /* The preq/prsp contain the raw Pairing Request/Response PDUs + * which are needed as inputs to some crypto functions. To get + * the "struct smp_cmd_pairing" from them we need to skip the + * first byte which contains the opcode. + */ + if (hcon->out) { + local = (void *) &smp->preq[1]; + remote = (void *) &smp->prsp[1]; + } else { + local = (void *) &smp->prsp[1]; + remote = (void *) &smp->preq[1]; + } + + local_io = local->io_capability; + remote_io = remote->io_capability; + + local_mitm = (local->auth_req & SMP_AUTH_MITM); + remote_mitm = (remote->auth_req & SMP_AUTH_MITM); + + /* If either side wants MITM, look up the method from the table, + * otherwise use JUST WORKS. + */ + if (local_mitm || remote_mitm) + method = get_auth_method(smp, local_io, remote_io); + else + method = JUST_WORKS; + + /* Don't confirm locally initiated pairing attempts */ + if (method == JUST_CFM && test_bit(SMP_FLAG_INITIATOR, &smp->flags)) + method = JUST_WORKS; + + return method; +} + +static int smp_cmd_public_key(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_public_key *key = (void *) skb->data; + struct hci_conn *hcon = conn->hcon; + struct l2cap_chan *chan = conn->smp; + struct smp_chan *smp = chan->data; + struct hci_dev *hdev = hcon->hdev; + struct crypto_kpp *tfm_ecdh; + struct smp_cmd_pairing_confirm cfm; + int err; + + bt_dev_dbg(hdev, "conn %p", conn); + + if (skb->len < sizeof(*key)) + return SMP_INVALID_PARAMS; + + /* Check if remote and local public keys are the same and debug key is + * not in use. 
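+ * A peer that merely reflects our own public key back could mount
+ * the passkey-entry impersonation attack (CVE-2020-26558), so such
+ * keys are rejected unless the shared debug key is in use.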
+ */ + if (!test_bit(SMP_FLAG_DEBUG_KEY, &smp->flags) && + !crypto_memneq(key, smp->local_pk, 64)) { + bt_dev_err(hdev, "Remote and local public keys are identical"); + return SMP_UNSPECIFIED; + } + + memcpy(smp->remote_pk, key, 64); + + if (test_bit(SMP_FLAG_REMOTE_OOB, &smp->flags)) { + err = smp_f4(smp->tfm_cmac, smp->remote_pk, smp->remote_pk, + smp->rr, 0, cfm.confirm_val); + if (err) + return SMP_UNSPECIFIED; + + if (crypto_memneq(cfm.confirm_val, smp->pcnf, 16)) + return SMP_CONFIRM_FAILED; + } + + /* Non-initiating device sends its public key after receiving + * the key from the initiating device. + */ + if (!hcon->out) { + err = sc_send_public_key(smp); + if (err) + return err; + } + + SMP_DBG("Remote Public Key X: %32phN", smp->remote_pk); + SMP_DBG("Remote Public Key Y: %32phN", smp->remote_pk + 32); + + /* Compute the shared secret on the same crypto tfm on which the private + * key was set/generated. + */ + if (test_bit(SMP_FLAG_LOCAL_OOB, &smp->flags)) { + struct l2cap_chan *hchan = hdev->smp_data; + struct smp_dev *smp_dev; + + if (!hchan || !hchan->data) + return SMP_UNSPECIFIED; + + smp_dev = hchan->data; + + tfm_ecdh = smp_dev->tfm_ecdh; + } else { + tfm_ecdh = smp->tfm_ecdh; + } + + if (compute_ecdh_secret(tfm_ecdh, smp->remote_pk, smp->dhkey)) + return SMP_UNSPECIFIED; + + SMP_DBG("DHKey %32phN", smp->dhkey); + + set_bit(SMP_FLAG_REMOTE_PK, &smp->flags); + + smp->method = sc_select_method(smp); + + bt_dev_dbg(hdev, "selected method 0x%02x", smp->method); + + /* JUST_WORKS and JUST_CFM result in an unauthenticated key */ + if (smp->method == JUST_WORKS || smp->method == JUST_CFM) + hcon->pending_sec_level = BT_SECURITY_MEDIUM; + else + hcon->pending_sec_level = BT_SECURITY_FIPS; + + if (!crypto_memneq(debug_pk, smp->remote_pk, 64)) + set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags); + + if (smp->method == DSP_PASSKEY) { + get_random_bytes(&hcon->passkey_notify, + sizeof(hcon->passkey_notify)); + hcon->passkey_notify %= 1000000; + hcon->passkey_entered = 0; + smp->passkey_round = 0; + if (mgmt_user_passkey_notify(hdev, &hcon->dst, hcon->type, + hcon->dst_type, + hcon->passkey_notify, + hcon->passkey_entered)) + return SMP_UNSPECIFIED; + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + return sc_passkey_round(smp, SMP_CMD_PUBLIC_KEY); + } + + if (smp->method == REQ_OOB) { + if (hcon->out) + smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, + sizeof(smp->prnd), smp->prnd); + + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + + return 0; + } + + if (hcon->out) + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + + if (smp->method == REQ_PASSKEY) { + if (mgmt_user_passkey_request(hdev, &hcon->dst, hcon->type, + hcon->dst_type)) + return SMP_UNSPECIFIED; + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_CONFIRM); + set_bit(SMP_FLAG_WAIT_USER, &smp->flags); + return 0; + } + + /* The Initiating device waits for the non-initiating device to + * send the confirm value. 
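+ * (In numeric comparison only the responder sends a Pairing
+ * Confirm; the initiator authenticates later via the DHKey check.)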
+ */ + if (conn->hcon->out) + return 0; + + err = smp_f4(smp->tfm_cmac, smp->local_pk, smp->remote_pk, smp->prnd, + 0, cfm.confirm_val); + if (err) + return SMP_UNSPECIFIED; + + smp_send_cmd(conn, SMP_CMD_PAIRING_CONFIRM, sizeof(cfm), &cfm); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RANDOM); + + return 0; +} + +static int smp_cmd_dhkey_check(struct l2cap_conn *conn, struct sk_buff *skb) +{ + struct smp_cmd_dhkey_check *check = (void *) skb->data; + struct l2cap_chan *chan = conn->smp; + struct hci_conn *hcon = conn->hcon; + struct smp_chan *smp = chan->data; + u8 a[7], b[7], *local_addr, *remote_addr; + u8 io_cap[3], r[16], e[16]; + int err; + + bt_dev_dbg(hcon->hdev, "conn %p", conn); + + if (skb->len < sizeof(*check)) + return SMP_INVALID_PARAMS; + + memcpy(a, &hcon->init_addr, 6); + memcpy(b, &hcon->resp_addr, 6); + a[6] = hcon->init_addr_type; + b[6] = hcon->resp_addr_type; + + if (hcon->out) { + local_addr = a; + remote_addr = b; + memcpy(io_cap, &smp->prsp[1], 3); + } else { + local_addr = b; + remote_addr = a; + memcpy(io_cap, &smp->preq[1], 3); + } + + memset(r, 0, sizeof(r)); + + if (smp->method == REQ_PASSKEY || smp->method == DSP_PASSKEY) + put_unaligned_le32(hcon->passkey_notify, r); + else if (smp->method == REQ_OOB) + memcpy(r, smp->lr, 16); + + err = smp_f6(smp->tfm_cmac, smp->mackey, smp->rrnd, smp->prnd, r, + io_cap, remote_addr, local_addr, e); + if (err) + return SMP_UNSPECIFIED; + + if (crypto_memneq(check->e, e, 16)) + return SMP_DHKEY_CHECK_FAILED; + + if (!hcon->out) { + if (test_bit(SMP_FLAG_WAIT_USER, &smp->flags)) { + set_bit(SMP_FLAG_DHKEY_PENDING, &smp->flags); + return 0; + } + + /* Responder sends DHKey check as response to initiator */ + sc_dhkey_check(smp); + } + + sc_add_ltk(smp); + + if (hcon->out) { + hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size); + hcon->enc_key_size = smp->enc_key_size; + } + + return 0; +} + +static int smp_cmd_keypress_notify(struct l2cap_conn *conn, + struct sk_buff *skb) +{ + struct smp_cmd_keypress_notify *kp = (void *) skb->data; + + bt_dev_dbg(conn->hcon->hdev, "value 0x%02x", kp->value); + + return 0; +} + +static int smp_sig_channel(struct l2cap_chan *chan, struct sk_buff *skb) +{ + struct l2cap_conn *conn = chan->conn; + struct hci_conn *hcon = conn->hcon; + struct smp_chan *smp; + __u8 code, reason; + int err = 0; + + if (skb->len < 1) + return -EILSEQ; + + if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) { + reason = SMP_PAIRING_NOTSUPP; + goto done; + } + + code = skb->data[0]; + skb_pull(skb, sizeof(code)); + + smp = chan->data; + + if (code > SMP_CMD_MAX) + goto drop; + + if (smp && !test_and_clear_bit(code, &smp->allow_cmd)) + goto drop; + + /* If we don't have a context the only allowed commands are + * pairing request and security request. 
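 */

/* smp_sig_channel() above gates every PDU on a per-connection allow mask:
 * SMP_ALLOW_CMD() sets a bit when a state transition expects a given
 * opcode, and test_and_clear_bit() consumes it on receipt, so each
 * permission is one-shot and out-of-order commands are dropped. A compact
 * sketch of that gating with a plain bitmask:
 */

#include <stdbool.h>
#include <stdint.h>

#define ALLOW_CMD(mask, code)	((mask) |= 1UL << (code))

/* returns true and consumes the permission iff 'code' was expected */
static bool take_cmd(unsigned long *mask, uint8_t code)
{
	if (!(*mask & (1UL << code)))
		return false;		/* unexpected: caller drops the PDU */

	*mask &= ~(1UL << code);	/* one-shot: must be re-allowed */
	return true;
}

/*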
+ */ + if (!smp && code != SMP_CMD_PAIRING_REQ && code != SMP_CMD_SECURITY_REQ) + goto drop; + + switch (code) { + case SMP_CMD_PAIRING_REQ: + reason = smp_cmd_pairing_req(conn, skb); + break; + + case SMP_CMD_PAIRING_FAIL: + smp_failure(conn, 0); + err = -EPERM; + break; + + case SMP_CMD_PAIRING_RSP: + reason = smp_cmd_pairing_rsp(conn, skb); + break; + + case SMP_CMD_SECURITY_REQ: + reason = smp_cmd_security_req(conn, skb); + break; + + case SMP_CMD_PAIRING_CONFIRM: + reason = smp_cmd_pairing_confirm(conn, skb); + break; + + case SMP_CMD_PAIRING_RANDOM: + reason = smp_cmd_pairing_random(conn, skb); + break; + + case SMP_CMD_ENCRYPT_INFO: + reason = smp_cmd_encrypt_info(conn, skb); + break; + + case SMP_CMD_INITIATOR_IDENT: + reason = smp_cmd_initiator_ident(conn, skb); + break; + + case SMP_CMD_IDENT_INFO: + reason = smp_cmd_ident_info(conn, skb); + break; + + case SMP_CMD_IDENT_ADDR_INFO: + reason = smp_cmd_ident_addr_info(conn, skb); + break; + + case SMP_CMD_SIGN_INFO: + reason = smp_cmd_sign_info(conn, skb); + break; + + case SMP_CMD_PUBLIC_KEY: + reason = smp_cmd_public_key(conn, skb); + break; + + case SMP_CMD_DHKEY_CHECK: + reason = smp_cmd_dhkey_check(conn, skb); + break; + + case SMP_CMD_KEYPRESS_NOTIFY: + reason = smp_cmd_keypress_notify(conn, skb); + break; + + default: + bt_dev_dbg(hcon->hdev, "Unknown command code 0x%2.2x", code); + reason = SMP_CMD_NOTSUPP; + goto done; + } + +done: + if (!err) { + if (reason) + smp_failure(conn, reason); + kfree_skb(skb); + } + + return err; + +drop: + bt_dev_err(hcon->hdev, "unexpected SMP command 0x%02x from %pMR", + code, &hcon->dst); + kfree_skb(skb); + return 0; +} + +static void smp_teardown_cb(struct l2cap_chan *chan, int err) +{ + struct l2cap_conn *conn = chan->conn; + + bt_dev_dbg(conn->hcon->hdev, "chan %p", chan); + + if (chan->data) + smp_chan_destroy(conn); + + conn->smp = NULL; + l2cap_chan_put(chan); +} + +static void bredr_pairing(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct hci_conn *hcon = conn->hcon; + struct hci_dev *hdev = hcon->hdev; + struct smp_cmd_pairing req; + struct smp_chan *smp; + + bt_dev_dbg(hdev, "chan %p", chan); + + /* Only new pairings are interesting */ + if (!test_bit(HCI_CONN_NEW_LINK_KEY, &hcon->flags)) + return; + + /* Don't bother if we're not encrypted */ + if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) + return; + + /* Only initiator may initiate SMP over BR/EDR */ + if (hcon->role != HCI_ROLE_MASTER) + return; + + /* Secure Connections support must be enabled */ + if (!hci_dev_test_flag(hdev, HCI_SC_ENABLED)) + return; + + /* BR/EDR must use Secure Connections for SMP */ + if (!test_bit(HCI_CONN_AES_CCM, &hcon->flags) && + !hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) + return; + + /* If our LE support is not enabled don't do anything */ + if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) + return; + + /* Don't bother if remote LE support is not enabled */ + if (!lmp_host_le_capable(hcon)) + return; + + /* Remote must support SMP fixed chan for BR/EDR */ + if (!(conn->remote_fixed_chan & L2CAP_FC_SMP_BREDR)) + return; + + /* Don't bother if SMP is already ongoing */ + if (chan->data) + return; + + smp = smp_chan_create(conn); + if (!smp) { + bt_dev_err(hdev, "unable to create SMP context for BR/EDR"); + return; + } + + set_bit(SMP_FLAG_SC, &smp->flags); + + bt_dev_dbg(hdev, "starting SMP over BR/EDR"); + + /* Prepare and send the BR/EDR SMP Pairing Request */ + build_bredr_pairing_cmd(smp, &req, NULL); + + smp->preq[0] = SMP_CMD_PAIRING_REQ; + memcpy(&smp->preq[1], 
&req, sizeof(req)); + + smp_send_cmd(conn, SMP_CMD_PAIRING_REQ, sizeof(req), &req); + SMP_ALLOW_CMD(smp, SMP_CMD_PAIRING_RSP); +} + +static void smp_resume_cb(struct l2cap_chan *chan) +{ + struct smp_chan *smp = chan->data; + struct l2cap_conn *conn = chan->conn; + struct hci_conn *hcon = conn->hcon; + + bt_dev_dbg(hcon->hdev, "chan %p", chan); + + if (hcon->type == ACL_LINK) { + bredr_pairing(chan); + return; + } + + if (!smp) + return; + + if (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) + return; + + cancel_delayed_work(&smp->security_timer); + + smp_distribute_keys(smp); +} + +static void smp_ready_cb(struct l2cap_chan *chan) +{ + struct l2cap_conn *conn = chan->conn; + struct hci_conn *hcon = conn->hcon; + + bt_dev_dbg(hcon->hdev, "chan %p", chan); + + /* No need to call l2cap_chan_hold() here since we already own + * the reference taken in smp_new_conn_cb(). This is just the + * first time that we tie it to a specific pointer. The code in + * l2cap_core.c ensures that there's no risk this function won't + * get called if smp_new_conn_cb was previously called. + */ + conn->smp = chan; + + if (hcon->type == ACL_LINK && test_bit(HCI_CONN_ENCRYPT, &hcon->flags)) + bredr_pairing(chan); +} + +static int smp_recv_cb(struct l2cap_chan *chan, struct sk_buff *skb) +{ + int err; + + bt_dev_dbg(chan->conn->hcon->hdev, "chan %p", chan); + + err = smp_sig_channel(chan, skb); + if (err) { + struct smp_chan *smp = chan->data; + + if (smp) + cancel_delayed_work_sync(&smp->security_timer); + + hci_disconnect(chan->conn->hcon, HCI_ERROR_AUTH_FAILURE); + } + + return err; +} + +static struct sk_buff *smp_alloc_skb_cb(struct l2cap_chan *chan, + unsigned long hdr_len, + unsigned long len, int nb) +{ + struct sk_buff *skb; + + skb = bt_skb_alloc(hdr_len + len, GFP_KERNEL); + if (!skb) + return ERR_PTR(-ENOMEM); + + skb->priority = HCI_PRIO_MAX; + bt_cb(skb)->l2cap.chan = chan; + + return skb; +} + +static const struct l2cap_ops smp_chan_ops = { + .name = "Security Manager", + .ready = smp_ready_cb, + .recv = smp_recv_cb, + .alloc_skb = smp_alloc_skb_cb, + .teardown = smp_teardown_cb, + .resume = smp_resume_cb, + + .new_connection = l2cap_chan_no_new_connection, + .state_change = l2cap_chan_no_state_change, + .close = l2cap_chan_no_close, + .defer = l2cap_chan_no_defer, + .suspend = l2cap_chan_no_suspend, + .set_shutdown = l2cap_chan_no_set_shutdown, + .get_sndtimeo = l2cap_chan_no_get_sndtimeo, +}; + +static inline struct l2cap_chan *smp_new_conn_cb(struct l2cap_chan *pchan) +{ + struct l2cap_chan *chan; + + BT_DBG("pchan %p", pchan); + + chan = l2cap_chan_create(); + if (!chan) + return NULL; + + chan->chan_type = pchan->chan_type; + chan->ops = &smp_chan_ops; + chan->scid = pchan->scid; + chan->dcid = chan->scid; + chan->imtu = pchan->imtu; + chan->omtu = pchan->omtu; + chan->mode = pchan->mode; + + /* Other L2CAP channels may request SMP routines in order to + * change the security level. This means that the SMP channel + * lock must be considered in its own category to avoid lockdep + * warnings. 
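 */

/* Setting the nesting level below puts the per-connection SMP channel in
 * its own lock class for lockdep, since SMP operations can be invoked
 * while another L2CAP channel's lock is already held. A fragment (kernel
 * context assumed, illustrative types) of the generic pattern such
 * annotations exist for: taking two locks of one class in a known-safe
 * order needs a subclass hint, or lockdep reports false recursive locking:
 */

#include <linux/mutex.h>
#include <linux/lockdep.h>

struct node {
	struct mutex lock;
	struct node *parent;
};

static void reparent_locked(struct node *n)
{
	mutex_lock(&n->parent->lock);
	/* same lock class as above: declare the nesting as intentional */
	mutex_lock_nested(&n->lock, SINGLE_DEPTH_NESTING);

	/* ... move n under its parent ... */

	mutex_unlock(&n->lock);
	mutex_unlock(&n->parent->lock);
}

/*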
+ */ + atomic_set(&chan->nesting, L2CAP_NESTING_SMP); + + BT_DBG("created chan %p", chan); + + return chan; +} + +static const struct l2cap_ops smp_root_chan_ops = { + .name = "Security Manager Root", + .new_connection = smp_new_conn_cb, + + /* None of these are implemented for the root channel */ + .close = l2cap_chan_no_close, + .alloc_skb = l2cap_chan_no_alloc_skb, + .recv = l2cap_chan_no_recv, + .state_change = l2cap_chan_no_state_change, + .teardown = l2cap_chan_no_teardown, + .ready = l2cap_chan_no_ready, + .defer = l2cap_chan_no_defer, + .suspend = l2cap_chan_no_suspend, + .resume = l2cap_chan_no_resume, + .set_shutdown = l2cap_chan_no_set_shutdown, + .get_sndtimeo = l2cap_chan_no_get_sndtimeo, +}; + +static struct l2cap_chan *smp_add_cid(struct hci_dev *hdev, u16 cid) +{ + struct l2cap_chan *chan; + struct smp_dev *smp; + struct crypto_shash *tfm_cmac; + struct crypto_kpp *tfm_ecdh; + + if (cid == L2CAP_CID_SMP_BREDR) { + smp = NULL; + goto create_chan; + } + + smp = kzalloc(sizeof(*smp), GFP_KERNEL); + if (!smp) + return ERR_PTR(-ENOMEM); + + tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); + if (IS_ERR(tfm_cmac)) { + bt_dev_err(hdev, "Unable to create CMAC crypto context"); + kfree_sensitive(smp); + return ERR_CAST(tfm_cmac); + } + + tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); + if (IS_ERR(tfm_ecdh)) { + bt_dev_err(hdev, "Unable to create ECDH crypto context"); + crypto_free_shash(tfm_cmac); + kfree_sensitive(smp); + return ERR_CAST(tfm_ecdh); + } + + smp->local_oob = false; + smp->tfm_cmac = tfm_cmac; + smp->tfm_ecdh = tfm_ecdh; + +create_chan: + chan = l2cap_chan_create(); + if (!chan) { + if (smp) { + crypto_free_shash(smp->tfm_cmac); + crypto_free_kpp(smp->tfm_ecdh); + kfree_sensitive(smp); + } + return ERR_PTR(-ENOMEM); + } + + chan->data = smp; + + l2cap_add_scid(chan, cid); + + l2cap_chan_set_defaults(chan); + + if (cid == L2CAP_CID_SMP) { + u8 bdaddr_type; + + hci_copy_identity_address(hdev, &chan->src, &bdaddr_type); + + if (bdaddr_type == ADDR_LE_DEV_PUBLIC) + chan->src_type = BDADDR_LE_PUBLIC; + else + chan->src_type = BDADDR_LE_RANDOM; + } else { + bacpy(&chan->src, &hdev->bdaddr); + chan->src_type = BDADDR_BREDR; + } + + chan->state = BT_LISTEN; + chan->mode = L2CAP_MODE_BASIC; + chan->imtu = L2CAP_DEFAULT_MTU; + chan->ops = &smp_root_chan_ops; + + /* Set correct nesting level for a parent/listening channel */ + atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); + + return chan; +} + +static void smp_del_chan(struct l2cap_chan *chan) +{ + struct smp_dev *smp; + + BT_DBG("chan %p", chan); + + smp = chan->data; + if (smp) { + chan->data = NULL; + crypto_free_shash(smp->tfm_cmac); + crypto_free_kpp(smp->tfm_ecdh); + kfree_sensitive(smp); + } + + l2cap_chan_put(chan); +} + +int smp_force_bredr(struct hci_dev *hdev, bool enable) +{ + if (enable == hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) + return -EALREADY; + + if (enable) { + struct l2cap_chan *chan; + + chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + hdev->smp_bredr_data = chan; + } else { + struct l2cap_chan *chan; + + chan = hdev->smp_bredr_data; + hdev->smp_bredr_data = NULL; + smp_del_chan(chan); + } + + hci_dev_change_flag(hdev, HCI_FORCE_BREDR_SMP); + + return 0; +} + +int smp_register(struct hci_dev *hdev) +{ + struct l2cap_chan *chan; + + bt_dev_dbg(hdev, ""); + + /* If the controller does not support Low Energy operation, then + * there is also no need to register any SMP channel. 
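 */

/* smp_add_cid() above acquires a context, a CMAC tfm and an ECDH tfm, and
 * on each failure releases exactly what was already acquired, in reverse
 * order. The same unwind shape, as a minimal standalone sketch with
 * placeholder resources standing in for the crypto allocations:
 */

#include <stdlib.h>

struct ctx { void *cmac, *ecdh; };

static struct ctx *ctx_create(void)
{
	struct ctx *ctx = calloc(1, sizeof(*ctx));

	if (!ctx)
		return NULL;

	ctx->cmac = malloc(16);		/* stands in for crypto_alloc_shash() */
	if (!ctx->cmac)
		goto err_free_ctx;

	ctx->ecdh = malloc(16);		/* stands in for crypto_alloc_kpp() */
	if (!ctx->ecdh)
		goto err_free_cmac;

	return ctx;

err_free_cmac:
	free(ctx->cmac);
err_free_ctx:
	free(ctx);
	return NULL;
}

/*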
+ */ + if (!lmp_le_capable(hdev)) + return 0; + + if (WARN_ON(hdev->smp_data)) { + chan = hdev->smp_data; + hdev->smp_data = NULL; + smp_del_chan(chan); + } + + chan = smp_add_cid(hdev, L2CAP_CID_SMP); + if (IS_ERR(chan)) + return PTR_ERR(chan); + + hdev->smp_data = chan; + + if (!lmp_sc_capable(hdev)) { + /* Flag can be already set here (due to power toggle) */ + if (!hci_dev_test_flag(hdev, HCI_FORCE_BREDR_SMP)) + return 0; + } + + if (WARN_ON(hdev->smp_bredr_data)) { + chan = hdev->smp_bredr_data; + hdev->smp_bredr_data = NULL; + smp_del_chan(chan); + } + + chan = smp_add_cid(hdev, L2CAP_CID_SMP_BREDR); + if (IS_ERR(chan)) { + int err = PTR_ERR(chan); + chan = hdev->smp_data; + hdev->smp_data = NULL; + smp_del_chan(chan); + return err; + } + + hdev->smp_bredr_data = chan; + + return 0; +} + +void smp_unregister(struct hci_dev *hdev) +{ + struct l2cap_chan *chan; + + if (hdev->smp_bredr_data) { + chan = hdev->smp_bredr_data; + hdev->smp_bredr_data = NULL; + smp_del_chan(chan); + } + + if (hdev->smp_data) { + chan = hdev->smp_data; + hdev->smp_data = NULL; + smp_del_chan(chan); + } +} + +#if IS_ENABLED(CONFIG_BT_SELFTEST_SMP) + +static int __init test_debug_key(struct crypto_kpp *tfm_ecdh) +{ + u8 pk[64]; + int err; + + err = set_ecdh_privkey(tfm_ecdh, debug_sk); + if (err) + return err; + + err = generate_ecdh_public_key(tfm_ecdh, pk); + if (err) + return err; + + if (crypto_memneq(pk, debug_pk, 64)) + return -EINVAL; + + return 0; +} + +static int __init test_ah(void) +{ + const u8 irk[16] = { + 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, + 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec }; + const u8 r[3] = { 0x94, 0x81, 0x70 }; + const u8 exp[3] = { 0xaa, 0xfb, 0x0d }; + u8 res[3]; + int err; + + err = smp_ah(irk, r, res); + if (err) + return err; + + if (crypto_memneq(res, exp, 3)) + return -EINVAL; + + return 0; +} + +static int __init test_c1(void) +{ + const u8 k[16] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + const u8 r[16] = { + 0xe0, 0x2e, 0x70, 0xc6, 0x4e, 0x27, 0x88, 0x63, + 0x0e, 0x6f, 0xad, 0x56, 0x21, 0xd5, 0x83, 0x57 }; + const u8 preq[7] = { 0x01, 0x01, 0x00, 0x00, 0x10, 0x07, 0x07 }; + const u8 pres[7] = { 0x02, 0x03, 0x00, 0x00, 0x08, 0x00, 0x05 }; + const u8 _iat = 0x01; + const u8 _rat = 0x00; + const bdaddr_t ra = { { 0xb6, 0xb5, 0xb4, 0xb3, 0xb2, 0xb1 } }; + const bdaddr_t ia = { { 0xa6, 0xa5, 0xa4, 0xa3, 0xa2, 0xa1 } }; + const u8 exp[16] = { + 0x86, 0x3b, 0xf1, 0xbe, 0xc5, 0x4d, 0xa7, 0xd2, + 0xea, 0x88, 0x89, 0x87, 0xef, 0x3f, 0x1e, 0x1e }; + u8 res[16]; + int err; + + err = smp_c1(k, r, preq, pres, _iat, &ia, _rat, &ra, res); + if (err) + return err; + + if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; +} + +static int __init test_s1(void) +{ + const u8 k[16] = { + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; + const u8 r1[16] = { + 0x88, 0x77, 0x66, 0x55, 0x44, 0x33, 0x22, 0x11 }; + const u8 r2[16] = { + 0x00, 0xff, 0xee, 0xdd, 0xcc, 0xbb, 0xaa, 0x99 }; + const u8 exp[16] = { + 0x62, 0xa0, 0x6d, 0x79, 0xae, 0x16, 0x42, 0x5b, + 0x9b, 0xf4, 0xb0, 0xe8, 0xf0, 0xe1, 0x1f, 0x9a }; + u8 res[16]; + int err; + + err = smp_s1(k, r1, r2, res); + if (err) + return err; + + if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; +} + +static int __init test_f4(struct crypto_shash *tfm_cmac) +{ + const u8 u[32] = { + 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, + 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, + 0xb9, 0xa5, 
0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, + 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 }; + const u8 v[32] = { + 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b, + 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59, + 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90, + 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 }; + const u8 x[16] = { + 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff, + 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 }; + const u8 z = 0x00; + const u8 exp[16] = { + 0x2d, 0x87, 0x74, 0xa9, 0xbe, 0xa1, 0xed, 0xf1, + 0x1c, 0xbd, 0xa9, 0x07, 0xf1, 0x16, 0xc9, 0xf2 }; + u8 res[16]; + int err; + + err = smp_f4(tfm_cmac, u, v, x, z, res); + if (err) + return err; + + if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; +} + +static int __init test_f5(struct crypto_shash *tfm_cmac) +{ + const u8 w[32] = { + 0x98, 0xa6, 0xbf, 0x73, 0xf3, 0x34, 0x8d, 0x86, + 0xf1, 0x66, 0xf8, 0xb4, 0x13, 0x6b, 0x79, 0x99, + 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, + 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec }; + const u8 n1[16] = { + 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff, + 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 }; + const u8 n2[16] = { + 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21, + 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 }; + const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 }; + const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 }; + const u8 exp_ltk[16] = { + 0x38, 0x0a, 0x75, 0x94, 0xb5, 0x22, 0x05, 0x98, + 0x23, 0xcd, 0xd7, 0x69, 0x11, 0x79, 0x86, 0x69 }; + const u8 exp_mackey[16] = { + 0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd, + 0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 }; + u8 mackey[16], ltk[16]; + int err; + + err = smp_f5(tfm_cmac, w, n1, n2, a1, a2, mackey, ltk); + if (err) + return err; + + if (crypto_memneq(mackey, exp_mackey, 16)) + return -EINVAL; + + if (crypto_memneq(ltk, exp_ltk, 16)) + return -EINVAL; + + return 0; +} + +static int __init test_f6(struct crypto_shash *tfm_cmac) +{ + const u8 w[16] = { + 0x20, 0x6e, 0x63, 0xce, 0x20, 0x6a, 0x3f, 0xfd, + 0x02, 0x4a, 0x08, 0xa1, 0x76, 0xf1, 0x65, 0x29 }; + const u8 n1[16] = { + 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 0xff, + 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 }; + const u8 n2[16] = { + 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21, + 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 }; + const u8 r[16] = { + 0xc8, 0x0f, 0x2d, 0x0c, 0xd2, 0x42, 0xda, 0x08, + 0x54, 0xbb, 0x53, 0xb4, 0x3b, 0x34, 0xa3, 0x12 }; + const u8 io_cap[3] = { 0x02, 0x01, 0x01 }; + const u8 a1[7] = { 0xce, 0xbf, 0x37, 0x37, 0x12, 0x56, 0x00 }; + const u8 a2[7] = { 0xc1, 0xcf, 0x2d, 0x70, 0x13, 0xa7, 0x00 }; + const u8 exp[16] = { + 0x61, 0x8f, 0x95, 0xda, 0x09, 0x0b, 0x6c, 0xd2, + 0xc5, 0xe8, 0xd0, 0x9c, 0x98, 0x73, 0xc4, 0xe3 }; + u8 res[16]; + int err; + + err = smp_f6(tfm_cmac, w, n1, n2, r, io_cap, a1, a2, res); + if (err) + return err; + + if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; +} + +static int __init test_g2(struct crypto_shash *tfm_cmac) +{ + const u8 u[32] = { + 0xe6, 0x9d, 0x35, 0x0e, 0x48, 0x01, 0x03, 0xcc, + 0xdb, 0xfd, 0xf4, 0xac, 0x11, 0x91, 0xf4, 0xef, + 0xb9, 0xa5, 0xf9, 0xe9, 0xa7, 0x83, 0x2c, 0x5e, + 0x2c, 0xbe, 0x97, 0xf2, 0xd2, 0x03, 0xb0, 0x20 }; + const u8 v[32] = { + 0xfd, 0xc5, 0x7f, 0xf4, 0x49, 0xdd, 0x4f, 0x6b, + 0xfb, 0x7c, 0x9d, 0xf1, 0xc2, 0x9a, 0xcb, 0x59, + 0x2a, 0xe7, 0xd4, 0xee, 0xfb, 0xfc, 0x0a, 0x90, + 0x9a, 0xbb, 0xf6, 0x32, 0x3d, 0x8b, 0x18, 0x55 }; + const u8 x[16] = { + 0xab, 0xae, 0x2b, 0x71, 0xec, 0xb2, 0xff, 
0xff, + 0x3e, 0x73, 0x77, 0xd1, 0x54, 0x84, 0xcb, 0xd5 }; + const u8 y[16] = { + 0xcf, 0xc4, 0x3d, 0xff, 0xf7, 0x83, 0x65, 0x21, + 0x6e, 0x5f, 0xa7, 0x25, 0xcc, 0xe7, 0xe8, 0xa6 }; + const u32 exp_val = 0x2f9ed5ba % 1000000; + u32 val; + int err; + + err = smp_g2(tfm_cmac, u, v, x, y, &val); + if (err) + return err; + + if (val != exp_val) + return -EINVAL; + + return 0; +} + +static int __init test_h6(struct crypto_shash *tfm_cmac) +{ + const u8 w[16] = { + 0x9b, 0x7d, 0x39, 0x0a, 0xa6, 0x10, 0x10, 0x34, + 0x05, 0xad, 0xc8, 0x57, 0xa3, 0x34, 0x02, 0xec }; + const u8 key_id[4] = { 0x72, 0x62, 0x65, 0x6c }; + const u8 exp[16] = { + 0x99, 0x63, 0xb1, 0x80, 0xe2, 0xa9, 0xd3, 0xe8, + 0x1c, 0xc9, 0x6d, 0xe7, 0x02, 0xe1, 0x9a, 0x2d }; + u8 res[16]; + int err; + + err = smp_h6(tfm_cmac, w, key_id, res); + if (err) + return err; + + if (crypto_memneq(res, exp, 16)) + return -EINVAL; + + return 0; +} + +static char test_smp_buffer[32]; + +static ssize_t test_smp_read(struct file *file, char __user *user_buf, + size_t count, loff_t *ppos) +{ + return simple_read_from_buffer(user_buf, count, ppos, test_smp_buffer, + strlen(test_smp_buffer)); +} + +static const struct file_operations test_smp_fops = { + .open = simple_open, + .read = test_smp_read, + .llseek = default_llseek, +}; + +static int __init run_selftests(struct crypto_shash *tfm_cmac, + struct crypto_kpp *tfm_ecdh) +{ + ktime_t calltime, delta, rettime; + unsigned long long duration; + int err; + + calltime = ktime_get(); + + err = test_debug_key(tfm_ecdh); + if (err) { + BT_ERR("debug_key test failed"); + goto done; + } + + err = test_ah(); + if (err) { + BT_ERR("smp_ah test failed"); + goto done; + } + + err = test_c1(); + if (err) { + BT_ERR("smp_c1 test failed"); + goto done; + } + + err = test_s1(); + if (err) { + BT_ERR("smp_s1 test failed"); + goto done; + } + + err = test_f4(tfm_cmac); + if (err) { + BT_ERR("smp_f4 test failed"); + goto done; + } + + err = test_f5(tfm_cmac); + if (err) { + BT_ERR("smp_f5 test failed"); + goto done; + } + + err = test_f6(tfm_cmac); + if (err) { + BT_ERR("smp_f6 test failed"); + goto done; + } + + err = test_g2(tfm_cmac); + if (err) { + BT_ERR("smp_g2 test failed"); + goto done; + } + + err = test_h6(tfm_cmac); + if (err) { + BT_ERR("smp_h6 test failed"); + goto done; + } + + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + duration = (unsigned long long) ktime_to_ns(delta) >> 10; + + BT_INFO("SMP test passed in %llu usecs", duration); + +done: + if (!err) + snprintf(test_smp_buffer, sizeof(test_smp_buffer), + "PASS (%llu usecs)\n", duration); + else + snprintf(test_smp_buffer, sizeof(test_smp_buffer), "FAIL\n"); + + debugfs_create_file("selftest_smp", 0444, bt_debugfs, NULL, + &test_smp_fops); + + return err; +} + +int __init bt_selftest_smp(void) +{ + struct crypto_shash *tfm_cmac; + struct crypto_kpp *tfm_ecdh; + int err; + + tfm_cmac = crypto_alloc_shash("cmac(aes)", 0, 0); + if (IS_ERR(tfm_cmac)) { + BT_ERR("Unable to create CMAC crypto context"); + return PTR_ERR(tfm_cmac); + } + + tfm_ecdh = crypto_alloc_kpp("ecdh-nist-p256", 0, 0); + if (IS_ERR(tfm_ecdh)) { + BT_ERR("Unable to create ECDH crypto context"); + crypto_free_shash(tfm_cmac); + return PTR_ERR(tfm_ecdh); + } + + err = run_selftests(tfm_cmac, tfm_ecdh); + + crypto_free_shash(tfm_cmac); + crypto_free_kpp(tfm_ecdh); + + return err; +} + +#endif diff --git a/net/bluetooth/smp.h b/net/bluetooth/smp.h new file mode 100644 index 000000000..87a59ec2c --- /dev/null +++ b/net/bluetooth/smp.h @@ -0,0 +1,214 @@ +/* + BlueZ - 
Bluetooth protocol stack for Linux + Copyright (C) 2011 Nokia Corporation and/or its subsidiary(-ies). + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License version 2 as + published by the Free Software Foundation; + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS + OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS. + IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY + CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES + WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + + ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS, + COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS + SOFTWARE IS DISCLAIMED. +*/ + +#ifndef __SMP_H +#define __SMP_H + +struct smp_command_hdr { + __u8 code; +} __packed; + +#define SMP_CMD_PAIRING_REQ 0x01 +#define SMP_CMD_PAIRING_RSP 0x02 +struct smp_cmd_pairing { + __u8 io_capability; + __u8 oob_flag; + __u8 auth_req; + __u8 max_key_size; + __u8 init_key_dist; + __u8 resp_key_dist; +} __packed; + +#define SMP_IO_DISPLAY_ONLY 0x00 +#define SMP_IO_DISPLAY_YESNO 0x01 +#define SMP_IO_KEYBOARD_ONLY 0x02 +#define SMP_IO_NO_INPUT_OUTPUT 0x03 +#define SMP_IO_KEYBOARD_DISPLAY 0x04 + +#define SMP_OOB_NOT_PRESENT 0x00 +#define SMP_OOB_PRESENT 0x01 + +#define SMP_DIST_ENC_KEY 0x01 +#define SMP_DIST_ID_KEY 0x02 +#define SMP_DIST_SIGN 0x04 +#define SMP_DIST_LINK_KEY 0x08 + +#define SMP_AUTH_NONE 0x00 +#define SMP_AUTH_BONDING 0x01 +#define SMP_AUTH_MITM 0x04 +#define SMP_AUTH_SC 0x08 +#define SMP_AUTH_KEYPRESS 0x10 +#define SMP_AUTH_CT2 0x20 + +#define SMP_CMD_PAIRING_CONFIRM 0x03 +struct smp_cmd_pairing_confirm { + __u8 confirm_val[16]; +} __packed; + +#define SMP_CMD_PAIRING_RANDOM 0x04 +struct smp_cmd_pairing_random { + __u8 rand_val[16]; +} __packed; + +#define SMP_CMD_PAIRING_FAIL 0x05 +struct smp_cmd_pairing_fail { + __u8 reason; +} __packed; + +#define SMP_CMD_ENCRYPT_INFO 0x06 +struct smp_cmd_encrypt_info { + __u8 ltk[16]; +} __packed; + +#define SMP_CMD_INITIATOR_IDENT 0x07 +struct smp_cmd_initiator_ident { + __le16 ediv; + __le64 rand; +} __packed; + +#define SMP_CMD_IDENT_INFO 0x08 +struct smp_cmd_ident_info { + __u8 irk[16]; +} __packed; + +#define SMP_CMD_IDENT_ADDR_INFO 0x09 +struct smp_cmd_ident_addr_info { + __u8 addr_type; + bdaddr_t bdaddr; +} __packed; + +#define SMP_CMD_SIGN_INFO 0x0a +struct smp_cmd_sign_info { + __u8 csrk[16]; +} __packed; + +#define SMP_CMD_SECURITY_REQ 0x0b +struct smp_cmd_security_req { + __u8 auth_req; +} __packed; + +#define SMP_CMD_PUBLIC_KEY 0x0c +struct smp_cmd_public_key { + __u8 x[32]; + __u8 y[32]; +} __packed; + +#define SMP_CMD_DHKEY_CHECK 0x0d +struct smp_cmd_dhkey_check { + __u8 e[16]; +} __packed; + +#define SMP_CMD_KEYPRESS_NOTIFY 0x0e +struct smp_cmd_keypress_notify { + __u8 value; +} __packed; + +#define SMP_CMD_MAX 0x0e + +#define SMP_PASSKEY_ENTRY_FAILED 0x01 +#define SMP_OOB_NOT_AVAIL 0x02 +#define SMP_AUTH_REQUIREMENTS 0x03 +#define SMP_CONFIRM_FAILED 0x04 +#define SMP_PAIRING_NOTSUPP 0x05 +#define SMP_ENC_KEY_SIZE 0x06 +#define SMP_CMD_NOTSUPP 0x07 +#define SMP_UNSPECIFIED 0x08 +#define SMP_REPEATED_ATTEMPTS 0x09 +#define SMP_INVALID_PARAMS 0x0a +#define SMP_DHKEY_CHECK_FAILED 0x0b 
+#define SMP_NUMERIC_COMP_FAILED 0x0c +#define SMP_BREDR_PAIRING_IN_PROGRESS 0x0d +#define SMP_CROSS_TRANSP_NOT_ALLOWED 0x0e + +#define SMP_MIN_ENC_KEY_SIZE 7 +#define SMP_MAX_ENC_KEY_SIZE 16 + +/* LTK types used in internal storage (struct smp_ltk) */ +enum { + SMP_STK, + SMP_LTK, + SMP_LTK_RESPONDER, + SMP_LTK_P256, + SMP_LTK_P256_DEBUG, +}; + +static inline bool smp_ltk_is_sc(struct smp_ltk *key) +{ + switch (key->type) { + case SMP_LTK_P256: + case SMP_LTK_P256_DEBUG: + return true; + } + + return false; +} + +static inline u8 smp_ltk_sec_level(struct smp_ltk *key) +{ + if (key->authenticated) { + if (smp_ltk_is_sc(key)) + return BT_SECURITY_FIPS; + else + return BT_SECURITY_HIGH; + } + + return BT_SECURITY_MEDIUM; +} + +/* Key preferences for smp_sufficient_security */ +enum smp_key_pref { + SMP_ALLOW_STK, + SMP_USE_LTK, +}; + +/* SMP Commands */ +int smp_cancel_and_remove_pairing(struct hci_dev *hdev, bdaddr_t *bdaddr, + u8 addr_type); +bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level, + enum smp_key_pref key_pref); +int smp_conn_security(struct hci_conn *hcon, __u8 sec_level); +int smp_user_confirm_reply(struct hci_conn *conn, u16 mgmt_op, __le32 passkey); + +bool smp_irk_matches(struct hci_dev *hdev, const u8 irk[16], + const bdaddr_t *bdaddr); +int smp_generate_rpa(struct hci_dev *hdev, const u8 irk[16], bdaddr_t *rpa); +int smp_generate_oob(struct hci_dev *hdev, u8 hash[16], u8 rand[16]); + +int smp_force_bredr(struct hci_dev *hdev, bool enable); + +int smp_register(struct hci_dev *hdev); +void smp_unregister(struct hci_dev *hdev); + +#if IS_ENABLED(CONFIG_BT_SELFTEST_SMP) + +int bt_selftest_smp(void); + +#else + +static inline int bt_selftest_smp(void) +{ + return 0; +} + +#endif + +#endif /* __SMP_H */
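/* The test_g2() vector above expects the six-digit numeric comparison
 * value 0x2f9ed5ba % 1000000. A two-line check of that arithmetic
 * (0x2f9ed5ba = 798938554, so the displayed passkey is 938554):
 */

#include <stdio.h>

int main(void)
{
	printf("%06lu\n", 0x2f9ed5baUL % 1000000UL);	/* prints 938554 */
	return 0;
}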