author	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 18:49:45 +0000
committer	Daniel Baumann <daniel.baumann@progress-linux.org>	2024-04-07 18:49:45 +0000
commit	2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree	848558de17fb3008cdf4d861b01ac7781903ce39 /net/nfc/hci
parent	Initial commit. (diff)
download	linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
	linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'net/nfc/hci')
-rw-r--r--	net/nfc/hci/Kconfig	18
-rw-r--r--	net/nfc/hci/Makefile	9
-rw-r--r--	net/nfc/hci/command.c	344
-rw-r--r--	net/nfc/hci/core.c	1105
-rw-r--r--	net/nfc/hci/hci.h	120
-rw-r--r--	net/nfc/hci/hcp.c	136
-rw-r--r--	net/nfc/hci/llc.c	150
-rw-r--r--	net/nfc/hci/llc.h	56
-rw-r--r--	net/nfc/hci/llc_nop.c	86
-rw-r--r--	net/nfc/hci/llc_shdlc.c	818
10 files changed, 2842 insertions, 0 deletions
diff --git a/net/nfc/hci/Kconfig b/net/nfc/hci/Kconfig
new file mode 100644
index 000000000..9500b8a27
--- /dev/null
+++ b/net/nfc/hci/Kconfig
@@ -0,0 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0-only
+config NFC_HCI
+ depends on NFC
+ tristate "NFC HCI implementation"
+ default n
+ help
+ Say Y here if you want to build support for a kernel NFC HCI
+ implementation. This is mostly needed for devices that only process
+	  HCI frames, such as the NXP pn544.
+
+config NFC_SHDLC
+ depends on NFC_HCI
+ select CRC_CCITT
+ bool "SHDLC link layer for HCI based NFC drivers"
+ default n
+ help
+	  Say Y here if you use an NFC HCI driver that requires the SHDLC link layer.
+ If unsure, say N here.
diff --git a/net/nfc/hci/Makefile b/net/nfc/hci/Makefile
new file mode 100644
index 000000000..5a0aaae6f
--- /dev/null
+++ b/net/nfc/hci/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the Linux NFC HCI layer.
+#
+
+obj-$(CONFIG_NFC_HCI) += hci.o
+
+hci-y := core.o hcp.o command.o llc.o llc_nop.o
+hci-$(CONFIG_NFC_SHDLC) += llc_shdlc.o
diff --git a/net/nfc/hci/command.c b/net/nfc/hci/command.c
new file mode 100644
index 000000000..af6bacb3b
--- /dev/null
+++ b/net/nfc/hci/command.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "hci: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+#include <net/nfc/hci.h>
+
+#include "hci.h"
+
+#define MAX_FWI 4949
+
+static int nfc_hci_execute_cmd_async(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ const u8 *param, size_t param_len,
+ data_exchange_cb_t cb, void *cb_context)
+{
+ pr_debug("exec cmd async through pipe=%d, cmd=%d, plen=%zd\n", pipe,
+ cmd, param_len);
+
+ /* TODO: Define hci cmd execution delay. Should it be the same
+ * for all commands?
+ */
+ return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_COMMAND, cmd,
+ param, param_len, cb, cb_context, MAX_FWI);
+}
+
+/*
+ * HCI command execution completion callback.
+ * err will be a standard Linux error (possibly converted from an HCI response
+ * code). skb contains the response data and must be freed by the callee; it
+ * may be NULL if an error occurred.
+ */
+static void nfc_hci_execute_cb(void *context, struct sk_buff *skb, int err)
+{
+ struct hcp_exec_waiter *hcp_ew = (struct hcp_exec_waiter *)context;
+
+ pr_debug("HCI Cmd completed with result=%d\n", err);
+
+ hcp_ew->exec_result = err;
+ if (hcp_ew->exec_result == 0)
+ hcp_ew->result_skb = skb;
+ else
+ kfree_skb(skb);
+ hcp_ew->exec_complete = true;
+
+ wake_up(hcp_ew->wq);
+}
+
+static int nfc_hci_execute_cmd(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ const u8 *param, size_t param_len,
+ struct sk_buff **skb)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(ew_wq);
+ struct hcp_exec_waiter hcp_ew;
+ hcp_ew.wq = &ew_wq;
+ hcp_ew.exec_complete = false;
+ hcp_ew.result_skb = NULL;
+
+ pr_debug("exec cmd sync through pipe=%d, cmd=%d, plen=%zd\n", pipe,
+ cmd, param_len);
+
+ /* TODO: Define hci cmd execution delay. Should it be the same
+ * for all commands?
+ */
+ hcp_ew.exec_result = nfc_hci_hcp_message_tx(hdev, pipe,
+ NFC_HCI_HCP_COMMAND, cmd,
+ param, param_len,
+ nfc_hci_execute_cb, &hcp_ew,
+ MAX_FWI);
+ if (hcp_ew.exec_result < 0)
+ return hcp_ew.exec_result;
+
+ wait_event(ew_wq, hcp_ew.exec_complete == true);
+
+ if (hcp_ew.exec_result == 0) {
+ if (skb)
+ *skb = hcp_ew.result_skb;
+ else
+ kfree_skb(hcp_ew.result_skb);
+ }
+
+ return hcp_ew.exec_result;
+}
+
+int nfc_hci_send_event(struct nfc_hci_dev *hdev, u8 gate, u8 event,
+ const u8 *param, size_t param_len)
+{
+ u8 pipe;
+
+ pr_debug("%d to gate %d\n", event, gate);
+
+ pipe = hdev->gate2pipe[gate];
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ return nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_EVENT, event,
+ param, param_len, NULL, NULL, 0);
+}
+EXPORT_SYMBOL(nfc_hci_send_event);
+
+/*
+ * Execute an HCI command sent to a gate.
+ * On success, *skb will contain the response data. skb can be NULL if you are
+ * not interested in the response.
+ */
+int nfc_hci_send_cmd(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+ const u8 *param, size_t param_len, struct sk_buff **skb)
+{
+ u8 pipe;
+
+ pipe = hdev->gate2pipe[gate];
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ return nfc_hci_execute_cmd(hdev, pipe, cmd, param, param_len, skb);
+}
+EXPORT_SYMBOL(nfc_hci_send_cmd);
+
+int nfc_hci_send_cmd_async(struct nfc_hci_dev *hdev, u8 gate, u8 cmd,
+ const u8 *param, size_t param_len,
+ data_exchange_cb_t cb, void *cb_context)
+{
+ u8 pipe;
+
+ pipe = hdev->gate2pipe[gate];
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ return nfc_hci_execute_cmd_async(hdev, pipe, cmd, param, param_len,
+ cb, cb_context);
+}
+EXPORT_SYMBOL(nfc_hci_send_cmd_async);
+
+int nfc_hci_set_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+ const u8 *param, size_t param_len)
+{
+ int r;
+ u8 *tmp;
+
+ /* TODO ELa: reg idx must be inserted before param, but we don't want
+ * to ask the caller to do it to keep a simpler API.
+ * For now, just create a new temporary param buffer. This is far from
+ * optimal though, and the plan is to modify APIs to pass idx down to
+ * nfc_hci_hcp_message_tx where the frame is actually built, thereby
+ * eliminating the need for the temp allocation-copy here.
+ */
+
+ pr_debug("idx=%d to gate %d\n", idx, gate);
+
+ tmp = kmalloc(1 + param_len, GFP_KERNEL);
+ if (tmp == NULL)
+ return -ENOMEM;
+
+ *tmp = idx;
+ memcpy(tmp + 1, param, param_len);
+
+ r = nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_SET_PARAMETER,
+ tmp, param_len + 1, NULL);
+
+ kfree(tmp);
+
+ return r;
+}
+EXPORT_SYMBOL(nfc_hci_set_param);
+
+int nfc_hci_get_param(struct nfc_hci_dev *hdev, u8 gate, u8 idx,
+ struct sk_buff **skb)
+{
+ pr_debug("gate=%d regidx=%d\n", gate, idx);
+
+ return nfc_hci_send_cmd(hdev, gate, NFC_HCI_ANY_GET_PARAMETER,
+ &idx, 1, skb);
+}
+EXPORT_SYMBOL(nfc_hci_get_param);
+
+static int nfc_hci_open_pipe(struct nfc_hci_dev *hdev, u8 pipe)
+{
+ struct sk_buff *skb;
+ int r;
+
+ pr_debug("pipe=%d\n", pipe);
+
+ r = nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_OPEN_PIPE,
+ NULL, 0, &skb);
+ if (r == 0) {
+		/* A destination host other than the host controller will send
+		 * the number of pipes already open on this gate before
+		 * execution; the number can be found in skb->data[0]
+ */
+ kfree_skb(skb);
+ }
+
+ return r;
+}
+
+static int nfc_hci_close_pipe(struct nfc_hci_dev *hdev, u8 pipe)
+{
+ return nfc_hci_execute_cmd(hdev, pipe, NFC_HCI_ANY_CLOSE_PIPE,
+ NULL, 0, NULL);
+}
+
+static u8 nfc_hci_create_pipe(struct nfc_hci_dev *hdev, u8 dest_host,
+ u8 dest_gate, int *result)
+{
+ struct sk_buff *skb;
+ struct hci_create_pipe_params params;
+ struct hci_create_pipe_resp *resp;
+ u8 pipe;
+
+ pr_debug("gate=%d\n", dest_gate);
+
+ params.src_gate = NFC_HCI_ADMIN_GATE;
+ params.dest_host = dest_host;
+ params.dest_gate = dest_gate;
+
+ *result = nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+ NFC_HCI_ADM_CREATE_PIPE,
+ (u8 *) &params, sizeof(params), &skb);
+ if (*result < 0)
+ return NFC_HCI_INVALID_PIPE;
+
+ resp = (struct hci_create_pipe_resp *)skb->data;
+ pipe = resp->pipe;
+ kfree_skb(skb);
+
+ pr_debug("pipe created=%d\n", pipe);
+
+ return pipe;
+}
+
+static int nfc_hci_delete_pipe(struct nfc_hci_dev *hdev, u8 pipe)
+{
+ return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+ NFC_HCI_ADM_DELETE_PIPE, &pipe, 1, NULL);
+}
+
+static int nfc_hci_clear_all_pipes(struct nfc_hci_dev *hdev)
+{
+ u8 param[2];
+ size_t param_len = 2;
+
+ /* TODO: Find out what the identity reference data is
+ * and fill param with it. HCI spec 6.1.3.5 */
+
+ if (test_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &hdev->quirks))
+ param_len = 0;
+
+ return nfc_hci_execute_cmd(hdev, NFC_HCI_ADMIN_PIPE,
+ NFC_HCI_ADM_CLEAR_ALL_PIPE, param, param_len,
+ NULL);
+}
+
+int nfc_hci_disconnect_gate(struct nfc_hci_dev *hdev, u8 gate)
+{
+ int r;
+ u8 pipe = hdev->gate2pipe[gate];
+
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return -EADDRNOTAVAIL;
+
+ r = nfc_hci_close_pipe(hdev, pipe);
+ if (r < 0)
+ return r;
+
+ if (pipe != NFC_HCI_LINK_MGMT_PIPE && pipe != NFC_HCI_ADMIN_PIPE) {
+ r = nfc_hci_delete_pipe(hdev, pipe);
+ if (r < 0)
+ return r;
+ }
+
+ hdev->gate2pipe[gate] = NFC_HCI_INVALID_PIPE;
+
+ return 0;
+}
+EXPORT_SYMBOL(nfc_hci_disconnect_gate);
+
+int nfc_hci_disconnect_all_gates(struct nfc_hci_dev *hdev)
+{
+ int r;
+
+ r = nfc_hci_clear_all_pipes(hdev);
+ if (r < 0)
+ return r;
+
+ nfc_hci_reset_pipes(hdev);
+
+ return 0;
+}
+EXPORT_SYMBOL(nfc_hci_disconnect_all_gates);
+
+int nfc_hci_connect_gate(struct nfc_hci_dev *hdev, u8 dest_host, u8 dest_gate,
+ u8 pipe)
+{
+ bool pipe_created = false;
+ int r;
+
+ if (pipe == NFC_HCI_DO_NOT_CREATE_PIPE)
+ return 0;
+
+ if (hdev->gate2pipe[dest_gate] != NFC_HCI_INVALID_PIPE)
+ return -EADDRINUSE;
+
+ if (pipe != NFC_HCI_INVALID_PIPE)
+ goto open_pipe;
+
+ switch (dest_gate) {
+ case NFC_HCI_LINK_MGMT_GATE:
+ pipe = NFC_HCI_LINK_MGMT_PIPE;
+ break;
+ case NFC_HCI_ADMIN_GATE:
+ pipe = NFC_HCI_ADMIN_PIPE;
+ break;
+ default:
+ pipe = nfc_hci_create_pipe(hdev, dest_host, dest_gate, &r);
+ if (pipe == NFC_HCI_INVALID_PIPE)
+ return r;
+ pipe_created = true;
+ break;
+ }
+
+open_pipe:
+ r = nfc_hci_open_pipe(hdev, pipe);
+ if (r < 0) {
+ if (pipe_created)
+ if (nfc_hci_delete_pipe(hdev, pipe) < 0) {
+ /* TODO: Cannot clean by deleting pipe...
+ * -> inconsistent state */
+ }
+ return r;
+ }
+
+ hdev->pipes[pipe].gate = dest_gate;
+ hdev->pipes[pipe].dest_host = dest_host;
+ hdev->gate2pipe[dest_gate] = pipe;
+
+ return 0;
+}
+EXPORT_SYMBOL(nfc_hci_connect_gate);
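
The exported helpers above are the API drivers build on: resolve a gate to a pipe, send an HCP command, then either block for the response or take it through a callback. A minimal sketch of the synchronous pattern, assuming a hypothetical driver-specific gate MY_GATE and register index MY_REG_IDX (neither is defined in this patch):

	#include <linux/skbuff.h>
	#include <net/nfc/hci.h>

	#define MY_GATE		0x41	/* hypothetical proprietary gate */
	#define MY_REG_IDX	0x01	/* hypothetical register index */

	/* Read one byte from a proprietary register via ANY_GET_PARAMETER.
	 * The response skb is owned by the caller and must be freed.
	 */
	static int my_read_reg(struct nfc_hci_dev *hdev, u8 *val)
	{
		struct sk_buff *skb;
		int r;

		r = nfc_hci_get_param(hdev, MY_GATE, MY_REG_IDX, &skb);
		if (r < 0)
			return r;	/* already a negative errno */

		if (skb->len < 1) {
			kfree_skb(skb);
			return -EPROTO;
		}

		*val = skb->data[0];
		kfree_skb(skb);

		return 0;
	}

The gate must have been bound to a pipe beforehand with nfc_hci_connect_gate(); otherwise these helpers return -EADDRNOTAVAIL.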
diff --git a/net/nfc/hci/core.c b/net/nfc/hci/core.c
new file mode 100644
index 000000000..ceb87db57
--- /dev/null
+++ b/net/nfc/hci/core.c
@@ -0,0 +1,1105 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "hci: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/nfc.h>
+
+#include <net/nfc/nfc.h>
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+
+#include "hci.h"
+
+/* Largest headroom needed for outgoing HCI commands */
+#define HCI_CMDS_HEADROOM 1
+
+int nfc_hci_result_to_errno(u8 result)
+{
+ switch (result) {
+ case NFC_HCI_ANY_OK:
+ return 0;
+ case NFC_HCI_ANY_E_REG_PAR_UNKNOWN:
+ return -EOPNOTSUPP;
+ case NFC_HCI_ANY_E_TIMEOUT:
+ return -ETIME;
+ default:
+ return -1;
+ }
+}
+EXPORT_SYMBOL(nfc_hci_result_to_errno);
+
+void nfc_hci_reset_pipes(struct nfc_hci_dev *hdev)
+{
+ int i = 0;
+
+ for (i = 0; i < NFC_HCI_MAX_PIPES; i++) {
+ hdev->pipes[i].gate = NFC_HCI_INVALID_GATE;
+ hdev->pipes[i].dest_host = NFC_HCI_INVALID_HOST;
+ }
+ memset(hdev->gate2pipe, NFC_HCI_INVALID_PIPE, sizeof(hdev->gate2pipe));
+}
+EXPORT_SYMBOL(nfc_hci_reset_pipes);
+
+void nfc_hci_reset_pipes_per_host(struct nfc_hci_dev *hdev, u8 host)
+{
+ int i = 0;
+
+ for (i = 0; i < NFC_HCI_MAX_PIPES; i++) {
+ if (hdev->pipes[i].dest_host != host)
+ continue;
+
+ hdev->pipes[i].gate = NFC_HCI_INVALID_GATE;
+ hdev->pipes[i].dest_host = NFC_HCI_INVALID_HOST;
+ }
+}
+EXPORT_SYMBOL(nfc_hci_reset_pipes_per_host);
+
+static void nfc_hci_msg_tx_work(struct work_struct *work)
+{
+ struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
+ msg_tx_work);
+ struct hci_msg *msg;
+ struct sk_buff *skb;
+ int r = 0;
+
+ mutex_lock(&hdev->msg_tx_mutex);
+ if (hdev->shutting_down)
+ goto exit;
+
+ if (hdev->cmd_pending_msg) {
+ if (timer_pending(&hdev->cmd_timer) == 0) {
+ if (hdev->cmd_pending_msg->cb)
+ hdev->cmd_pending_msg->cb(hdev->
+ cmd_pending_msg->
+ cb_context,
+ NULL,
+ -ETIME);
+ kfree(hdev->cmd_pending_msg);
+ hdev->cmd_pending_msg = NULL;
+ } else {
+ goto exit;
+ }
+ }
+
+next_msg:
+ if (list_empty(&hdev->msg_tx_queue))
+ goto exit;
+
+ msg = list_first_entry(&hdev->msg_tx_queue, struct hci_msg, msg_l);
+ list_del(&msg->msg_l);
+
+ pr_debug("msg_tx_queue has a cmd to send\n");
+ while ((skb = skb_dequeue(&msg->msg_frags)) != NULL) {
+ r = nfc_llc_xmit_from_hci(hdev->llc, skb);
+ if (r < 0) {
+ kfree_skb(skb);
+ skb_queue_purge(&msg->msg_frags);
+ if (msg->cb)
+ msg->cb(msg->cb_context, NULL, r);
+ kfree(msg);
+ break;
+ }
+ }
+
+ if (r)
+ goto next_msg;
+
+ if (msg->wait_response == false) {
+ kfree(msg);
+ goto next_msg;
+ }
+
+ hdev->cmd_pending_msg = msg;
+ mod_timer(&hdev->cmd_timer, jiffies +
+ msecs_to_jiffies(hdev->cmd_pending_msg->completion_delay));
+
+exit:
+ mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+static void nfc_hci_msg_rx_work(struct work_struct *work)
+{
+ struct nfc_hci_dev *hdev = container_of(work, struct nfc_hci_dev,
+ msg_rx_work);
+ struct sk_buff *skb;
+ const struct hcp_message *message;
+ u8 pipe;
+ u8 type;
+ u8 instruction;
+
+ while ((skb = skb_dequeue(&hdev->msg_rx_queue)) != NULL) {
+ pipe = skb->data[0];
+ skb_pull(skb, NFC_HCI_HCP_PACKET_HEADER_LEN);
+ message = (struct hcp_message *)skb->data;
+ type = HCP_MSG_GET_TYPE(message->header);
+ instruction = HCP_MSG_GET_CMD(message->header);
+ skb_pull(skb, NFC_HCI_HCP_MESSAGE_HEADER_LEN);
+
+ nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, skb);
+ }
+}
+
+static void __nfc_hci_cmd_completion(struct nfc_hci_dev *hdev, int err,
+ struct sk_buff *skb)
+{
+ del_timer_sync(&hdev->cmd_timer);
+
+ if (hdev->cmd_pending_msg->cb)
+ hdev->cmd_pending_msg->cb(hdev->cmd_pending_msg->cb_context,
+ skb, err);
+ else
+ kfree_skb(skb);
+
+ kfree(hdev->cmd_pending_msg);
+ hdev->cmd_pending_msg = NULL;
+
+ schedule_work(&hdev->msg_tx_work);
+}
+
+void nfc_hci_resp_received(struct nfc_hci_dev *hdev, u8 result,
+ struct sk_buff *skb)
+{
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->cmd_pending_msg == NULL) {
+ kfree_skb(skb);
+ goto exit;
+ }
+
+ __nfc_hci_cmd_completion(hdev, nfc_hci_result_to_errno(result), skb);
+
+exit:
+ mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+void nfc_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd,
+ struct sk_buff *skb)
+{
+ u8 status = NFC_HCI_ANY_OK;
+ const struct hci_create_pipe_resp *create_info;
+ const struct hci_delete_pipe_noti *delete_info;
+ const struct hci_all_pipe_cleared_noti *cleared_info;
+ u8 gate;
+
+ pr_debug("from pipe %x cmd %x\n", pipe, cmd);
+
+ if (pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
+ gate = hdev->pipes[pipe].gate;
+
+ switch (cmd) {
+ case NFC_HCI_ADM_NOTIFY_PIPE_CREATED:
+ if (skb->len != 5) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ create_info = (struct hci_create_pipe_resp *)skb->data;
+
+ if (create_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
+		/* Save the newly created pipe and bind it to the local gate:
+		 * the spec describes skb->data[3] as the destination gate id,
+		 * but since we received this cmd from the host controller, we
+		 * are the destination and it is our local gate
+ */
+ hdev->gate2pipe[create_info->dest_gate] = create_info->pipe;
+ hdev->pipes[create_info->pipe].gate = create_info->dest_gate;
+ hdev->pipes[create_info->pipe].dest_host =
+ create_info->src_host;
+ break;
+ case NFC_HCI_ANY_OPEN_PIPE:
+ if (gate == NFC_HCI_INVALID_GATE) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ break;
+ case NFC_HCI_ADM_NOTIFY_PIPE_DELETED:
+ if (skb->len != 1) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ delete_info = (struct hci_delete_pipe_noti *)skb->data;
+
+ if (delete_info->pipe >= NFC_HCI_MAX_PIPES) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+
+ hdev->pipes[delete_info->pipe].gate = NFC_HCI_INVALID_GATE;
+ hdev->pipes[delete_info->pipe].dest_host = NFC_HCI_INVALID_HOST;
+ break;
+ case NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED:
+ if (skb->len != 1) {
+ status = NFC_HCI_ANY_E_NOK;
+ goto exit;
+ }
+ cleared_info = (struct hci_all_pipe_cleared_noti *)skb->data;
+
+ nfc_hci_reset_pipes_per_host(hdev, cleared_info->host);
+ break;
+ default:
+ pr_info("Discarded unknown cmd %x to gate %x\n", cmd, gate);
+ break;
+ }
+
+ if (hdev->ops->cmd_received)
+ hdev->ops->cmd_received(hdev, pipe, cmd, skb);
+
+exit:
+ nfc_hci_hcp_message_tx(hdev, pipe, NFC_HCI_HCP_RESPONSE,
+ status, NULL, 0, NULL, NULL, 0);
+
+ kfree_skb(skb);
+}
+
+u32 nfc_hci_sak_to_protocol(u8 sak)
+{
+ switch (NFC_HCI_TYPE_A_SEL_PROT(sak)) {
+ case NFC_HCI_TYPE_A_SEL_PROT_MIFARE:
+ return NFC_PROTO_MIFARE_MASK;
+ case NFC_HCI_TYPE_A_SEL_PROT_ISO14443:
+ return NFC_PROTO_ISO14443_MASK;
+ case NFC_HCI_TYPE_A_SEL_PROT_DEP:
+ return NFC_PROTO_NFC_DEP_MASK;
+ case NFC_HCI_TYPE_A_SEL_PROT_ISO14443_DEP:
+ return NFC_PROTO_ISO14443_MASK | NFC_PROTO_NFC_DEP_MASK;
+ default:
+ return 0xffffffff;
+ }
+}
+EXPORT_SYMBOL(nfc_hci_sak_to_protocol);
+
+int nfc_hci_target_discovered(struct nfc_hci_dev *hdev, u8 gate)
+{
+ struct nfc_target *targets;
+ struct sk_buff *atqa_skb = NULL;
+ struct sk_buff *sak_skb = NULL;
+ struct sk_buff *uid_skb = NULL;
+ int r;
+
+ pr_debug("from gate %d\n", gate);
+
+ targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL);
+ if (targets == NULL)
+ return -ENOMEM;
+
+ switch (gate) {
+ case NFC_HCI_RF_READER_A_GATE:
+ r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_RF_READER_A_ATQA, &atqa_skb);
+ if (r < 0)
+ goto exit;
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_RF_READER_A_SAK, &sak_skb);
+ if (r < 0)
+ goto exit;
+
+ if (atqa_skb->len != 2 || sak_skb->len != 1) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ targets->supported_protocols =
+ nfc_hci_sak_to_protocol(sak_skb->data[0]);
+ if (targets->supported_protocols == 0xffffffff) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ targets->sens_res = be16_to_cpu(*(__be16 *)atqa_skb->data);
+ targets->sel_res = sak_skb->data[0];
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_RF_READER_A_UID, &uid_skb);
+ if (r < 0)
+ goto exit;
+
+ if (uid_skb->len == 0 || uid_skb->len > NFC_NFCID1_MAXSIZE) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ memcpy(targets->nfcid1, uid_skb->data, uid_skb->len);
+ targets->nfcid1_len = uid_skb->len;
+
+ if (hdev->ops->complete_target_discovered) {
+ r = hdev->ops->complete_target_discovered(hdev, gate,
+ targets);
+ if (r < 0)
+ goto exit;
+ }
+ break;
+ case NFC_HCI_RF_READER_B_GATE:
+ targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK;
+ break;
+ default:
+ if (hdev->ops->target_from_gate)
+ r = hdev->ops->target_from_gate(hdev, gate, targets);
+ else
+ r = -EPROTO;
+ if (r < 0)
+ goto exit;
+
+ if (hdev->ops->complete_target_discovered) {
+ r = hdev->ops->complete_target_discovered(hdev, gate,
+ targets);
+ if (r < 0)
+ goto exit;
+ }
+ break;
+ }
+
+	/* if the driver already set a reader gate, keep it; otherwise use ours */
+ if (targets->hci_reader_gate == 0x00)
+ targets->hci_reader_gate = gate;
+
+ r = nfc_targets_found(hdev->ndev, targets, 1);
+
+exit:
+ kfree(targets);
+ kfree_skb(atqa_skb);
+ kfree_skb(sak_skb);
+ kfree_skb(uid_skb);
+
+ return r;
+}
+EXPORT_SYMBOL(nfc_hci_target_discovered);
+
+void nfc_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event,
+ struct sk_buff *skb)
+{
+ int r = 0;
+ u8 gate;
+
+ if (pipe >= NFC_HCI_MAX_PIPES) {
+ pr_err("Discarded event %x to invalid pipe %x\n", event, pipe);
+ goto exit;
+ }
+
+ gate = hdev->pipes[pipe].gate;
+ if (gate == NFC_HCI_INVALID_GATE) {
+ pr_err("Discarded event %x to unopened pipe %x\n", event, pipe);
+ goto exit;
+ }
+
+ if (hdev->ops->event_received) {
+ r = hdev->ops->event_received(hdev, pipe, event, skb);
+ if (r <= 0)
+ goto exit_noskb;
+ }
+
+ switch (event) {
+ case NFC_HCI_EVT_TARGET_DISCOVERED:
+ if (skb->len < 1) { /* no status data? */
+ r = -EPROTO;
+ goto exit;
+ }
+
+ if (skb->data[0] == 3) {
+ /* TODO: Multiple targets in field, none activated
+ * poll is supposedly stopped, but there is no
+ * single target to activate, so nothing to report
+ * up.
+ * if we need to restart poll, we must save the
+ * protocols from the initial poll and reuse here.
+ */
+ }
+
+ if (skb->data[0] != 0) {
+ r = -EPROTO;
+ goto exit;
+ }
+
+ r = nfc_hci_target_discovered(hdev, gate);
+ break;
+ default:
+ pr_info("Discarded unknown event %x to gate %x\n", event, gate);
+ r = -EINVAL;
+ break;
+ }
+
+exit:
+ kfree_skb(skb);
+
+exit_noskb:
+ if (r)
+ nfc_hci_driver_failure(hdev, r);
+}
+
+static void nfc_hci_cmd_timeout(struct timer_list *t)
+{
+ struct nfc_hci_dev *hdev = from_timer(hdev, t, cmd_timer);
+
+ schedule_work(&hdev->msg_tx_work);
+}
+
+static int hci_dev_connect_gates(struct nfc_hci_dev *hdev, u8 gate_count,
+ const struct nfc_hci_gate *gates)
+{
+ int r;
+ while (gate_count--) {
+ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+ gates->gate, gates->pipe);
+ if (r < 0)
+ return r;
+ gates++;
+ }
+
+ return 0;
+}
+
+static int hci_dev_session_init(struct nfc_hci_dev *hdev)
+{
+ struct sk_buff *skb = NULL;
+ int r;
+
+ if (hdev->init_data.gates[0].gate != NFC_HCI_ADMIN_GATE)
+ return -EPROTO;
+
+ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID,
+ hdev->init_data.gates[0].gate,
+ hdev->init_data.gates[0].pipe);
+ if (r < 0)
+ goto exit;
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE,
+ NFC_HCI_ADMIN_SESSION_IDENTITY, &skb);
+ if (r < 0)
+ goto disconnect_all;
+
+ if (skb->len && skb->len == strlen(hdev->init_data.session_id) &&
+ (memcmp(hdev->init_data.session_id, skb->data,
+ skb->len) == 0) && hdev->ops->load_session) {
+ /* Restore gate<->pipe table from some proprietary location. */
+
+ r = hdev->ops->load_session(hdev);
+
+ if (r < 0)
+ goto disconnect_all;
+ } else {
+
+ r = nfc_hci_disconnect_all_gates(hdev);
+ if (r < 0)
+ goto exit;
+
+ r = hci_dev_connect_gates(hdev, hdev->init_data.gate_count,
+ hdev->init_data.gates);
+ if (r < 0)
+ goto disconnect_all;
+
+ r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE,
+ NFC_HCI_ADMIN_SESSION_IDENTITY,
+ hdev->init_data.session_id,
+ strlen(hdev->init_data.session_id));
+ }
+ if (r == 0)
+ goto exit;
+
+disconnect_all:
+ nfc_hci_disconnect_all_gates(hdev);
+
+exit:
+ kfree_skb(skb);
+
+ return r;
+}
+
+static int hci_dev_version(struct nfc_hci_dev *hdev)
+{
+ int r;
+ struct sk_buff *skb;
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+ NFC_HCI_ID_MGMT_VERSION_SW, &skb);
+ if (r == -EOPNOTSUPP) {
+ pr_info("Software/Hardware info not available\n");
+ return 0;
+ }
+ if (r < 0)
+ return r;
+
+ if (skb->len != 3) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ hdev->sw_romlib = (skb->data[0] & 0xf0) >> 4;
+ hdev->sw_patch = skb->data[0] & 0x0f;
+ hdev->sw_flashlib_major = skb->data[1];
+ hdev->sw_flashlib_minor = skb->data[2];
+
+ kfree_skb(skb);
+
+ r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE,
+ NFC_HCI_ID_MGMT_VERSION_HW, &skb);
+ if (r < 0)
+ return r;
+
+ if (skb->len != 3) {
+ kfree_skb(skb);
+ return -EINVAL;
+ }
+
+ hdev->hw_derivative = (skb->data[0] & 0xe0) >> 5;
+ hdev->hw_version = skb->data[0] & 0x1f;
+ hdev->hw_mpw = (skb->data[1] & 0xc0) >> 6;
+ hdev->hw_software = skb->data[1] & 0x3f;
+ hdev->hw_bsid = skb->data[2];
+
+ kfree_skb(skb);
+
+ pr_info("SOFTWARE INFO:\n");
+ pr_info("RomLib : %d\n", hdev->sw_romlib);
+ pr_info("Patch : %d\n", hdev->sw_patch);
+ pr_info("FlashLib Major : %d\n", hdev->sw_flashlib_major);
+ pr_info("FlashLib Minor : %d\n", hdev->sw_flashlib_minor);
+ pr_info("HARDWARE INFO:\n");
+ pr_info("Derivative : %d\n", hdev->hw_derivative);
+ pr_info("HW Version : %d\n", hdev->hw_version);
+ pr_info("#MPW : %d\n", hdev->hw_mpw);
+ pr_info("Software : %d\n", hdev->hw_software);
+ pr_info("BSID Version : %d\n", hdev->hw_bsid);
+
+ return 0;
+}
+
+static int hci_dev_up(struct nfc_dev *nfc_dev)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+ int r = 0;
+
+ if (hdev->ops->open) {
+ r = hdev->ops->open(hdev);
+ if (r < 0)
+ return r;
+ }
+
+ r = nfc_llc_start(hdev->llc);
+ if (r < 0)
+ goto exit_close;
+
+ r = hci_dev_session_init(hdev);
+ if (r < 0)
+ goto exit_llc;
+
+ r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+ if (r < 0)
+ goto exit_llc;
+
+ if (hdev->ops->hci_ready) {
+ r = hdev->ops->hci_ready(hdev);
+ if (r < 0)
+ goto exit_llc;
+ }
+
+ r = hci_dev_version(hdev);
+ if (r < 0)
+ goto exit_llc;
+
+ return 0;
+
+exit_llc:
+ nfc_llc_stop(hdev->llc);
+
+exit_close:
+ if (hdev->ops->close)
+ hdev->ops->close(hdev);
+
+ return r;
+}
+
+static int hci_dev_down(struct nfc_dev *nfc_dev)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ nfc_llc_stop(hdev->llc);
+
+ if (hdev->ops->close)
+ hdev->ops->close(hdev);
+
+ nfc_hci_reset_pipes(hdev);
+
+ return 0;
+}
+
+static int hci_start_poll(struct nfc_dev *nfc_dev,
+ u32 im_protocols, u32 tm_protocols)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->start_poll)
+ return hdev->ops->start_poll(hdev, im_protocols, tm_protocols);
+ else
+ return nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_READER_REQUESTED,
+ NULL, 0);
+}
+
+static void hci_stop_poll(struct nfc_dev *nfc_dev)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->stop_poll)
+ hdev->ops->stop_poll(hdev);
+ else
+ nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE,
+ NFC_HCI_EVT_END_OPERATION, NULL, 0);
+}
+
+static int hci_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ __u8 comm_mode, __u8 *gb, size_t gb_len)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (!hdev->ops->dep_link_up)
+ return 0;
+
+ return hdev->ops->dep_link_up(hdev, target, comm_mode,
+ gb, gb_len);
+}
+
+static int hci_dep_link_down(struct nfc_dev *nfc_dev)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (!hdev->ops->dep_link_down)
+ return 0;
+
+ return hdev->ops->dep_link_down(hdev);
+}
+
+static int hci_activate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target, u32 protocol)
+{
+ return 0;
+}
+
+static void hci_deactivate_target(struct nfc_dev *nfc_dev,
+ struct nfc_target *target,
+ u8 mode)
+{
+}
+
+#define HCI_CB_TYPE_TRANSCEIVE 1
+
+static void hci_transceive_cb(void *context, struct sk_buff *skb, int err)
+{
+ struct nfc_hci_dev *hdev = context;
+
+ switch (hdev->async_cb_type) {
+ case HCI_CB_TYPE_TRANSCEIVE:
+ /*
+ * TODO: Check RF Error indicator to make sure data is valid.
+ * It seems that HCI cmd can complete without error, but data
+ * can be invalid if an RF error occurred? Ignore for now.
+ */
+ if (err == 0)
+ skb_trim(skb, skb->len - 1); /* RF Err ind */
+
+ hdev->async_cb(hdev->async_cb_context, skb, err);
+ break;
+ default:
+ if (err == 0)
+ kfree_skb(skb);
+ break;
+ }
+}
+
+static int hci_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target,
+ struct sk_buff *skb, data_exchange_cb_t cb,
+ void *cb_context)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+ int r;
+
+ pr_debug("target_idx=%d\n", target->idx);
+
+ switch (target->hci_reader_gate) {
+ case NFC_HCI_RF_READER_A_GATE:
+ case NFC_HCI_RF_READER_B_GATE:
+ if (hdev->ops->im_transceive) {
+ r = hdev->ops->im_transceive(hdev, target, skb, cb,
+ cb_context);
+ if (r <= 0) /* handled */
+ break;
+ }
+
+ *(u8 *)skb_push(skb, 1) = 0; /* CTR, see spec:10.2.2.1 */
+
+ hdev->async_cb_type = HCI_CB_TYPE_TRANSCEIVE;
+ hdev->async_cb = cb;
+ hdev->async_cb_context = cb_context;
+
+ r = nfc_hci_send_cmd_async(hdev, target->hci_reader_gate,
+ NFC_HCI_WR_XCHG_DATA, skb->data,
+ skb->len, hci_transceive_cb, hdev);
+ break;
+ default:
+ if (hdev->ops->im_transceive) {
+ r = hdev->ops->im_transceive(hdev, target, skb, cb,
+ cb_context);
+ if (r == 1)
+ r = -ENOTSUPP;
+ } else {
+ r = -ENOTSUPP;
+ }
+ break;
+ }
+
+ kfree_skb(skb);
+
+ return r;
+}
+
+static int hci_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (!hdev->ops->tm_send) {
+ kfree_skb(skb);
+ return -ENOTSUPP;
+ }
+
+ return hdev->ops->tm_send(hdev, skb);
+}
+
+static int hci_check_presence(struct nfc_dev *nfc_dev,
+ struct nfc_target *target)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (!hdev->ops->check_presence)
+ return 0;
+
+ return hdev->ops->check_presence(hdev, target);
+}
+
+static int hci_discover_se(struct nfc_dev *nfc_dev)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->discover_se)
+ return hdev->ops->discover_se(hdev);
+
+ return 0;
+}
+
+static int hci_enable_se(struct nfc_dev *nfc_dev, u32 se_idx)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->enable_se)
+ return hdev->ops->enable_se(hdev, se_idx);
+
+ return 0;
+}
+
+static int hci_disable_se(struct nfc_dev *nfc_dev, u32 se_idx)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->disable_se)
+ return hdev->ops->disable_se(hdev, se_idx);
+
+ return 0;
+}
+
+static int hci_se_io(struct nfc_dev *nfc_dev, u32 se_idx,
+ u8 *apdu, size_t apdu_length,
+ se_io_cb_t cb, void *cb_context)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (hdev->ops->se_io)
+ return hdev->ops->se_io(hdev, se_idx, apdu,
+ apdu_length, cb, cb_context);
+
+ return 0;
+}
+
+static void nfc_hci_failure(struct nfc_hci_dev *hdev, int err)
+{
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->cmd_pending_msg == NULL) {
+ nfc_driver_failure(hdev->ndev, err);
+ goto exit;
+ }
+
+ __nfc_hci_cmd_completion(hdev, err, NULL);
+
+exit:
+ mutex_unlock(&hdev->msg_tx_mutex);
+}
+
+static void nfc_hci_llc_failure(struct nfc_hci_dev *hdev, int err)
+{
+ nfc_hci_failure(hdev, err);
+}
+
+static void nfc_hci_recv_from_llc(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ struct hcp_packet *packet;
+ u8 type;
+ u8 instruction;
+ struct sk_buff *hcp_skb;
+ u8 pipe;
+ struct sk_buff *frag_skb;
+ int msg_len;
+
+ packet = (struct hcp_packet *)skb->data;
+ if ((packet->header & ~NFC_HCI_FRAGMENT) == 0) {
+ skb_queue_tail(&hdev->rx_hcp_frags, skb);
+ return;
+ }
+
+ /* it's the last fragment. Does it need re-aggregation? */
+ if (skb_queue_len(&hdev->rx_hcp_frags)) {
+ pipe = packet->header & NFC_HCI_FRAGMENT;
+ skb_queue_tail(&hdev->rx_hcp_frags, skb);
+
+ msg_len = 0;
+ skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+ msg_len += (frag_skb->len -
+ NFC_HCI_HCP_PACKET_HEADER_LEN);
+ }
+
+ hcp_skb = nfc_alloc_recv_skb(NFC_HCI_HCP_PACKET_HEADER_LEN +
+ msg_len, GFP_KERNEL);
+ if (hcp_skb == NULL) {
+ nfc_hci_failure(hdev, -ENOMEM);
+ return;
+ }
+
+ skb_put_u8(hcp_skb, pipe);
+
+ skb_queue_walk(&hdev->rx_hcp_frags, frag_skb) {
+ msg_len = frag_skb->len - NFC_HCI_HCP_PACKET_HEADER_LEN;
+ skb_put_data(hcp_skb,
+ frag_skb->data + NFC_HCI_HCP_PACKET_HEADER_LEN,
+ msg_len);
+ }
+
+ skb_queue_purge(&hdev->rx_hcp_frags);
+ } else {
+ packet->header &= NFC_HCI_FRAGMENT;
+ hcp_skb = skb;
+ }
+
+ /* if this is a response, dispatch immediately to
+ * unblock waiting cmd context. Otherwise, enqueue to dispatch
+ * in separate context where handler can also execute command.
+ */
+ packet = (struct hcp_packet *)hcp_skb->data;
+ type = HCP_MSG_GET_TYPE(packet->message.header);
+ if (type == NFC_HCI_HCP_RESPONSE) {
+ pipe = packet->header;
+ instruction = HCP_MSG_GET_CMD(packet->message.header);
+ skb_pull(hcp_skb, NFC_HCI_HCP_PACKET_HEADER_LEN +
+ NFC_HCI_HCP_MESSAGE_HEADER_LEN);
+ nfc_hci_hcp_message_rx(hdev, pipe, type, instruction, hcp_skb);
+ } else {
+ skb_queue_tail(&hdev->msg_rx_queue, hcp_skb);
+ schedule_work(&hdev->msg_rx_work);
+ }
+}
+
+static int hci_fw_download(struct nfc_dev *nfc_dev, const char *firmware_name)
+{
+ struct nfc_hci_dev *hdev = nfc_get_drvdata(nfc_dev);
+
+ if (!hdev->ops->fw_download)
+ return -ENOTSUPP;
+
+ return hdev->ops->fw_download(hdev, firmware_name);
+}
+
+static const struct nfc_ops hci_nfc_ops = {
+ .dev_up = hci_dev_up,
+ .dev_down = hci_dev_down,
+ .start_poll = hci_start_poll,
+ .stop_poll = hci_stop_poll,
+ .dep_link_up = hci_dep_link_up,
+ .dep_link_down = hci_dep_link_down,
+ .activate_target = hci_activate_target,
+ .deactivate_target = hci_deactivate_target,
+ .im_transceive = hci_transceive,
+ .tm_send = hci_tm_send,
+ .check_presence = hci_check_presence,
+ .fw_download = hci_fw_download,
+ .discover_se = hci_discover_se,
+ .enable_se = hci_enable_se,
+ .disable_se = hci_disable_se,
+ .se_io = hci_se_io,
+};
+
+struct nfc_hci_dev *nfc_hci_allocate_device(const struct nfc_hci_ops *ops,
+ struct nfc_hci_init_data *init_data,
+ unsigned long quirks,
+ u32 protocols,
+ const char *llc_name,
+ int tx_headroom,
+ int tx_tailroom,
+ int max_link_payload)
+{
+ struct nfc_hci_dev *hdev;
+
+ if (ops->xmit == NULL)
+ return NULL;
+
+ if (protocols == 0)
+ return NULL;
+
+ hdev = kzalloc(sizeof(struct nfc_hci_dev), GFP_KERNEL);
+ if (hdev == NULL)
+ return NULL;
+
+ hdev->llc = nfc_llc_allocate(llc_name, hdev, ops->xmit,
+ nfc_hci_recv_from_llc, tx_headroom,
+ tx_tailroom, nfc_hci_llc_failure);
+ if (hdev->llc == NULL) {
+ kfree(hdev);
+ return NULL;
+ }
+
+ hdev->ndev = nfc_allocate_device(&hci_nfc_ops, protocols,
+ tx_headroom + HCI_CMDS_HEADROOM,
+ tx_tailroom);
+ if (!hdev->ndev) {
+ nfc_llc_free(hdev->llc);
+ kfree(hdev);
+ return NULL;
+ }
+
+ hdev->ops = ops;
+ hdev->max_data_link_payload = max_link_payload;
+ hdev->init_data = *init_data;
+
+ nfc_set_drvdata(hdev->ndev, hdev);
+
+ nfc_hci_reset_pipes(hdev);
+
+ hdev->quirks = quirks;
+
+ return hdev;
+}
+EXPORT_SYMBOL(nfc_hci_allocate_device);
+
+void nfc_hci_free_device(struct nfc_hci_dev *hdev)
+{
+ nfc_free_device(hdev->ndev);
+ nfc_llc_free(hdev->llc);
+ kfree(hdev);
+}
+EXPORT_SYMBOL(nfc_hci_free_device);
+
+int nfc_hci_register_device(struct nfc_hci_dev *hdev)
+{
+ mutex_init(&hdev->msg_tx_mutex);
+
+ INIT_LIST_HEAD(&hdev->msg_tx_queue);
+
+ INIT_WORK(&hdev->msg_tx_work, nfc_hci_msg_tx_work);
+
+ timer_setup(&hdev->cmd_timer, nfc_hci_cmd_timeout, 0);
+
+ skb_queue_head_init(&hdev->rx_hcp_frags);
+
+ INIT_WORK(&hdev->msg_rx_work, nfc_hci_msg_rx_work);
+
+ skb_queue_head_init(&hdev->msg_rx_queue);
+
+ return nfc_register_device(hdev->ndev);
+}
+EXPORT_SYMBOL(nfc_hci_register_device);
+
+void nfc_hci_unregister_device(struct nfc_hci_dev *hdev)
+{
+ struct hci_msg *msg, *n;
+
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->cmd_pending_msg) {
+ if (hdev->cmd_pending_msg->cb)
+ hdev->cmd_pending_msg->cb(
+ hdev->cmd_pending_msg->cb_context,
+ NULL, -ESHUTDOWN);
+ kfree(hdev->cmd_pending_msg);
+ hdev->cmd_pending_msg = NULL;
+ }
+
+ hdev->shutting_down = true;
+
+ mutex_unlock(&hdev->msg_tx_mutex);
+
+ del_timer_sync(&hdev->cmd_timer);
+ cancel_work_sync(&hdev->msg_tx_work);
+
+ cancel_work_sync(&hdev->msg_rx_work);
+
+ nfc_unregister_device(hdev->ndev);
+
+ skb_queue_purge(&hdev->rx_hcp_frags);
+ skb_queue_purge(&hdev->msg_rx_queue);
+
+ list_for_each_entry_safe(msg, n, &hdev->msg_tx_queue, msg_l) {
+ list_del(&msg->msg_l);
+ skb_queue_purge(&msg->msg_frags);
+ kfree(msg);
+ }
+}
+EXPORT_SYMBOL(nfc_hci_unregister_device);
+
+void nfc_hci_set_clientdata(struct nfc_hci_dev *hdev, void *clientdata)
+{
+ hdev->clientdata = clientdata;
+}
+EXPORT_SYMBOL(nfc_hci_set_clientdata);
+
+void *nfc_hci_get_clientdata(struct nfc_hci_dev *hdev)
+{
+ return hdev->clientdata;
+}
+EXPORT_SYMBOL(nfc_hci_get_clientdata);
+
+void nfc_hci_driver_failure(struct nfc_hci_dev *hdev, int err)
+{
+ nfc_hci_failure(hdev, err);
+}
+EXPORT_SYMBOL(nfc_hci_driver_failure);
+
+void nfc_hci_recv_frame(struct nfc_hci_dev *hdev, struct sk_buff *skb)
+{
+ nfc_llc_rcv_from_drv(hdev->llc, skb);
+}
+EXPORT_SYMBOL(nfc_hci_recv_frame);
+
+static int __init nfc_hci_init(void)
+{
+ return nfc_llc_init();
+}
+
+static void __exit nfc_hci_exit(void)
+{
+ nfc_llc_exit();
+}
+
+subsys_initcall(nfc_hci_init);
+module_exit(nfc_hci_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("NFC HCI Core");
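
core.c is the piece a chip driver talks to: it allocates an nfc_hci_dev bound to an LLC by name, registers it with the NFC core, and drives session setup from hci_dev_up(). A rough driver-side sketch, assuming hypothetical my_hci_ops, init_data and framing values; the call signatures are the ones exported above, and LLC_NOP_NAME (from <net/nfc/llc.h>) selects the pass-through LLC registered in llc_nop.c below:

	#include <net/nfc/hci.h>
	#include <net/nfc/llc.h>

	static int my_hci_register(const struct nfc_hci_ops *my_hci_ops,
				   struct nfc_hci_init_data *init_data,
				   u32 protocols)
	{
		struct nfc_hci_dev *hdev;

		/* init_data->gates[0] must describe NFC_HCI_ADMIN_GATE,
		 * see hci_dev_session_init() above.
		 */
		hdev = nfc_hci_allocate_device(my_hci_ops, init_data, 0,
					       protocols, LLC_NOP_NAME,
					       4 /* tx_headroom, hypothetical */,
					       0 /* tx_tailroom, hypothetical */,
					       29 /* max_link_payload, hypothetical */);
		if (!hdev)
			return -ENOMEM;

		return nfc_hci_register_device(hdev);
	}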
diff --git a/net/nfc/hci/hci.h b/net/nfc/hci/hci.h
new file mode 100644
index 000000000..a59c96fcf
--- /dev/null
+++ b/net/nfc/hci/hci.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __LOCAL_HCI_H
+#define __LOCAL_HCI_H
+
+#include <net/nfc/hci.h>
+
+struct gate_pipe_map {
+ u8 gate;
+ u8 pipe;
+};
+
+struct hcp_message {
+ u8 header; /* type -cmd,evt,rsp- + instruction */
+ u8 data[];
+} __packed;
+
+struct hcp_packet {
+ u8 header; /* cbit+pipe */
+ struct hcp_message message;
+} __packed;
+
+struct hcp_exec_waiter {
+ wait_queue_head_t *wq;
+ bool exec_complete;
+ int exec_result;
+ struct sk_buff *result_skb;
+};
+
+struct hci_msg {
+ struct list_head msg_l;
+ struct sk_buff_head msg_frags;
+ bool wait_response;
+ data_exchange_cb_t cb;
+ void *cb_context;
+ unsigned long completion_delay;
+};
+
+struct hci_create_pipe_params {
+ u8 src_gate;
+ u8 dest_host;
+ u8 dest_gate;
+} __packed;
+
+struct hci_create_pipe_resp {
+ u8 src_host;
+ u8 src_gate;
+ u8 dest_host;
+ u8 dest_gate;
+ u8 pipe;
+} __packed;
+
+struct hci_delete_pipe_noti {
+ u8 pipe;
+} __packed;
+
+struct hci_all_pipe_cleared_noti {
+ u8 host;
+} __packed;
+
+#define NFC_HCI_FRAGMENT 0x7f
+
+#define HCP_HEADER(type, instr) ((((type) & 0x03) << 6) | ((instr) & 0x3f))
+#define HCP_MSG_GET_TYPE(header) ((header & 0xc0) >> 6)
+#define HCP_MSG_GET_CMD(header) (header & 0x3f)
+
+int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
+ u8 type, u8 instruction,
+ const u8 *payload, size_t payload_len,
+ data_exchange_cb_t cb, void *cb_context,
+ unsigned long completion_delay);
+
+void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
+ u8 instruction, struct sk_buff *skb);
+
+/* HCP headers */
+#define NFC_HCI_HCP_PACKET_HEADER_LEN 1
+#define NFC_HCI_HCP_MESSAGE_HEADER_LEN 1
+#define NFC_HCI_HCP_HEADER_LEN 2
+
+/* HCP types */
+#define NFC_HCI_HCP_COMMAND 0x00
+#define NFC_HCI_HCP_EVENT 0x01
+#define NFC_HCI_HCP_RESPONSE 0x02
+
+/* Generic commands */
+#define NFC_HCI_ANY_SET_PARAMETER 0x01
+#define NFC_HCI_ANY_GET_PARAMETER 0x02
+#define NFC_HCI_ANY_OPEN_PIPE 0x03
+#define NFC_HCI_ANY_CLOSE_PIPE 0x04
+
+/* Reader RF commands */
+#define NFC_HCI_WR_XCHG_DATA 0x10
+
+/* Admin commands */
+#define NFC_HCI_ADM_CREATE_PIPE 0x10
+#define NFC_HCI_ADM_DELETE_PIPE 0x11
+#define NFC_HCI_ADM_NOTIFY_PIPE_CREATED 0x12
+#define NFC_HCI_ADM_NOTIFY_PIPE_DELETED 0x13
+#define NFC_HCI_ADM_CLEAR_ALL_PIPE 0x14
+#define NFC_HCI_ADM_NOTIFY_ALL_PIPE_CLEARED 0x15
+
+/* Generic responses */
+#define NFC_HCI_ANY_OK 0x00
+#define NFC_HCI_ANY_E_NOT_CONNECTED 0x01
+#define NFC_HCI_ANY_E_CMD_PAR_UNKNOWN 0x02
+#define NFC_HCI_ANY_E_NOK 0x03
+#define NFC_HCI_ANY_E_PIPES_FULL 0x04
+#define NFC_HCI_ANY_E_REG_PAR_UNKNOWN 0x05
+#define NFC_HCI_ANY_E_PIPE_NOT_OPENED 0x06
+#define NFC_HCI_ANY_E_CMD_NOT_SUPPORTED 0x07
+#define NFC_HCI_ANY_E_INHIBITED 0x08
+#define NFC_HCI_ANY_E_TIMEOUT 0x09
+#define NFC_HCI_ANY_E_REG_ACCESS_DENIED 0x0a
+#define NFC_HCI_ANY_E_PIPE_ACCESS_DENIED 0x0b
+
+#endif /* __LOCAL_HCI_H */
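
To make the header macros above concrete: an HCP packet starts with one byte carrying the chaining bit (bit 7) and the pipe id (bits 6..0, the NFC_HCI_FRAGMENT mask), followed by a message byte that packs the 2-bit type and the 6-bit instruction. A small hedged illustration, not taken from this patch:

	#include <linux/types.h>
	#include <linux/bug.h>

	#include "hci.h"

	static void hcp_header_example(void)
	{
		u8 hdr = HCP_HEADER(NFC_HCI_HCP_COMMAND, NFC_HCI_ANY_OPEN_PIPE);

		/* hdr == 0x03: type 0x00 in bits 7..6, instruction 0x03 in bits 5..0 */
		WARN_ON(HCP_MSG_GET_TYPE(hdr) != NFC_HCI_HCP_COMMAND);
		WARN_ON(HCP_MSG_GET_CMD(hdr) != NFC_HCI_ANY_OPEN_PIPE);
	}

On the packet header byte, (header & ~NFC_HCI_FRAGMENT) == 0 marks an intermediate fragment; the final fragment has bit 7 set, as done in nfc_hci_hcp_message_tx() and tested in nfc_hci_recv_from_llc().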
diff --git a/net/nfc/hci/hcp.c b/net/nfc/hci/hcp.c
new file mode 100644
index 000000000..4902f5064
--- /dev/null
+++ b/net/nfc/hci/hcp.c
@@ -0,0 +1,136 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "hci: %s: " fmt, __func__
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <net/nfc/hci.h>
+
+#include "hci.h"
+
+/*
+ * Payload is the HCP message data only. Instruction will be prepended.
+ * Guarantees that cb will be called upon completion or upon expiry of the
+ * timeout delay, counted from the moment the cmd is sent to the transport.
+ */
+int nfc_hci_hcp_message_tx(struct nfc_hci_dev *hdev, u8 pipe,
+ u8 type, u8 instruction,
+ const u8 *payload, size_t payload_len,
+ data_exchange_cb_t cb, void *cb_context,
+ unsigned long completion_delay)
+{
+ struct nfc_dev *ndev = hdev->ndev;
+ struct hci_msg *cmd;
+ const u8 *ptr = payload;
+ int hci_len, err;
+ bool firstfrag = true;
+
+ cmd = kzalloc(sizeof(struct hci_msg), GFP_KERNEL);
+ if (cmd == NULL)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&cmd->msg_l);
+ skb_queue_head_init(&cmd->msg_frags);
+ cmd->wait_response = (type == NFC_HCI_HCP_COMMAND) ? true : false;
+ cmd->cb = cb;
+ cmd->cb_context = cb_context;
+ cmd->completion_delay = completion_delay;
+
+ hci_len = payload_len + 1;
+ while (hci_len > 0) {
+ struct sk_buff *skb;
+ int skb_len, data_link_len;
+ struct hcp_packet *packet;
+
+ if (NFC_HCI_HCP_PACKET_HEADER_LEN + hci_len <=
+ hdev->max_data_link_payload)
+ data_link_len = hci_len;
+ else
+ data_link_len = hdev->max_data_link_payload -
+ NFC_HCI_HCP_PACKET_HEADER_LEN;
+
+ skb_len = ndev->tx_headroom + NFC_HCI_HCP_PACKET_HEADER_LEN +
+ data_link_len + ndev->tx_tailroom;
+ hci_len -= data_link_len;
+
+ skb = alloc_skb(skb_len, GFP_KERNEL);
+ if (skb == NULL) {
+ err = -ENOMEM;
+ goto out_skb_err;
+ }
+ skb_reserve(skb, ndev->tx_headroom);
+
+ skb_put(skb, NFC_HCI_HCP_PACKET_HEADER_LEN + data_link_len);
+
+ /* Only the last fragment will have the cb bit set to 1 */
+ packet = (struct hcp_packet *)skb->data;
+ packet->header = pipe;
+ if (firstfrag) {
+ firstfrag = false;
+ packet->message.header = HCP_HEADER(type, instruction);
+ } else {
+ packet->message.header = *ptr++;
+ }
+ if (ptr) {
+ memcpy(packet->message.data, ptr, data_link_len - 1);
+ ptr += data_link_len - 1;
+ }
+
+ /* This is the last fragment, set the cb bit */
+ if (hci_len == 0)
+ packet->header |= ~NFC_HCI_FRAGMENT;
+
+ skb_queue_tail(&cmd->msg_frags, skb);
+ }
+
+ mutex_lock(&hdev->msg_tx_mutex);
+
+ if (hdev->shutting_down) {
+ err = -ESHUTDOWN;
+ mutex_unlock(&hdev->msg_tx_mutex);
+ goto out_skb_err;
+ }
+
+ list_add_tail(&cmd->msg_l, &hdev->msg_tx_queue);
+ mutex_unlock(&hdev->msg_tx_mutex);
+
+ schedule_work(&hdev->msg_tx_work);
+
+ return 0;
+
+out_skb_err:
+ skb_queue_purge(&cmd->msg_frags);
+ kfree(cmd);
+
+ return err;
+}
+
+/*
+ * Receive hcp message for pipe, with type and cmd.
+ * skb contains optional message data only.
+ */
+void nfc_hci_hcp_message_rx(struct nfc_hci_dev *hdev, u8 pipe, u8 type,
+ u8 instruction, struct sk_buff *skb)
+{
+ switch (type) {
+ case NFC_HCI_HCP_RESPONSE:
+ nfc_hci_resp_received(hdev, instruction, skb);
+ break;
+ case NFC_HCI_HCP_COMMAND:
+ nfc_hci_cmd_received(hdev, pipe, instruction, skb);
+ break;
+ case NFC_HCI_HCP_EVENT:
+ nfc_hci_event_received(hdev, pipe, instruction, skb);
+ break;
+ default:
+ pr_err("UNKNOWN MSG Type %d, instruction=%d\n",
+ type, instruction);
+ kfree_skb(skb);
+ break;
+ }
+}
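
For the transmit path above, the frame count follows directly from the header sizes: one HCP message header byte is prepended to the payload, and every link frame spends one byte on the HCP packet header. A hedged helper (not part of the patch) expressing that arithmetic:

	#include <linux/kernel.h>

	#include "hci.h"

	/* Number of link frames nfc_hci_hcp_message_tx() will queue for a
	 * given payload and device max_data_link_payload.
	 */
	static unsigned int hcp_nr_frags(size_t payload_len,
					 size_t max_data_link_payload)
	{
		size_t per_frame = max_data_link_payload -
				   NFC_HCI_HCP_PACKET_HEADER_LEN;

		return DIV_ROUND_UP(payload_len + NFC_HCI_HCP_MESSAGE_HEADER_LEN,
				    per_frame);
	}

For example, a 100-byte payload with a (hypothetical) max_data_link_payload of 29 gives 101 message bytes split as 28 + 28 + 28 + 17, i.e. four frames.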
diff --git a/net/nfc/hci/llc.c b/net/nfc/hci/llc.c
new file mode 100644
index 000000000..2140f6724
--- /dev/null
+++ b/net/nfc/hci/llc.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Link Layer Control manager
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <net/nfc/llc.h>
+
+#include "llc.h"
+
+static LIST_HEAD(llc_engines);
+
+int __init nfc_llc_init(void)
+{
+ int r;
+
+ r = nfc_llc_nop_register();
+ if (r)
+ goto exit;
+
+ r = nfc_llc_shdlc_register();
+ if (r)
+ goto exit;
+
+ return 0;
+
+exit:
+ nfc_llc_exit();
+ return r;
+}
+
+void nfc_llc_exit(void)
+{
+ struct nfc_llc_engine *llc_engine, *n;
+
+ list_for_each_entry_safe(llc_engine, n, &llc_engines, entry) {
+ list_del(&llc_engine->entry);
+ kfree(llc_engine->name);
+ kfree(llc_engine);
+ }
+}
+
+int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops)
+{
+ struct nfc_llc_engine *llc_engine;
+
+ llc_engine = kzalloc(sizeof(struct nfc_llc_engine), GFP_KERNEL);
+ if (llc_engine == NULL)
+ return -ENOMEM;
+
+ llc_engine->name = kstrdup(name, GFP_KERNEL);
+ if (llc_engine->name == NULL) {
+ kfree(llc_engine);
+ return -ENOMEM;
+ }
+ llc_engine->ops = ops;
+
+ INIT_LIST_HEAD(&llc_engine->entry);
+ list_add_tail(&llc_engine->entry, &llc_engines);
+
+ return 0;
+}
+
+static struct nfc_llc_engine *nfc_llc_name_to_engine(const char *name)
+{
+ struct nfc_llc_engine *llc_engine;
+
+ list_for_each_entry(llc_engine, &llc_engines, entry) {
+ if (strcmp(llc_engine->name, name) == 0)
+ return llc_engine;
+ }
+
+ return NULL;
+}
+
+void nfc_llc_unregister(const char *name)
+{
+ struct nfc_llc_engine *llc_engine;
+
+ llc_engine = nfc_llc_name_to_engine(name);
+ if (llc_engine == NULL)
+ return;
+
+ list_del(&llc_engine->entry);
+ kfree(llc_engine->name);
+ kfree(llc_engine);
+}
+
+struct nfc_llc *nfc_llc_allocate(const char *name, struct nfc_hci_dev *hdev,
+ xmit_to_drv_t xmit_to_drv,
+ rcv_to_hci_t rcv_to_hci, int tx_headroom,
+ int tx_tailroom, llc_failure_t llc_failure)
+{
+ struct nfc_llc_engine *llc_engine;
+ struct nfc_llc *llc;
+
+ llc_engine = nfc_llc_name_to_engine(name);
+ if (llc_engine == NULL)
+ return NULL;
+
+ llc = kzalloc(sizeof(struct nfc_llc), GFP_KERNEL);
+ if (llc == NULL)
+ return NULL;
+
+ llc->data = llc_engine->ops->init(hdev, xmit_to_drv, rcv_to_hci,
+ tx_headroom, tx_tailroom,
+ &llc->rx_headroom, &llc->rx_tailroom,
+ llc_failure);
+ if (llc->data == NULL) {
+ kfree(llc);
+ return NULL;
+ }
+ llc->ops = llc_engine->ops;
+
+ return llc;
+}
+
+void nfc_llc_free(struct nfc_llc *llc)
+{
+ llc->ops->deinit(llc);
+ kfree(llc);
+}
+
+int nfc_llc_start(struct nfc_llc *llc)
+{
+ return llc->ops->start(llc);
+}
+EXPORT_SYMBOL(nfc_llc_start);
+
+int nfc_llc_stop(struct nfc_llc *llc)
+{
+ return llc->ops->stop(llc);
+}
+EXPORT_SYMBOL(nfc_llc_stop);
+
+void nfc_llc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
+{
+ llc->ops->rcv_from_drv(llc, skb);
+}
+
+int nfc_llc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
+{
+ return llc->ops->xmit_from_hci(llc, skb);
+}
+
+void *nfc_llc_get_data(struct nfc_llc *llc)
+{
+ return llc->data;
+}
diff --git a/net/nfc/hci/llc.h b/net/nfc/hci/llc.h
new file mode 100644
index 000000000..d66271d21
--- /dev/null
+++ b/net/nfc/hci/llc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Link Layer Control manager
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __LOCAL_LLC_H_
+#define __LOCAL_LLC_H_
+
+#include <net/nfc/hci.h>
+#include <net/nfc/llc.h>
+#include <linux/skbuff.h>
+
+struct nfc_llc_ops {
+ void *(*init) (struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
+ rcv_to_hci_t rcv_to_hci, int tx_headroom,
+ int tx_tailroom, int *rx_headroom, int *rx_tailroom,
+ llc_failure_t llc_failure);
+ void (*deinit) (struct nfc_llc *llc);
+ int (*start) (struct nfc_llc *llc);
+ int (*stop) (struct nfc_llc *llc);
+ void (*rcv_from_drv) (struct nfc_llc *llc, struct sk_buff *skb);
+ int (*xmit_from_hci) (struct nfc_llc *llc, struct sk_buff *skb);
+};
+
+struct nfc_llc_engine {
+ const char *name;
+ const struct nfc_llc_ops *ops;
+ struct list_head entry;
+};
+
+struct nfc_llc {
+ void *data;
+ const struct nfc_llc_ops *ops;
+ int rx_headroom;
+ int rx_tailroom;
+};
+
+void *nfc_llc_get_data(struct nfc_llc *llc);
+
+int nfc_llc_register(const char *name, const struct nfc_llc_ops *ops);
+void nfc_llc_unregister(const char *name);
+
+int nfc_llc_nop_register(void);
+
+#if defined(CONFIG_NFC_SHDLC)
+int nfc_llc_shdlc_register(void);
+#else
+static inline int nfc_llc_shdlc_register(void)
+{
+ return 0;
+}
+#endif
+
+#endif /* __LOCAL_LLC_H_ */
diff --git a/net/nfc/hci/llc_nop.c b/net/nfc/hci/llc_nop.c
new file mode 100644
index 000000000..a58716f16
--- /dev/null
+++ b/net/nfc/hci/llc_nop.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * nop (passthrough) Link Layer Control
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ */
+
+#include <linux/types.h>
+
+#include "llc.h"
+
+struct llc_nop {
+ struct nfc_hci_dev *hdev;
+ xmit_to_drv_t xmit_to_drv;
+ rcv_to_hci_t rcv_to_hci;
+ int tx_headroom;
+ int tx_tailroom;
+ llc_failure_t llc_failure;
+};
+
+static void *llc_nop_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
+ rcv_to_hci_t rcv_to_hci, int tx_headroom,
+ int tx_tailroom, int *rx_headroom, int *rx_tailroom,
+ llc_failure_t llc_failure)
+{
+ struct llc_nop *llc_nop;
+
+ *rx_headroom = 0;
+ *rx_tailroom = 0;
+
+ llc_nop = kzalloc(sizeof(struct llc_nop), GFP_KERNEL);
+ if (llc_nop == NULL)
+ return NULL;
+
+ llc_nop->hdev = hdev;
+ llc_nop->xmit_to_drv = xmit_to_drv;
+ llc_nop->rcv_to_hci = rcv_to_hci;
+ llc_nop->tx_headroom = tx_headroom;
+ llc_nop->tx_tailroom = tx_tailroom;
+ llc_nop->llc_failure = llc_failure;
+
+ return llc_nop;
+}
+
+static void llc_nop_deinit(struct nfc_llc *llc)
+{
+ kfree(nfc_llc_get_data(llc));
+}
+
+static int llc_nop_start(struct nfc_llc *llc)
+{
+ return 0;
+}
+
+static int llc_nop_stop(struct nfc_llc *llc)
+{
+ return 0;
+}
+
+static void llc_nop_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
+{
+ struct llc_nop *llc_nop = nfc_llc_get_data(llc);
+
+ llc_nop->rcv_to_hci(llc_nop->hdev, skb);
+}
+
+static int llc_nop_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
+{
+ struct llc_nop *llc_nop = nfc_llc_get_data(llc);
+
+ return llc_nop->xmit_to_drv(llc_nop->hdev, skb);
+}
+
+static const struct nfc_llc_ops llc_nop_ops = {
+ .init = llc_nop_init,
+ .deinit = llc_nop_deinit,
+ .start = llc_nop_start,
+ .stop = llc_nop_stop,
+ .rcv_from_drv = llc_nop_rcv_from_drv,
+ .xmit_from_hci = llc_nop_xmit_from_hci,
+};
+
+int nfc_llc_nop_register(void)
+{
+ return nfc_llc_register(LLC_NOP_NAME, &llc_nop_ops);
+}
diff --git a/net/nfc/hci/llc_shdlc.c b/net/nfc/hci/llc_shdlc.c
new file mode 100644
index 000000000..e90f70385
--- /dev/null
+++ b/net/nfc/hci/llc_shdlc.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * shdlc Link Layer Control
+ *
+ * Copyright (C) 2012 Intel Corporation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "shdlc: %s: " fmt, __func__
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/slab.h>
+#include <linux/skbuff.h>
+
+#include "llc.h"
+
+enum shdlc_state {
+ SHDLC_DISCONNECTED = 0,
+ SHDLC_CONNECTING = 1,
+ SHDLC_NEGOTIATING = 2,
+ SHDLC_HALF_CONNECTED = 3,
+ SHDLC_CONNECTED = 4
+};
+
+struct llc_shdlc {
+ struct nfc_hci_dev *hdev;
+ xmit_to_drv_t xmit_to_drv;
+ rcv_to_hci_t rcv_to_hci;
+
+ struct mutex state_mutex;
+ enum shdlc_state state;
+ int hard_fault;
+
+ wait_queue_head_t *connect_wq;
+ int connect_tries;
+ int connect_result;
+ struct timer_list connect_timer;/* aka T3 in spec 10.6.1 */
+
+ u8 w; /* window size */
+ bool srej_support;
+
+ struct timer_list t1_timer; /* send ack timeout */
+ bool t1_active;
+
+ struct timer_list t2_timer; /* guard/retransmit timeout */
+ bool t2_active;
+
+ int ns; /* next seq num for send */
+ int nr; /* next expected seq num for receive */
+ int dnr; /* oldest sent unacked seq num */
+
+ struct sk_buff_head rcv_q;
+
+ struct sk_buff_head send_q;
+ bool rnr; /* other side is not ready to receive */
+
+ struct sk_buff_head ack_pending_q;
+
+ struct work_struct sm_work;
+
+ int tx_headroom;
+ int tx_tailroom;
+
+ llc_failure_t llc_failure;
+};
+
+#define SHDLC_LLC_HEAD_ROOM 2
+
+#define SHDLC_MAX_WINDOW 4
+#define SHDLC_SREJ_SUPPORT false
+
+#define SHDLC_CONTROL_HEAD_MASK 0xe0
+#define SHDLC_CONTROL_HEAD_I 0x80
+#define SHDLC_CONTROL_HEAD_I2 0xa0
+#define SHDLC_CONTROL_HEAD_S 0xc0
+#define SHDLC_CONTROL_HEAD_U 0xe0
+
+#define SHDLC_CONTROL_NS_MASK 0x38
+#define SHDLC_CONTROL_NR_MASK 0x07
+#define SHDLC_CONTROL_TYPE_MASK 0x18
+
+#define SHDLC_CONTROL_M_MASK 0x1f
+
+enum sframe_type {
+ S_FRAME_RR = 0x00,
+ S_FRAME_REJ = 0x01,
+ S_FRAME_RNR = 0x02,
+ S_FRAME_SREJ = 0x03
+};
+
+enum uframe_modifier {
+ U_FRAME_UA = 0x06,
+ U_FRAME_RSET = 0x19
+};
+
+#define SHDLC_CONNECT_VALUE_MS 5
+#define SHDLC_T1_VALUE_MS(w) ((5 * w) / 4)
+#define SHDLC_T2_VALUE_MS 300
+
+#define SHDLC_DUMP_SKB(info, skb) \
+do { \
+ pr_debug("%s:\n", info); \
+ print_hex_dump(KERN_DEBUG, "shdlc: ", DUMP_PREFIX_OFFSET, \
+ 16, 1, skb->data, skb->len, 0); \
+} while (0)
+
+/* checks x < y <= z modulo 8 */
+static bool llc_shdlc_x_lt_y_lteq_z(int x, int y, int z)
+{
+ if (x < z)
+ return ((x < y) && (y <= z)) ? true : false;
+ else
+ return ((y > x) || (y <= z)) ? true : false;
+}
+
+/* checks x <= y < z modulo 8 */
+static bool llc_shdlc_x_lteq_y_lt_z(int x, int y, int z)
+{
+ if (x <= z)
+ return ((x <= y) && (y < z)) ? true : false;
+ else /* x > z -> z+8 > x */
+ return ((y >= x) || (y < z)) ? true : false;
+}
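
Both helpers above compare 3-bit sequence numbers on a circle of size 8, which is why the test flips once the window wraps past 7. For instance, with dnr = 6 and ns = 1 (frames 6, 7 and 0 outstanding), llc_shdlc_rcv_ack() below sees:

	llc_shdlc_x_lt_y_lteq_z(6, 7, 1) == true	/* acks frame 6 */
	llc_shdlc_x_lt_y_lteq_z(6, 0, 1) == true	/* acks frames 6 and 7 */
	llc_shdlc_x_lt_y_lteq_z(6, 2, 1) == false	/* outside the send window */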
+
+static struct sk_buff *llc_shdlc_alloc_skb(const struct llc_shdlc *shdlc,
+ int payload_len)
+{
+ struct sk_buff *skb;
+
+ skb = alloc_skb(shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM +
+ shdlc->tx_tailroom + payload_len, GFP_KERNEL);
+ if (skb)
+ skb_reserve(skb, shdlc->tx_headroom + SHDLC_LLC_HEAD_ROOM);
+
+ return skb;
+}
+
+/* immediately sends an S frame. */
+static int llc_shdlc_send_s_frame(const struct llc_shdlc *shdlc,
+ enum sframe_type sframe_type, int nr)
+{
+ int r;
+ struct sk_buff *skb;
+
+ pr_debug("sframe_type=%d nr=%d\n", sframe_type, nr);
+
+ skb = llc_shdlc_alloc_skb(shdlc, 0);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_S | (sframe_type << 3) | nr;
+
+ r = shdlc->xmit_to_drv(shdlc->hdev, skb);
+
+ kfree_skb(skb);
+
+ return r;
+}
+
+/* immediately sends an U frame. skb may contain optional payload */
+static int llc_shdlc_send_u_frame(const struct llc_shdlc *shdlc,
+ struct sk_buff *skb,
+ enum uframe_modifier uframe_modifier)
+{
+ int r;
+
+ pr_debug("uframe_modifier=%d\n", uframe_modifier);
+
+ *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_U | uframe_modifier;
+
+ r = shdlc->xmit_to_drv(shdlc->hdev, skb);
+
+ kfree_skb(skb);
+
+ return r;
+}
+
+/*
+ * Free ack_pending frames until y_nr - 1, and reset t2 according to
+ * the remaining oldest ack_pending frame sent time
+ */
+static void llc_shdlc_reset_t2(struct llc_shdlc *shdlc, int y_nr)
+{
+ struct sk_buff *skb;
+ int dnr = shdlc->dnr; /* MUST initially be < y_nr */
+
+ pr_debug("release ack pending up to frame %d excluded\n", y_nr);
+
+ while (dnr != y_nr) {
+ pr_debug("release ack pending frame %d\n", dnr);
+
+ skb = skb_dequeue(&shdlc->ack_pending_q);
+ kfree_skb(skb);
+
+ dnr = (dnr + 1) % 8;
+ }
+
+ if (skb_queue_empty(&shdlc->ack_pending_q)) {
+ if (shdlc->t2_active) {
+ del_timer_sync(&shdlc->t2_timer);
+ shdlc->t2_active = false;
+
+ pr_debug("All sent frames acked. Stopped T2(retransmit)\n");
+ }
+ } else {
+ skb = skb_peek(&shdlc->ack_pending_q);
+
+ mod_timer(&shdlc->t2_timer, *(unsigned long *)skb->cb +
+ msecs_to_jiffies(SHDLC_T2_VALUE_MS));
+ shdlc->t2_active = true;
+
+ pr_debug("Start T2(retransmit) for remaining unacked sent frames\n");
+ }
+}
+
+/*
+ * Receive validated frames from lower layer. skb contains HCI payload only.
+ * Handle according to algorithm at spec:10.8.2
+ */
+static void llc_shdlc_rcv_i_frame(struct llc_shdlc *shdlc,
+ struct sk_buff *skb, int ns, int nr)
+{
+ int x_ns = ns;
+ int y_nr = nr;
+
+ pr_debug("recvd I-frame %d, remote waiting frame %d\n", ns, nr);
+
+ if (shdlc->state != SHDLC_CONNECTED)
+ goto exit;
+
+ if (x_ns != shdlc->nr) {
+ llc_shdlc_send_s_frame(shdlc, S_FRAME_REJ, shdlc->nr);
+ goto exit;
+ }
+
+ if (!shdlc->t1_active) {
+ shdlc->t1_active = true;
+ mod_timer(&shdlc->t1_timer, jiffies +
+ msecs_to_jiffies(SHDLC_T1_VALUE_MS(shdlc->w)));
+ pr_debug("(re)Start T1(send ack)\n");
+ }
+
+ if (skb->len) {
+ shdlc->rcv_to_hci(shdlc->hdev, skb);
+ skb = NULL;
+ }
+
+ shdlc->nr = (shdlc->nr + 1) % 8;
+
+ if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+ llc_shdlc_reset_t2(shdlc, y_nr);
+
+ shdlc->dnr = y_nr;
+ }
+
+exit:
+ kfree_skb(skb);
+}
+
+static void llc_shdlc_rcv_ack(struct llc_shdlc *shdlc, int y_nr)
+{
+ pr_debug("remote acked up to frame %d excluded\n", y_nr);
+
+ if (llc_shdlc_x_lt_y_lteq_z(shdlc->dnr, y_nr, shdlc->ns)) {
+ llc_shdlc_reset_t2(shdlc, y_nr);
+ shdlc->dnr = y_nr;
+ }
+}
+
+static void llc_shdlc_requeue_ack_pending(struct llc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+
+ pr_debug("ns reset to %d\n", shdlc->dnr);
+
+ while ((skb = skb_dequeue_tail(&shdlc->ack_pending_q))) {
+ skb_pull(skb, 1); /* remove control field */
+ skb_queue_head(&shdlc->send_q, skb);
+ }
+ shdlc->ns = shdlc->dnr;
+}
+
+static void llc_shdlc_rcv_rej(struct llc_shdlc *shdlc, int y_nr)
+{
+ struct sk_buff *skb;
+
+ pr_debug("remote asks retransmission from frame %d\n", y_nr);
+
+ if (llc_shdlc_x_lteq_y_lt_z(shdlc->dnr, y_nr, shdlc->ns)) {
+ if (shdlc->t2_active) {
+ del_timer_sync(&shdlc->t2_timer);
+ shdlc->t2_active = false;
+ pr_debug("Stopped T2(retransmit)\n");
+ }
+
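+ /*
+ * Frames before y_nr were received by the remote: drop them from
+ * ack_pending, then requeue the rest for retransmission.
+ */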
+ if (shdlc->dnr != y_nr) {
+ while ((shdlc->dnr = ((shdlc->dnr + 1) % 8)) != y_nr) {
+ skb = skb_dequeue(&shdlc->ack_pending_q);
+ kfree_skb(skb);
+ }
+ }
+
+ llc_shdlc_requeue_ack_pending(shdlc);
+ }
+}
+
+/* See spec RR:10.8.3 REJ:10.8.4 */
+static void llc_shdlc_rcv_s_frame(struct llc_shdlc *shdlc,
+ enum sframe_type s_frame_type, int nr)
+{
+ struct sk_buff *skb;
+
+ if (shdlc->state != SHDLC_CONNECTED)
+ return;
+
+ switch (s_frame_type) {
+ case S_FRAME_RR:
+ llc_shdlc_rcv_ack(shdlc, nr);
+ if (shdlc->rnr == true) { /* see SHDLC 10.7.7 */
+ shdlc->rnr = false;
+ if (shdlc->send_q.qlen == 0) {
+ skb = llc_shdlc_alloc_skb(shdlc, 0);
+ if (skb)
+ skb_queue_tail(&shdlc->send_q, skb);
+ }
+ }
+ break;
+ case S_FRAME_REJ:
+ llc_shdlc_rcv_rej(shdlc, nr);
+ break;
+ case S_FRAME_RNR:
+ llc_shdlc_rcv_ack(shdlc, nr);
+ shdlc->rnr = true;
+ break;
+ default:
+ break;
+ }
+}
+
+static void llc_shdlc_connect_complete(struct llc_shdlc *shdlc, int r)
+{
+ pr_debug("result=%d\n", r);
+
+ del_timer_sync(&shdlc->connect_timer);
+
+ if (r == 0) {
+ shdlc->ns = 0;
+ shdlc->nr = 0;
+ shdlc->dnr = 0;
+
+ shdlc->state = SHDLC_HALF_CONNECTED;
+ } else {
+ shdlc->state = SHDLC_DISCONNECTED;
+ }
+
+ shdlc->connect_result = r;
+
+ wake_up(shdlc->connect_wq);
+}
+
+static int llc_shdlc_connect_initiate(const struct llc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+
+ skb = llc_shdlc_alloc_skb(shdlc, 2);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ skb_put_u8(skb, SHDLC_MAX_WINDOW);
+ skb_put_u8(skb, SHDLC_SREJ_SUPPORT ? 1 : 0);
+
+ return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_RSET);
+}
+
+static int llc_shdlc_connect_send_ua(const struct llc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+
+ skb = llc_shdlc_alloc_skb(shdlc, 0);
+ if (skb == NULL)
+ return -ENOMEM;
+
+ return llc_shdlc_send_u_frame(shdlc, skb, U_FRAME_UA);
+}
+
+static void llc_shdlc_rcv_u_frame(struct llc_shdlc *shdlc,
+ struct sk_buff *skb,
+ enum uframe_modifier u_frame_modifier)
+{
+ u8 w = SHDLC_MAX_WINDOW;
+ bool srej_support = SHDLC_SREJ_SUPPORT;
+ int r;
+
+ pr_debug("u_frame_modifier=%d\n", u_frame_modifier);
+
+ switch (u_frame_modifier) {
+ case U_FRAME_RSET:
+ switch (shdlc->state) {
+ case SHDLC_NEGOTIATING:
+ case SHDLC_CONNECTING:
+ /*
+ * We sent RSET, but the chip wants to negotiate, or we
+ * got its RSET before we managed to send out ours.
+ */
+ if (skb->len > 0)
+ w = skb->data[0];
+
+ if (skb->len > 1)
+ srej_support = skb->data[1] & 0x01 ? true :
+ false;
+
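+ /* Only accept the proposed parameters if we can honour them */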
+ if ((w <= SHDLC_MAX_WINDOW) &&
+ (SHDLC_SREJ_SUPPORT || (srej_support == false))) {
+ shdlc->w = w;
+ shdlc->srej_support = srej_support;
+ r = llc_shdlc_connect_send_ua(shdlc);
+ llc_shdlc_connect_complete(shdlc, r);
+ }
+ break;
+ case SHDLC_HALF_CONNECTED:
+ /*
+ * Chip resent RSET due to its timeout - ignore it,
+ * as we already sent UA.
+ */
+ break;
+ case SHDLC_CONNECTED:
+ /*
+ * Chip wants to reset link. This is unexpected and
+ * unsupported.
+ */
+ shdlc->hard_fault = -ECONNRESET;
+ break;
+ default:
+ break;
+ }
+ break;
+ case U_FRAME_UA:
+ if ((shdlc->state == SHDLC_CONNECTING &&
+ shdlc->connect_tries > 0) ||
+ (shdlc->state == SHDLC_NEGOTIATING)) {
+ llc_shdlc_connect_complete(shdlc, 0);
+ shdlc->state = SHDLC_CONNECTED;
+ }
+ break;
+ default:
+ break;
+ }
+
+ kfree_skb(skb);
+}
+
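+/* Dispatch queued incoming frames according to their control field type */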
+static void llc_shdlc_handle_rcv_queue(struct llc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+ u8 control;
+ int nr;
+ int ns;
+ enum sframe_type s_frame_type;
+ enum uframe_modifier u_frame_modifier;
+
+ if (shdlc->rcv_q.qlen)
+ pr_debug("rcvQlen=%d\n", shdlc->rcv_q.qlen);
+
+ while ((skb = skb_dequeue(&shdlc->rcv_q)) != NULL) {
+ control = skb->data[0];
+ skb_pull(skb, 1);
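+ /*
+ * The frame type is encoded in the upper bits of the control byte;
+ * N(S)/N(R) sequence numbers, where present, sit in the lower bits.
+ */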
+ switch (control & SHDLC_CONTROL_HEAD_MASK) {
+ case SHDLC_CONTROL_HEAD_I:
+ case SHDLC_CONTROL_HEAD_I2:
+ if (shdlc->state == SHDLC_HALF_CONNECTED)
+ shdlc->state = SHDLC_CONNECTED;
+
+ ns = (control & SHDLC_CONTROL_NS_MASK) >> 3;
+ nr = control & SHDLC_CONTROL_NR_MASK;
+ llc_shdlc_rcv_i_frame(shdlc, skb, ns, nr);
+ break;
+ case SHDLC_CONTROL_HEAD_S:
+ if (shdlc->state == SHDLC_HALF_CONNECTED)
+ shdlc->state = SHDLC_CONNECTED;
+
+ s_frame_type = (control & SHDLC_CONTROL_TYPE_MASK) >> 3;
+ nr = control & SHDLC_CONTROL_NR_MASK;
+ llc_shdlc_rcv_s_frame(shdlc, s_frame_type, nr);
+ kfree_skb(skb);
+ break;
+ case SHDLC_CONTROL_HEAD_U:
+ u_frame_modifier = control & SHDLC_CONTROL_M_MASK;
+ llc_shdlc_rcv_u_frame(shdlc, skb, u_frame_modifier);
+ break;
+ default:
+ pr_err("UNKNOWN Control=%d\n", control);
+ kfree_skb(skb);
+ break;
+ }
+ }
+}
+
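+/*
+ * Number of sent I-frames still waiting to be acked, using modulo-8
+ * sequence arithmetic (ns = next seq to send, dnr = next expected ack).
+ */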
+static int llc_shdlc_w_used(int ns, int dnr)
+{
+ int unack_count;
+
+ if (dnr <= ns)
+ unack_count = ns - dnr;
+ else
+ unack_count = 8 - dnr + ns;
+
+ return unack_count;
+}
+
+/* Send frames according to algorithm at spec:10.8.1 */
+static void llc_shdlc_handle_send_queue(struct llc_shdlc *shdlc)
+{
+ struct sk_buff *skb;
+ int r;
+ unsigned long time_sent;
+
+ if (shdlc->send_q.qlen)
+ pr_debug("sendQlen=%d ns=%d dnr=%d rnr=%s w_room=%d unackQlen=%d\n",
+ shdlc->send_q.qlen, shdlc->ns, shdlc->dnr,
+ shdlc->rnr == false ? "false" : "true",
+ shdlc->w - llc_shdlc_w_used(shdlc->ns, shdlc->dnr),
+ shdlc->ack_pending_q.qlen);
+
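+ /*
+ * Send while frames are queued, the send window has room and the
+ * remote has not signalled Receive Not Ready (RNR).
+ */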
+ while (shdlc->send_q.qlen && shdlc->ack_pending_q.qlen < shdlc->w &&
+ (shdlc->rnr == false)) {
+
+ if (shdlc->t1_active) {
+ del_timer_sync(&shdlc->t1_timer);
+ shdlc->t1_active = false;
+ pr_debug("Stopped T1(send ack)\n");
+ }
+
+ skb = skb_dequeue(&shdlc->send_q);
+
+ *(u8 *)skb_push(skb, 1) = SHDLC_CONTROL_HEAD_I | (shdlc->ns << 3) |
+ shdlc->nr;
+
+ pr_debug("Sending I-Frame %d, waiting to rcv %d\n", shdlc->ns,
+ shdlc->nr);
+ SHDLC_DUMP_SKB("shdlc frame written", skb);
+
+ r = shdlc->xmit_to_drv(shdlc->hdev, skb);
+ if (r < 0) {
+ shdlc->hard_fault = r;
+ break;
+ }
+
+ shdlc->ns = (shdlc->ns + 1) % 8;
+
+ time_sent = jiffies;
+ *(unsigned long *)skb->cb = time_sent;
+
+ skb_queue_tail(&shdlc->ack_pending_q, skb);
+
+ if (shdlc->t2_active == false) {
+ shdlc->t2_active = true;
+ mod_timer(&shdlc->t2_timer, time_sent +
+ msecs_to_jiffies(SHDLC_T2_VALUE_MS));
+ pr_debug("Started T2 (retransmit)\n");
+ }
+ }
+}
+
+static void llc_shdlc_connect_timeout(struct timer_list *t)
+{
+ struct llc_shdlc *shdlc = from_timer(shdlc, t, connect_timer);
+
+ schedule_work(&shdlc->sm_work);
+}
+
+static void llc_shdlc_t1_timeout(struct timer_list *t)
+{
+ struct llc_shdlc *shdlc = from_timer(shdlc, t, t1_timer);
+
+ pr_debug("SoftIRQ: need to send ack\n");
+
+ schedule_work(&shdlc->sm_work);
+}
+
+static void llc_shdlc_t2_timeout(struct timer_list *t)
+{
+ struct llc_shdlc *shdlc = from_timer(shdlc, t, t2_timer);
+
+ pr_debug("SoftIRQ: need to retransmit\n");
+
+ schedule_work(&shdlc->sm_work);
+}
+
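+/*
+ * State machine worker: handles the rcv/send queues and timer expiries for
+ * the current state, serialized under state_mutex.
+ */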
+static void llc_shdlc_sm_work(struct work_struct *work)
+{
+ struct llc_shdlc *shdlc = container_of(work, struct llc_shdlc, sm_work);
+ int r;
+
+ mutex_lock(&shdlc->state_mutex);
+
+ switch (shdlc->state) {
+ case SHDLC_DISCONNECTED:
+ skb_queue_purge(&shdlc->rcv_q);
+ skb_queue_purge(&shdlc->send_q);
+ skb_queue_purge(&shdlc->ack_pending_q);
+ break;
+ case SHDLC_CONNECTING:
+ if (shdlc->hard_fault) {
+ llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+ break;
+ }
+
+ if (shdlc->connect_tries++ < 5)
+ r = llc_shdlc_connect_initiate(shdlc);
+ else
+ r = -ETIME;
+ if (r < 0) {
+ llc_shdlc_connect_complete(shdlc, r);
+ } else {
+ mod_timer(&shdlc->connect_timer, jiffies +
+ msecs_to_jiffies(SHDLC_CONNECT_VALUE_MS));
+
+ shdlc->state = SHDLC_NEGOTIATING;
+ }
+ break;
+ case SHDLC_NEGOTIATING:
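+ /*
+ * Connect timer expired without an answer to our RSET:
+ * go back to CONNECTING and retry.
+ */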
+ if (timer_pending(&shdlc->connect_timer) == 0) {
+ shdlc->state = SHDLC_CONNECTING;
+ schedule_work(&shdlc->sm_work);
+ }
+
+ llc_shdlc_handle_rcv_queue(shdlc);
+
+ if (shdlc->hard_fault) {
+ llc_shdlc_connect_complete(shdlc, shdlc->hard_fault);
+ break;
+ }
+ break;
+ case SHDLC_HALF_CONNECTED:
+ case SHDLC_CONNECTED:
+ llc_shdlc_handle_rcv_queue(shdlc);
+ llc_shdlc_handle_send_queue(shdlc);
+
+ if (shdlc->t1_active && timer_pending(&shdlc->t1_timer) == 0) {
+ pr_debug("Handle T1(send ack) elapsed (T1 now inactive)\n");
+
+ shdlc->t1_active = false;
+ r = llc_shdlc_send_s_frame(shdlc, S_FRAME_RR,
+ shdlc->nr);
+ if (r < 0)
+ shdlc->hard_fault = r;
+ }
+
+ if (shdlc->t2_active && timer_pending(&shdlc->t2_timer) == 0) {
+ pr_debug("Handle T2(retransmit) elapsed (T2 inactive)\n");
+
+ shdlc->t2_active = false;
+
+ llc_shdlc_requeue_ack_pending(shdlc);
+ llc_shdlc_handle_send_queue(shdlc);
+ }
+
+ if (shdlc->hard_fault)
+ shdlc->llc_failure(shdlc->hdev, shdlc->hard_fault);
+ break;
+ default:
+ break;
+ }
+ mutex_unlock(&shdlc->state_mutex);
+}
+
+/*
+ * Called from syscall context to establish shdlc link. Sleeps until
+ * link is ready or failure.
+ */
+static int llc_shdlc_connect(struct llc_shdlc *shdlc)
+{
+ DECLARE_WAIT_QUEUE_HEAD_ONSTACK(connect_wq);
+
+ mutex_lock(&shdlc->state_mutex);
+
+ shdlc->state = SHDLC_CONNECTING;
+ shdlc->connect_wq = &connect_wq;
+ shdlc->connect_tries = 0;
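+ /*
+ * 1 means the connection attempt is still pending; the state machine
+ * stores the final result and wakes us up below.
+ */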
+ shdlc->connect_result = 1;
+
+ mutex_unlock(&shdlc->state_mutex);
+
+ schedule_work(&shdlc->sm_work);
+
+ wait_event(connect_wq, shdlc->connect_result != 1);
+
+ return shdlc->connect_result;
+}
+
+static void llc_shdlc_disconnect(struct llc_shdlc *shdlc)
+{
+ mutex_lock(&shdlc->state_mutex);
+
+ shdlc->state = SHDLC_DISCONNECTED;
+
+ mutex_unlock(&shdlc->state_mutex);
+
+ schedule_work(&shdlc->sm_work);
+}
+
+/*
+ * Receive an incoming shdlc frame. Frame has already been crc-validated.
+ * skb contains only LLC header and payload.
+ * If skb == NULL, it is a notification that the link below is dead.
+ */
+static void llc_shdlc_recv_frame(struct llc_shdlc *shdlc, struct sk_buff *skb)
+{
+ if (skb == NULL) {
+ pr_err("NULL Frame -> link is dead\n");
+ shdlc->hard_fault = -EREMOTEIO;
+ } else {
+ SHDLC_DUMP_SKB("incoming frame", skb);
+ skb_queue_tail(&shdlc->rcv_q, skb);
+ }
+
+ schedule_work(&shdlc->sm_work);
+}
+
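+/* nfc_llc_ops .init callback: allocate and set up an shdlc instance */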
+static void *llc_shdlc_init(struct nfc_hci_dev *hdev, xmit_to_drv_t xmit_to_drv,
+ rcv_to_hci_t rcv_to_hci, int tx_headroom,
+ int tx_tailroom, int *rx_headroom, int *rx_tailroom,
+ llc_failure_t llc_failure)
+{
+ struct llc_shdlc *shdlc;
+
+ *rx_headroom = SHDLC_LLC_HEAD_ROOM;
+ *rx_tailroom = 0;
+
+ shdlc = kzalloc(sizeof(struct llc_shdlc), GFP_KERNEL);
+ if (shdlc == NULL)
+ return NULL;
+
+ mutex_init(&shdlc->state_mutex);
+ shdlc->state = SHDLC_DISCONNECTED;
+
+ timer_setup(&shdlc->connect_timer, llc_shdlc_connect_timeout, 0);
+ timer_setup(&shdlc->t1_timer, llc_shdlc_t1_timeout, 0);
+ timer_setup(&shdlc->t2_timer, llc_shdlc_t2_timeout, 0);
+
+ shdlc->w = SHDLC_MAX_WINDOW;
+ shdlc->srej_support = SHDLC_SREJ_SUPPORT;
+
+ skb_queue_head_init(&shdlc->rcv_q);
+ skb_queue_head_init(&shdlc->send_q);
+ skb_queue_head_init(&shdlc->ack_pending_q);
+
+ INIT_WORK(&shdlc->sm_work, llc_shdlc_sm_work);
+
+ shdlc->hdev = hdev;
+ shdlc->xmit_to_drv = xmit_to_drv;
+ shdlc->rcv_to_hci = rcv_to_hci;
+ shdlc->tx_headroom = tx_headroom;
+ shdlc->tx_tailroom = tx_tailroom;
+ shdlc->llc_failure = llc_failure;
+
+ return shdlc;
+}
+
+static void llc_shdlc_deinit(struct nfc_llc *llc)
+{
+ struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+ skb_queue_purge(&shdlc->rcv_q);
+ skb_queue_purge(&shdlc->send_q);
+ skb_queue_purge(&shdlc->ack_pending_q);
+
+ kfree(shdlc);
+}
+
+static int llc_shdlc_start(struct nfc_llc *llc)
+{
+ struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+ return llc_shdlc_connect(shdlc);
+}
+
+static int llc_shdlc_stop(struct nfc_llc *llc)
+{
+ struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+ llc_shdlc_disconnect(shdlc);
+
+ return 0;
+}
+
+static void llc_shdlc_rcv_from_drv(struct nfc_llc *llc, struct sk_buff *skb)
+{
+ struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+ llc_shdlc_recv_frame(shdlc, skb);
+}
+
+static int llc_shdlc_xmit_from_hci(struct nfc_llc *llc, struct sk_buff *skb)
+{
+ struct llc_shdlc *shdlc = nfc_llc_get_data(llc);
+
+ skb_queue_tail(&shdlc->send_q, skb);
+
+ schedule_work(&shdlc->sm_work);
+
+ return 0;
+}
+
+static const struct nfc_llc_ops llc_shdlc_ops = {
+ .init = llc_shdlc_init,
+ .deinit = llc_shdlc_deinit,
+ .start = llc_shdlc_start,
+ .stop = llc_shdlc_stop,
+ .rcv_from_drv = llc_shdlc_rcv_from_drv,
+ .xmit_from_hci = llc_shdlc_xmit_from_hci,
+};
+
+int nfc_llc_shdlc_register(void)
+{
+ return nfc_llc_register(LLC_SHDLC_NAME, &llc_shdlc_ops);
+}